/* drivers/net/wireless/ath/ath10k/pci.c
 * (blob as of commit "ath10k: fix possible memory leak in ath10k_pci_probe()")
 */
/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>

#include "core.h"
#include "debug.h"

#include "targaddrs.h"
#include "bmi.h"

#include "hif.h"
#include "htc.h"

#include "ce.h"
#include "pci.h"

static unsigned int ath10k_target_ps;
module_param(ath10k_target_ps, uint, 0644);
MODULE_PARM_DESC(ath10k_target_ps, "Enable ath10k Target (SoC) PS option");

#define QCA988X_2_0_DEVICE_ID   (0x003c)

static DEFINE_PCI_DEVICE_TABLE(ath10k_pci_id_table) = {
        { PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
        {0}
};

static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
                                       u32 *data);

static void ath10k_pci_process_ce(struct ath10k *ar);
static int ath10k_pci_post_rx(struct ath10k *ar);
static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
                                   int num);
static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info);
static void ath10k_pci_stop_ce(struct ath10k *ar);
static void ath10k_pci_device_reset(struct ath10k *ar);
static int ath10k_pci_reset_target(struct ath10k *ar);
static int ath10k_pci_start_intr(struct ath10k *ar);
static void ath10k_pci_stop_intr(struct ath10k *ar);

static const struct ce_attr host_ce_config_wlan[] = {
        /* CE0: host->target HTC control and raw streams */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 16,
                .src_sz_max = 256,
                .dest_nentries = 0,
        },

        /* CE1: target->host HTT + HTC control */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
                .src_sz_max = 512,
                .dest_nentries = 512,
        },

        /* CE2: target->host WMI */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
                .src_sz_max = 2048,
                .dest_nentries = 32,
        },

        /* CE3: host->target WMI */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 32,
                .src_sz_max = 2048,
                .dest_nentries = 0,
        },

        /* CE4: host->target HTT */
        {
                .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
                .src_nentries = CE_HTT_H2T_MSG_SRC_NENTRIES,
                .src_sz_max = 256,
                .dest_nentries = 0,
        },

        /* CE5: unused */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
                .src_sz_max = 0,
                .dest_nentries = 0,
        },

        /* CE6: target autonomous hif_memcpy */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
                .src_sz_max = 0,
                .dest_nentries = 0,
        },

        /* CE7: ce_diag, the Diagnostic Window */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 2,
                .src_sz_max = DIAG_TRANSFER_LIMIT,
                .dest_nentries = 2,
        },
};
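
/*
 * Note: in each ce_attr above, the src_* fields describe the
 * host->target (send) ring and the dest_* fields the target->host
 * (receive) ring; an entry count of zero disables that direction for
 * the CE. CE1, for example, is receive-only: 512 destination
 * descriptors of up to 512 bytes each.
 */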

/* Target firmware's Copy Engine configuration. */
static const struct ce_pipe_config target_ce_config_wlan[] = {
        /* CE0: host->target HTC control and raw streams */
        {
                .pipenum = 0,
                .pipedir = PIPEDIR_OUT,
                .nentries = 32,
                .nbytes_max = 256,
                .flags = CE_ATTR_FLAGS,
                .reserved = 0,
        },

        /* CE1: target->host HTT + HTC control */
        {
                .pipenum = 1,
                .pipedir = PIPEDIR_IN,
                .nentries = 32,
                .nbytes_max = 512,
                .flags = CE_ATTR_FLAGS,
                .reserved = 0,
        },

        /* CE2: target->host WMI */
        {
                .pipenum = 2,
                .pipedir = PIPEDIR_IN,
                .nentries = 32,
                .nbytes_max = 2048,
                .flags = CE_ATTR_FLAGS,
                .reserved = 0,
        },

        /* CE3: host->target WMI */
        {
                .pipenum = 3,
                .pipedir = PIPEDIR_OUT,
                .nentries = 32,
                .nbytes_max = 2048,
                .flags = CE_ATTR_FLAGS,
                .reserved = 0,
        },

        /* CE4: host->target HTT */
        {
                .pipenum = 4,
                .pipedir = PIPEDIR_OUT,
                .nentries = 256,
                .nbytes_max = 256,
                .flags = CE_ATTR_FLAGS,
                .reserved = 0,
        },

        /* NB: 50% of src nentries, since tx has 2 frags */

        /* CE5: unused */
        {
                .pipenum = 5,
                .pipedir = PIPEDIR_OUT,
                .nentries = 32,
                .nbytes_max = 2048,
                .flags = CE_ATTR_FLAGS,
                .reserved = 0,
        },

        /* CE6: Reserved for target autonomous hif_memcpy */
        {
                .pipenum = 6,
                .pipedir = PIPEDIR_INOUT,
                .nentries = 32,
                .nbytes_max = 4096,
                .flags = CE_ATTR_FLAGS,
                .reserved = 0,
        },

        /* CE7 used only by Host */
};
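
/*
 * Note: this table is the target-side view of the same pipes and is
 * passed to the firmware. pipedir follows the convention used in
 * target_service_to_ce_map_wlan below: PIPEDIR_OUT is host->target
 * (UL) and PIPEDIR_IN is target->host (DL).
 */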

/*
 * Diagnostic read/write access is provided for startup/config/debug usage.
 * Caller must guarantee proper alignment, when applicable, and single user
 * at any moment.
 */
static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
                                    int nbytes)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ret = 0;
        u32 buf;
        unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
        unsigned int id;
        unsigned int flags;
        struct ath10k_ce_pipe *ce_diag;
        /* Host buffer address in CE space */
        u32 ce_data;
        dma_addr_t ce_data_base = 0;
        void *data_buf = NULL;
        int i;

        /*
         * This code cannot handle reads to non-memory space. Redirect to the
         * register read fn but preserve the multi-word read capability of
         * this fn.
         */
        if (address < DRAM_BASE_ADDRESS) {
                if (!IS_ALIGNED(address, 4) ||
                    !IS_ALIGNED((unsigned long)data, 4))
                        return -EIO;

                while ((nbytes >= 4) && ((ret = ath10k_pci_diag_read_access(
                                           ar, address, (u32 *)data)) == 0)) {
                        nbytes -= sizeof(u32);
                        address += sizeof(u32);
                        data += sizeof(u32);
                }
                return ret;
        }

        ce_diag = ar_pci->ce_diag;

        /*
         * Allocate a temporary bounce buffer to hold caller's data
         * to be DMA'ed from Target. This guarantees
         *   1) 4-byte alignment
         *   2) Buffer in DMA-able space
         */
        orig_nbytes = nbytes;
        data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev,
                                                         orig_nbytes,
                                                         &ce_data_base);

        if (!data_buf) {
                ret = -ENOMEM;
                goto done;
        }
        memset(data_buf, 0, orig_nbytes);

        remaining_bytes = orig_nbytes;
        ce_data = ce_data_base;
        while (remaining_bytes) {
                nbytes = min_t(unsigned int, remaining_bytes,
                               DIAG_TRANSFER_LIMIT);

                ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, ce_data);
                if (ret != 0)
                        goto done;

                /* Request CE to send from Target(!) address to Host buffer */
                /*
                 * The address supplied by the caller is in the
                 * Target CPU virtual address space.
                 *
                 * In order to use this address with the diagnostic CE,
                 * convert it from Target CPU virtual address space
                 * to CE address space
                 */
                ath10k_pci_wake(ar);
                address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem,
                                                     address);
                ath10k_pci_sleep(ar);

                ret = ath10k_ce_send(ce_diag, NULL, (u32)address, nbytes, 0,
                                     0);
                if (ret)
                        goto done;

                i = 0;
                while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
                                                     &completed_nbytes,
                                                     &id) != 0) {
                        mdelay(1);
                        if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
                                ret = -EBUSY;
                                goto done;
                        }
                }

                if (nbytes != completed_nbytes) {
                        ret = -EIO;
                        goto done;
                }

                if (buf != (u32) address) {
                        ret = -EIO;
                        goto done;
                }

                i = 0;
                while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
                                                     &completed_nbytes,
                                                     &id, &flags) != 0) {
                        mdelay(1);

                        if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
                                ret = -EBUSY;
                                goto done;
                        }
                }

                if (nbytes != completed_nbytes) {
                        ret = -EIO;
                        goto done;
                }

                if (buf != ce_data) {
                        ret = -EIO;
                        goto done;
                }

                remaining_bytes -= nbytes;
                address += nbytes;
                ce_data += nbytes;
        }

done:
        if (ret == 0) {
                /* Copy data from allocated DMA buf to caller's buf */
                WARN_ON_ONCE(orig_nbytes & 3);
                for (i = 0; i < orig_nbytes / sizeof(__le32); i++) {
                        ((u32 *)data)[i] =
                                __le32_to_cpu(((__le32 *)data_buf)[i]);
                }
        } else
                ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n",
                           __func__, address);

        if (data_buf)
                pci_free_consistent(ar_pci->pdev, orig_nbytes,
                                    data_buf, ce_data_base);

        return ret;
}
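
/*
 * A minimal usage sketch (hypothetical caller, not part of this file):
 * reading one 32-bit word of target RAM through the diagnostic CE:
 *
 *        u32 val;
 *        int ret;
 *
 *        ret = ath10k_pci_diag_read_mem(ar, DRAM_BASE_ADDRESS + 0x1000,
 *                                       &val, sizeof(val));
 *        if (ret)
 *                return ret;
 *
 * Addresses below DRAM_BASE_ADDRESS are treated as register space and
 * are read word-by-word via ath10k_pci_diag_read_access() instead.
 */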

/* Read 4-byte aligned data from Target memory or register */
static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
                                       u32 *data)
{
        /* Assume range doesn't cross this boundary */
        if (address >= DRAM_BASE_ADDRESS)
                return ath10k_pci_diag_read_mem(ar, address, data, sizeof(u32));

        ath10k_pci_wake(ar);
        *data = ath10k_pci_read32(ar, address);
        ath10k_pci_sleep(ar);
        return 0;
}

static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
                                     const void *data, int nbytes)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ret = 0;
        u32 buf;
        unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
        unsigned int id;
        unsigned int flags;
        struct ath10k_ce_pipe *ce_diag;
        void *data_buf = NULL;
        u32 ce_data;    /* Host buffer address in CE space */
        dma_addr_t ce_data_base = 0;
        int i;

        ce_diag = ar_pci->ce_diag;

        /*
         * Allocate a temporary bounce buffer to hold caller's data
         * to be DMA'ed to Target. This guarantees
         *   1) 4-byte alignment
         *   2) Buffer in DMA-able space
         */
        orig_nbytes = nbytes;
        data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev,
                                                         orig_nbytes,
                                                         &ce_data_base);
        if (!data_buf) {
                ret = -ENOMEM;
                goto done;
        }

        /* Copy caller's data to allocated DMA buf */
        WARN_ON_ONCE(orig_nbytes & 3);
        for (i = 0; i < orig_nbytes / sizeof(__le32); i++)
                ((__le32 *)data_buf)[i] = __cpu_to_le32(((u32 *)data)[i]);

        /*
         * The address supplied by the caller is in the
         * Target CPU virtual address space.
         *
         * In order to use this address with the diagnostic CE,
         * convert it from
         *    Target CPU virtual address space
         * to
         *    CE address space
         */
        ath10k_pci_wake(ar);
        address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem, address);
        ath10k_pci_sleep(ar);

        remaining_bytes = orig_nbytes;
        ce_data = ce_data_base;
        while (remaining_bytes) {
                /* FIXME: check cast */
                nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);

                /* Set up to receive directly into Target(!) address */
                ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, address);
                if (ret != 0)
                        goto done;

                /*
                 * Request CE to send caller-supplied data that
                 * was copied to bounce buffer to Target(!) address.
                 */
                ret = ath10k_ce_send(ce_diag, NULL, (u32) ce_data,
                                     nbytes, 0, 0);
                if (ret != 0)
                        goto done;

                i = 0;
                while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
                                                     &completed_nbytes,
                                                     &id) != 0) {
                        mdelay(1);

                        if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
                                ret = -EBUSY;
                                goto done;
                        }
                }

                if (nbytes != completed_nbytes) {
                        ret = -EIO;
                        goto done;
                }

                if (buf != ce_data) {
                        ret = -EIO;
                        goto done;
                }

                i = 0;
                while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
                                                     &completed_nbytes,
                                                     &id, &flags) != 0) {
                        mdelay(1);

                        if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
                                ret = -EBUSY;
                                goto done;
                        }
                }

                if (nbytes != completed_nbytes) {
                        ret = -EIO;
                        goto done;
                }

                if (buf != address) {
                        ret = -EIO;
                        goto done;
                }

                remaining_bytes -= nbytes;
                address += nbytes;
                ce_data += nbytes;
        }

done:
        if (data_buf) {
                pci_free_consistent(ar_pci->pdev, orig_nbytes, data_buf,
                                    ce_data_base);
        }

        if (ret != 0)
                ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n", __func__,
                           address);

        return ret;
}

/* Write 4-byte aligned data to Target memory or register */
static int ath10k_pci_diag_write_access(struct ath10k *ar, u32 address,
                                        u32 data)
{
        /* Assume range doesn't cross this boundary */
        if (address >= DRAM_BASE_ADDRESS)
                return ath10k_pci_diag_write_mem(ar, address, &data,
                                                 sizeof(u32));

        ath10k_pci_wake(ar);
        ath10k_pci_write32(ar, address, data);
        ath10k_pci_sleep(ar);
        return 0;
}

static bool ath10k_pci_target_is_awake(struct ath10k *ar)
{
        void __iomem *mem = ath10k_pci_priv(ar)->mem;
        u32 val;
        val = ioread32(mem + PCIE_LOCAL_BASE_ADDRESS +
                       RTC_STATE_ADDRESS);
        return (RTC_STATE_V_GET(val) == RTC_STATE_V_ON);
}

static int ath10k_pci_wait(struct ath10k *ar)
{
        int n = 100;

        while (n-- && !ath10k_pci_target_is_awake(ar))
                msleep(10);

        if (n < 0) {
                ath10k_warn("Unable to wake up target\n");
                return -ETIMEDOUT;
        }

        return 0;
}

int ath10k_do_pci_wake(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        void __iomem *pci_addr = ar_pci->mem;
        int tot_delay = 0;
        int curr_delay = 5;

        if (atomic_read(&ar_pci->keep_awake_count) == 0) {
                /* Force AWAKE */
                iowrite32(PCIE_SOC_WAKE_V_MASK,
                          pci_addr + PCIE_LOCAL_BASE_ADDRESS +
                          PCIE_SOC_WAKE_ADDRESS);
        }
        atomic_inc(&ar_pci->keep_awake_count);

        if (ar_pci->verified_awake)
                return 0;

        for (;;) {
                if (ath10k_pci_target_is_awake(ar)) {
                        ar_pci->verified_awake = true;
                        return 0;
                }

                if (tot_delay > PCIE_WAKE_TIMEOUT) {
                        ath10k_warn("target took longer than %d us to wake up (awake count %d)\n",
                                    PCIE_WAKE_TIMEOUT,
                                    atomic_read(&ar_pci->keep_awake_count));
                        return -ETIMEDOUT;
                }

                udelay(curr_delay);
                tot_delay += curr_delay;

                if (curr_delay < 50)
                        curr_delay += 5;
        }
}

void ath10k_do_pci_sleep(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        void __iomem *pci_addr = ar_pci->mem;

        if (atomic_dec_and_test(&ar_pci->keep_awake_count)) {
                /* Allow sleep */
                ar_pci->verified_awake = false;
                iowrite32(PCIE_SOC_WAKE_RESET,
                          pci_addr + PCIE_LOCAL_BASE_ADDRESS +
                          PCIE_SOC_WAKE_ADDRESS);
        }
}
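
/*
 * Note: wake/sleep are refcounted through keep_awake_count; every
 * ath10k_do_pci_wake() must be balanced by exactly one
 * ath10k_do_pci_sleep(), and the SoC is only allowed to doze again once
 * the count drops back to zero. A minimal sketch of the pattern used
 * throughout this file (addr and val are placeholders):
 *
 *        ath10k_pci_wake(ar);
 *        ath10k_pci_write32(ar, addr, val);
 *        ath10k_pci_sleep(ar);
 */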

/*
 * FIXME: Handle OOM properly.
 */
static inline
struct ath10k_pci_compl *get_free_compl(struct ath10k_pci_pipe *pipe_info)
{
        struct ath10k_pci_compl *compl = NULL;

        spin_lock_bh(&pipe_info->pipe_lock);
        if (list_empty(&pipe_info->compl_free)) {
                ath10k_warn("Completion buffers are full\n");
                goto exit;
        }
        compl = list_first_entry(&pipe_info->compl_free,
                                 struct ath10k_pci_compl, list);
        list_del(&compl->list);
exit:
        spin_unlock_bh(&pipe_info->pipe_lock);
        return compl;
}

/* Called by lower (CE) layer when a send to Target completes. */
static void ath10k_pci_ce_send_done(struct ath10k_ce_pipe *ce_state)
{
        struct ath10k *ar = ce_state->ar;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
        struct ath10k_pci_compl *compl;
        void *transfer_context;
        u32 ce_data;
        unsigned int nbytes;
        unsigned int transfer_id;

        while (ath10k_ce_completed_send_next(ce_state, &transfer_context,
                                             &ce_data, &nbytes,
                                             &transfer_id) == 0) {
                compl = get_free_compl(pipe_info);
                if (!compl)
                        break;

                compl->state = ATH10K_PCI_COMPL_SEND;
                compl->ce_state = ce_state;
                compl->pipe_info = pipe_info;
                compl->skb = transfer_context;
                compl->nbytes = nbytes;
                compl->transfer_id = transfer_id;
                compl->flags = 0;

                /*
                 * Add the completion to the processing queue.
                 */
                spin_lock_bh(&ar_pci->compl_lock);
                list_add_tail(&compl->list, &ar_pci->compl_process);
                spin_unlock_bh(&ar_pci->compl_lock);
        }

        ath10k_pci_process_ce(ar);
}

/* Called by lower (CE) layer when data is received from the Target. */
static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state)
{
        struct ath10k *ar = ce_state->ar;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
        struct ath10k_pci_compl *compl;
        struct sk_buff *skb;
        void *transfer_context;
        u32 ce_data;
        unsigned int nbytes;
        unsigned int transfer_id;
        unsigned int flags;

        while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
                                             &ce_data, &nbytes, &transfer_id,
                                             &flags) == 0) {
                compl = get_free_compl(pipe_info);
                if (!compl)
                        break;

                compl->state = ATH10K_PCI_COMPL_RECV;
                compl->ce_state = ce_state;
                compl->pipe_info = pipe_info;
                compl->skb = transfer_context;
                compl->nbytes = nbytes;
                compl->transfer_id = transfer_id;
                compl->flags = flags;

                skb = transfer_context;
                dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
                                 skb->len + skb_tailroom(skb),
                                 DMA_FROM_DEVICE);
                /*
                 * Add the completion to the processing queue.
                 */
                spin_lock_bh(&ar_pci->compl_lock);
                list_add_tail(&compl->list, &ar_pci->compl_process);
                spin_unlock_bh(&ar_pci->compl_lock);
        }

        ath10k_pci_process_ce(ar);
}
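
/*
 * Note: neither CE callback above invokes the upper-layer (HTC/HTT)
 * handlers directly. Each one only drains its CE ring into
 * ar_pci->compl_process and then calls ath10k_pci_process_ce(), which
 * serializes all send and receive completion handling.
 */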

/* Send the first nbytes bytes of the buffer */
static int ath10k_pci_hif_send_head(struct ath10k *ar, u8 pipe_id,
                                    unsigned int transfer_id,
                                    unsigned int bytes, struct sk_buff *nbuf)
{
        struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(nbuf);
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_pipe *pipe_info = &(ar_pci->pipe_info[pipe_id]);
        struct ath10k_ce_pipe *ce_hdl = pipe_info->ce_hdl;
        unsigned int len;
        u32 flags = 0;
        int ret;

        len = min(bytes, nbuf->len);
        bytes -= len;

        if (len & 3)
                ath10k_warn("skb not aligned to 4-byte boundary (%d)\n", len);

        ath10k_dbg(ATH10K_DBG_PCI,
                   "pci send data vaddr %p paddr 0x%llx len %d as %d bytes\n",
                   nbuf->data, (unsigned long long) skb_cb->paddr,
                   nbuf->len, len);
        ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL,
                        "ath10k tx: data: ",
                        nbuf->data, nbuf->len);

        ret = ath10k_ce_send(ce_hdl, nbuf, skb_cb->paddr, len, transfer_id,
                             flags);
        if (ret)
                ath10k_warn("CE send failed: %p\n", nbuf);

        return ret;
}

static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        return ath10k_ce_num_free_src_entries(ar_pci->pipe_info[pipe].ce_hdl);
}

static void ath10k_pci_hif_dump_area(struct ath10k *ar)
{
        u32 reg_dump_area = 0;
        u32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
        u32 host_addr;
        int ret;
        u32 i;

        ath10k_err("firmware crashed!\n");
        ath10k_err("hardware name %s version 0x%x\n",
                   ar->hw_params.name, ar->target_version);
        ath10k_err("firmware version: %u.%u.%u.%u\n", ar->fw_version_major,
                   ar->fw_version_minor, ar->fw_version_release,
                   ar->fw_version_build);

        host_addr = host_interest_item_address(HI_ITEM(hi_failure_state));
        if (ath10k_pci_diag_read_mem(ar, host_addr,
                                     &reg_dump_area, sizeof(u32)) != 0) {
                ath10k_warn("could not read hi_failure_state\n");
                return;
        }

        ath10k_err("target register Dump Location: 0x%08X\n", reg_dump_area);

        ret = ath10k_pci_diag_read_mem(ar, reg_dump_area,
                                       &reg_dump_values[0],
                                       REG_DUMP_COUNT_QCA988X * sizeof(u32));
        if (ret != 0) {
                ath10k_err("could not dump FW Dump Area\n");
                return;
        }

        BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4);

        ath10k_err("target Register Dump\n");
        for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4)
                ath10k_err("[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
                           i,
                           reg_dump_values[i],
                           reg_dump_values[i + 1],
                           reg_dump_values[i + 2],
                           reg_dump_values[i + 3]);

        queue_work(ar->workqueue, &ar->restart_work);
}

static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
                                               int force)
{
        if (!force) {
                int resources;
                /*
                 * Decide whether to actually poll for completions, or just
                 * wait for a later chance.
                 * If there seem to be plenty of resources left, then just wait
                 * since checking involves reading a CE register, which is a
                 * relatively expensive operation.
                 */
                resources = ath10k_pci_hif_get_free_queue_number(ar, pipe);

                /*
                 * If at least 50% of the total resources are still available,
                 * don't bother checking again yet.
                 */
                if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
                        return;
        }
        ath10k_ce_per_engine_service(ar, pipe);
}

static void ath10k_pci_hif_set_callbacks(struct ath10k *ar,
                                         struct ath10k_hif_cb *callbacks)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);

        memcpy(&ar_pci->msg_callbacks_current, callbacks,
               sizeof(ar_pci->msg_callbacks_current));
}

static int ath10k_pci_start_ce(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_ce_pipe *ce_diag = ar_pci->ce_diag;
        const struct ce_attr *attr;
        struct ath10k_pci_pipe *pipe_info;
        struct ath10k_pci_compl *compl;
        int i, pipe_num, completions, disable_interrupts;

        spin_lock_init(&ar_pci->compl_lock);
        INIT_LIST_HEAD(&ar_pci->compl_process);

        for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
                pipe_info = &ar_pci->pipe_info[pipe_num];

                spin_lock_init(&pipe_info->pipe_lock);
                INIT_LIST_HEAD(&pipe_info->compl_free);

                /* Handle Diagnostic CE specially */
                if (pipe_info->ce_hdl == ce_diag)
                        continue;

                attr = &host_ce_config_wlan[pipe_num];
                completions = 0;

                if (attr->src_nentries) {
                        disable_interrupts = attr->flags & CE_ATTR_DIS_INTR;
                        ath10k_ce_send_cb_register(pipe_info->ce_hdl,
                                                   ath10k_pci_ce_send_done,
                                                   disable_interrupts);
                        completions += attr->src_nentries;
                }

                if (attr->dest_nentries) {
                        ath10k_ce_recv_cb_register(pipe_info->ce_hdl,
                                                   ath10k_pci_ce_recv_data);
                        completions += attr->dest_nentries;
                }

                if (completions == 0)
                        continue;

                for (i = 0; i < completions; i++) {
                        compl = kmalloc(sizeof(*compl), GFP_KERNEL);
                        if (!compl) {
                                ath10k_warn("No memory for completion state\n");
                                ath10k_pci_stop_ce(ar);
                                return -ENOMEM;
                        }

                        compl->state = ATH10K_PCI_COMPL_FREE;
                        list_add_tail(&compl->list, &pipe_info->compl_free);
                }
        }

        return 0;
}

static void ath10k_pci_stop_ce(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_compl *compl;
        struct sk_buff *skb;
        int i;

        ath10k_ce_disable_interrupts(ar);

        /* Cancel the pending tasklet */
        tasklet_kill(&ar_pci->intr_tq);

        for (i = 0; i < CE_COUNT; i++)
                tasklet_kill(&ar_pci->pipe_info[i].intr);

        /* Mark pending completions as aborted, so that upper layers free up
         * their associated resources */
        spin_lock_bh(&ar_pci->compl_lock);
        list_for_each_entry(compl, &ar_pci->compl_process, list) {
                skb = compl->skb;
                ATH10K_SKB_CB(skb)->is_aborted = true;
        }
        spin_unlock_bh(&ar_pci->compl_lock);
}

static void ath10k_pci_cleanup_ce(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_compl *compl, *tmp;
        struct ath10k_pci_pipe *pipe_info;
        struct sk_buff *netbuf;
        int pipe_num;

        /* Free pending completions. */
        spin_lock_bh(&ar_pci->compl_lock);
        if (!list_empty(&ar_pci->compl_process))
                ath10k_warn("pending completions still present! possible memory leaks.\n");

        list_for_each_entry_safe(compl, tmp, &ar_pci->compl_process, list) {
                list_del(&compl->list);
                netbuf = compl->skb;
                dev_kfree_skb_any(netbuf);
                kfree(compl);
        }
        spin_unlock_bh(&ar_pci->compl_lock);

        /* Free unused completions for each pipe. */
        for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
                pipe_info = &ar_pci->pipe_info[pipe_num];

                spin_lock_bh(&pipe_info->pipe_lock);
                list_for_each_entry_safe(compl, tmp,
                                         &pipe_info->compl_free, list) {
                        list_del(&compl->list);
                        kfree(compl);
                }
                spin_unlock_bh(&pipe_info->pipe_lock);
        }
}

static void ath10k_pci_process_ce(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ar->hif.priv;
        struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
        struct ath10k_pci_compl *compl;
        struct sk_buff *skb;
        unsigned int nbytes;
        int ret, send_done = 0;

        /* Upper layers aren't ready to handle tx/rx completions in parallel so
         * we must serialize all completion processing. */

        spin_lock_bh(&ar_pci->compl_lock);
        if (ar_pci->compl_processing) {
                spin_unlock_bh(&ar_pci->compl_lock);
                return;
        }
        ar_pci->compl_processing = true;
        spin_unlock_bh(&ar_pci->compl_lock);

        for (;;) {
                spin_lock_bh(&ar_pci->compl_lock);
                if (list_empty(&ar_pci->compl_process)) {
                        spin_unlock_bh(&ar_pci->compl_lock);
                        break;
                }
                compl = list_first_entry(&ar_pci->compl_process,
                                         struct ath10k_pci_compl, list);
                list_del(&compl->list);
                spin_unlock_bh(&ar_pci->compl_lock);

                switch (compl->state) {
                case ATH10K_PCI_COMPL_SEND:
                        cb->tx_completion(ar,
                                          compl->skb,
                                          compl->transfer_id);
                        send_done = 1;
                        break;
                case ATH10K_PCI_COMPL_RECV:
                        ret = ath10k_pci_post_rx_pipe(compl->pipe_info, 1);
                        if (ret) {
                                ath10k_warn("Unable to post recv buffer for pipe: %d\n",
                                            compl->pipe_info->pipe_num);
                                break;
                        }

                        skb = compl->skb;
                        nbytes = compl->nbytes;

                        ath10k_dbg(ATH10K_DBG_PCI,
                                   "ath10k_pci_ce_recv_data netbuf=%p  nbytes=%d\n",
                                   skb, nbytes);
                        ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL,
                                        "ath10k rx: ", skb->data, nbytes);

                        if (skb->len + skb_tailroom(skb) >= nbytes) {
                                skb_trim(skb, 0);
                                skb_put(skb, nbytes);
                                cb->rx_completion(ar, skb,
                                                  compl->pipe_info->pipe_num);
                        } else {
                                ath10k_warn("rxed more than expected (nbytes %d, max %d)",
                                            nbytes,
                                            skb->len + skb_tailroom(skb));
                        }
                        break;
                case ATH10K_PCI_COMPL_FREE:
                        ath10k_warn("free completion cannot be processed\n");
                        break;
                default:
                        ath10k_warn("invalid completion state (%d)\n",
                                    compl->state);
                        break;
                }

                compl->state = ATH10K_PCI_COMPL_FREE;

                /*
                 * Add completion back to the pipe's free list.
                 */
                spin_lock_bh(&compl->pipe_info->pipe_lock);
                list_add_tail(&compl->list, &compl->pipe_info->compl_free);
                spin_unlock_bh(&compl->pipe_info->pipe_lock);
        }

        spin_lock_bh(&ar_pci->compl_lock);
        ar_pci->compl_processing = false;
        spin_unlock_bh(&ar_pci->compl_lock);
}
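
/*
 * Note: compl_processing acts as a reentrancy guard, so only one
 * context drains compl_process at a time. A concurrent caller returns
 * immediately and relies on the active drainer, which re-checks the
 * list under compl_lock on every iteration, to pick up completions
 * queued in the meantime.
 */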

/* TODO - temporary mapping while we have too few CE's */
static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar,
                                              u16 service_id, u8 *ul_pipe,
                                              u8 *dl_pipe, int *ul_is_polled,
                                              int *dl_is_polled)
{
        int ret = 0;

        /* polling for received messages not supported */
        *dl_is_polled = 0;

        switch (service_id) {
        case ATH10K_HTC_SVC_ID_HTT_DATA_MSG:
                /*
                 * Host->target HTT gets its own pipe, so it can be polled
                 * while other pipes are interrupt driven.
                 */
                *ul_pipe = 4;
                /*
                 * Use the same target->host pipe for HTC ctrl, HTC raw
                 * streams, and HTT.
                 */
                *dl_pipe = 1;
                break;

        case ATH10K_HTC_SVC_ID_RSVD_CTRL:
        case ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS:
                /*
                 * Note: HTC_RAW_STREAMS_SVC is currently unused, and
                 * HTC_CTRL_RSVD_SVC could share the same pipe as the
                 * WMI services.  So, if another CE is needed, change
                 * this to *ul_pipe = 3, which frees up CE 0.
                 */
                /* *ul_pipe = 3; */
                *ul_pipe = 0;
                *dl_pipe = 1;
                break;

        case ATH10K_HTC_SVC_ID_WMI_DATA_BK:
        case ATH10K_HTC_SVC_ID_WMI_DATA_BE:
        case ATH10K_HTC_SVC_ID_WMI_DATA_VI:
        case ATH10K_HTC_SVC_ID_WMI_DATA_VO:

        case ATH10K_HTC_SVC_ID_WMI_CONTROL:
                *ul_pipe = 3;
                *dl_pipe = 2;
                break;

                /* pipe 5 unused   */
                /* pipe 6 reserved */
                /* pipe 7 reserved */

        default:
                ret = -1;
                break;
        }
        *ul_is_polled =
                (host_ce_config_wlan[*ul_pipe].flags & CE_ATTR_DIS_INTR) != 0;

        return ret;
}
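
/*
 * Summary of the mapping above:
 *
 *        service                   UL (host->target)   DL (target->host)
 *        HTT data                  CE4 (polled)        CE1
 *        HTC ctrl / raw streams    CE0                 CE1
 *        WMI (ctrl + all ACs)      CE3                 CE2
 *
 * Only CE4 has CE_ATTR_DIS_INTR set in host_ce_config_wlan, so it is
 * the only pipe reported back as polled via *ul_is_polled.
 */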

static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
                                                u8 *ul_pipe, u8 *dl_pipe)
{
        int ul_is_polled, dl_is_polled;

        (void)ath10k_pci_hif_map_service_to_pipe(ar,
                                                 ATH10K_HTC_SVC_ID_RSVD_CTRL,
                                                 ul_pipe,
                                                 dl_pipe,
                                                 &ul_is_polled,
                                                 &dl_is_polled);
}

static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
                                   int num)
{
        struct ath10k *ar = pipe_info->hif_ce_state;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_ce_pipe *ce_state = pipe_info->ce_hdl;
        struct sk_buff *skb;
        dma_addr_t ce_data;
        int i, ret = 0;

        if (pipe_info->buf_sz == 0)
                return 0;

        for (i = 0; i < num; i++) {
                skb = dev_alloc_skb(pipe_info->buf_sz);
                if (!skb) {
                        ath10k_warn("could not allocate skbuff for pipe %d\n",
                                    pipe_info->pipe_num);
                        ret = -ENOMEM;
                        goto err;
                }

                WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");

                ce_data = dma_map_single(ar->dev, skb->data,
                                         skb->len + skb_tailroom(skb),
                                         DMA_FROM_DEVICE);

                if (unlikely(dma_mapping_error(ar->dev, ce_data))) {
                        ath10k_warn("could not dma map skbuff\n");
                        dev_kfree_skb_any(skb);
                        ret = -EIO;
                        goto err;
                }

                ATH10K_SKB_CB(skb)->paddr = ce_data;

                pci_dma_sync_single_for_device(ar_pci->pdev, ce_data,
                                               pipe_info->buf_sz,
                                               PCI_DMA_FROMDEVICE);

                ret = ath10k_ce_recv_buf_enqueue(ce_state, (void *)skb,
                                                 ce_data);
                if (ret) {
                        ath10k_warn("could not enqueue to pipe %d (%d)\n",
                                    pipe_info->pipe_num, ret);
                        goto err;
                }
        }

        return ret;

err:
        ath10k_pci_rx_pipe_cleanup(pipe_info);
        return ret;
}

static int ath10k_pci_post_rx(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_pipe *pipe_info;
        const struct ce_attr *attr;
        int pipe_num, ret = 0;

        for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
                pipe_info = &ar_pci->pipe_info[pipe_num];
                attr = &host_ce_config_wlan[pipe_num];

                if (attr->dest_nentries == 0)
                        continue;

                ret = ath10k_pci_post_rx_pipe(pipe_info,
                                              attr->dest_nentries - 1);
                if (ret) {
                        ath10k_warn("Unable to replenish recv buffers for pipe: %d\n",
                                    pipe_num);

                        for (; pipe_num >= 0; pipe_num--) {
                                pipe_info = &ar_pci->pipe_info[pipe_num];
                                ath10k_pci_rx_pipe_cleanup(pipe_info);
                        }
                        return ret;
                }
        }

        return 0;
}
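
/*
 * Note: each receive pipe is replenished with dest_nentries - 1
 * buffers rather than dest_nentries; keeping one descriptor slot empty
 * is presumably the usual ring-buffer convention for distinguishing a
 * full ring from an empty one.
 */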

static int ath10k_pci_hif_start(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ret;

        ret = ath10k_pci_start_ce(ar);
        if (ret) {
                ath10k_warn("could not start CE (%d)\n", ret);
                return ret;
        }

        /* Post buffers once to start things off. */
        ret = ath10k_pci_post_rx(ar);
        if (ret) {
                ath10k_warn("could not post rx pipes (%d)\n", ret);
                return ret;
        }

        ar_pci->started = 1;
        return 0;
}

static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
{
        struct ath10k *ar;
        struct ath10k_pci *ar_pci;
        struct ath10k_ce_pipe *ce_hdl;
        u32 buf_sz;
        struct sk_buff *netbuf;
        u32 ce_data;

        buf_sz = pipe_info->buf_sz;

        /* Unused Copy Engine */
        if (buf_sz == 0)
                return;

        ar = pipe_info->hif_ce_state;
        ar_pci = ath10k_pci_priv(ar);

        if (!ar_pci->started)
                return;

        ce_hdl = pipe_info->ce_hdl;

        while (ath10k_ce_revoke_recv_next(ce_hdl, (void **)&netbuf,
                                          &ce_data) == 0) {
                dma_unmap_single(ar->dev, ATH10K_SKB_CB(netbuf)->paddr,
                                 netbuf->len + skb_tailroom(netbuf),
                                 DMA_FROM_DEVICE);
                dev_kfree_skb_any(netbuf);
        }
}

static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
{
        struct ath10k *ar;
        struct ath10k_pci *ar_pci;
        struct ath10k_ce_pipe *ce_hdl;
        struct sk_buff *netbuf;
        u32 ce_data;
        unsigned int nbytes;
        unsigned int id;
        u32 buf_sz;

        buf_sz = pipe_info->buf_sz;

        /* Unused Copy Engine */
        if (buf_sz == 0)
                return;

        ar = pipe_info->hif_ce_state;
        ar_pci = ath10k_pci_priv(ar);

        if (!ar_pci->started)
                return;

        ce_hdl = pipe_info->ce_hdl;

        while (ath10k_ce_cancel_send_next(ce_hdl, (void **)&netbuf,
                                          &ce_data, &nbytes, &id) == 0) {
                /*
                 * Indicate the completion to higher layer to free
                 * the buffer
                 */
                ATH10K_SKB_CB(netbuf)->is_aborted = true;
                ar_pci->msg_callbacks_current.tx_completion(ar,
                                                            netbuf,
                                                            id);
        }
}

/*
 * Cleanup residual buffers for device shutdown:
 *    buffers that were enqueued for receive
 *    buffers that were to be sent
 * Note: Buffers that had completed but which were
 * not yet processed are on a completion queue. They
 * are handled when the completion thread shuts down.
 */
static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int pipe_num;

        for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
                struct ath10k_pci_pipe *pipe_info;

                pipe_info = &ar_pci->pipe_info[pipe_num];
                ath10k_pci_rx_pipe_cleanup(pipe_info);
                ath10k_pci_tx_pipe_cleanup(pipe_info);
        }
}

static void ath10k_pci_ce_deinit(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_pipe *pipe_info;
        int pipe_num;

        for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
                pipe_info = &ar_pci->pipe_info[pipe_num];
                if (pipe_info->ce_hdl) {
                        ath10k_ce_deinit(pipe_info->ce_hdl);
                        pipe_info->ce_hdl = NULL;
                        pipe_info->buf_sz = 0;
                }
        }
}

static void ath10k_pci_disable_irqs(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int i;

        for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
                disable_irq(ar_pci->pdev->irq + i);
}

static void ath10k_pci_hif_stop(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);

        /* Irqs are never explicitly re-enabled. They are implicitly re-enabled
         * by ath10k_pci_start_intr(). */
        ath10k_pci_disable_irqs(ar);

        ath10k_pci_stop_ce(ar);

        /* At this point, asynchronous threads are stopped, the target should
         * not DMA nor interrupt. We process the leftovers and then free
         * everything else up. */

        ath10k_pci_process_ce(ar);
        ath10k_pci_cleanup_ce(ar);
        ath10k_pci_buffer_cleanup(ar);

        ar_pci->started = 0;
}

static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
                                           void *req, u32 req_len,
                                           void *resp, u32 *resp_len)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_pipe *pci_tx = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
        struct ath10k_pci_pipe *pci_rx = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
        struct ath10k_ce_pipe *ce_tx = pci_tx->ce_hdl;
        struct ath10k_ce_pipe *ce_rx = pci_rx->ce_hdl;
        dma_addr_t req_paddr = 0;
        dma_addr_t resp_paddr = 0;
        struct bmi_xfer xfer = {};
        void *treq, *tresp = NULL;
        int ret = 0;

        if (resp && !resp_len)
                return -EINVAL;

        if (resp && resp_len && *resp_len == 0)
                return -EINVAL;

        treq = kmemdup(req, req_len, GFP_KERNEL);
        if (!treq)
                return -ENOMEM;

        req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE);
        ret = dma_mapping_error(ar->dev, req_paddr);
        if (ret)
                goto err_dma;

        if (resp && resp_len) {
                tresp = kzalloc(*resp_len, GFP_KERNEL);
                if (!tresp) {
                        ret = -ENOMEM;
                        goto err_req;
                }

                resp_paddr = dma_map_single(ar->dev, tresp, *resp_len,
                                            DMA_FROM_DEVICE);
                ret = dma_mapping_error(ar->dev, resp_paddr);
                if (ret)
                        goto err_req;

                xfer.wait_for_resp = true;
                xfer.resp_len = 0;

                ath10k_ce_recv_buf_enqueue(ce_rx, &xfer, resp_paddr);
        }

        init_completion(&xfer.done);

        ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0);
        if (ret)
                goto err_resp;

        ret = wait_for_completion_timeout(&xfer.done,
                                          BMI_COMMUNICATION_TIMEOUT_HZ);
        if (ret <= 0) {
                u32 unused_buffer;
                unsigned int unused_nbytes;
                unsigned int unused_id;

                ret = -ETIMEDOUT;
                ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer,
                                           &unused_nbytes, &unused_id);
        } else {
                /* non-zero means we did not time out */
                ret = 0;
        }

err_resp:
        if (resp) {
                u32 unused_buffer;

                ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer);
                dma_unmap_single(ar->dev, resp_paddr,
                                 *resp_len, DMA_FROM_DEVICE);
        }
err_req:
        dma_unmap_single(ar->dev, req_paddr, req_len, DMA_TO_DEVICE);

        if (ret == 0 && resp_len) {
                *resp_len = min(*resp_len, xfer.resp_len);
                memcpy(resp, tresp, xfer.resp_len);
        }
err_dma:
        kfree(treq);
        kfree(tresp);

        return ret;
}
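
/*
 * A minimal usage sketch (hypothetical caller, not part of this file;
 * cmd stands for a caller-built BMI request and the buffer size is
 * arbitrary): one BMI request/response round trip through the function
 * above. xfer.done is completed from the two CE callbacks below:
 *
 *        u8 resp[64];
 *        u32 resp_len = sizeof(resp);
 *        int ret;
 *
 *        ret = ath10k_pci_hif_exchange_bmi_msg(ar, &cmd, sizeof(cmd),
 *                                              resp, &resp_len);
 *        if (ret)
 *                return ret;     (-ETIMEDOUT if no response arrived)
 *
 * On success resp_len is clamped to the number of bytes the target
 * actually returned (xfer.resp_len).
 */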

static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state)
{
        struct bmi_xfer *xfer;
        u32 ce_data;
        unsigned int nbytes;
        unsigned int transfer_id;

        if (ath10k_ce_completed_send_next(ce_state, (void **)&xfer, &ce_data,
                                          &nbytes, &transfer_id))
                return;

        if (xfer->wait_for_resp)
                return;

        complete(&xfer->done);
}

static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state)
{
        struct bmi_xfer *xfer;
        u32 ce_data;
        unsigned int nbytes;
        unsigned int transfer_id;
        unsigned int flags;

        if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer, &ce_data,
                                          &nbytes, &transfer_id, &flags))
                return;

        if (!xfer->wait_for_resp) {
                ath10k_warn("unexpected: BMI data received; ignoring\n");
                return;
        }

        xfer->resp_len = nbytes;
        complete(&xfer->done);
}
1484
1485 /*
1486  * Map from service/endpoint to Copy Engine.
1487  * This table is derived from the CE_PCI TABLE, above.
1488  * It is passed to the Target at startup for use by firmware.
1489  */
1490 static const struct service_to_pipe target_service_to_ce_map_wlan[] = {
1491         {
1492                  ATH10K_HTC_SVC_ID_WMI_DATA_VO,
1493                  PIPEDIR_OUT,           /* out = UL = host -> target */
1494                  3,
1495         },
1496         {
1497                  ATH10K_HTC_SVC_ID_WMI_DATA_VO,
1498                  PIPEDIR_IN,            /* in = DL = target -> host */
1499                  2,
1500         },
1501         {
1502                  ATH10K_HTC_SVC_ID_WMI_DATA_BK,
1503                  PIPEDIR_OUT,           /* out = UL = host -> target */
1504                  3,
1505         },
1506         {
1507                  ATH10K_HTC_SVC_ID_WMI_DATA_BK,
1508                  PIPEDIR_IN,            /* in = DL = target -> host */
1509                  2,
1510         },
1511         {
1512                  ATH10K_HTC_SVC_ID_WMI_DATA_BE,
1513                  PIPEDIR_OUT,           /* out = UL = host -> target */
1514                  3,
1515         },
1516         {
1517                  ATH10K_HTC_SVC_ID_WMI_DATA_BE,
1518                  PIPEDIR_IN,            /* in = DL = target -> host */
1519                  2,
1520         },
1521         {
1522                  ATH10K_HTC_SVC_ID_WMI_DATA_VI,
1523                  PIPEDIR_OUT,           /* out = UL = host -> target */
1524                  3,
1525         },
1526         {
1527                  ATH10K_HTC_SVC_ID_WMI_DATA_VI,
1528                  PIPEDIR_IN,            /* in = DL = target -> host */
1529                  2,
1530         },
1531         {
1532                  ATH10K_HTC_SVC_ID_WMI_CONTROL,
1533                  PIPEDIR_OUT,           /* out = UL = host -> target */
1534                  3,
1535         },
1536         {
1537                  ATH10K_HTC_SVC_ID_WMI_CONTROL,
1538                  PIPEDIR_IN,            /* in = DL = target -> host */
1539                  2,
1540         },
1541         {
1542                  ATH10K_HTC_SVC_ID_RSVD_CTRL,
1543                  PIPEDIR_OUT,           /* out = UL = host -> target */
1544                  0,             /* could be moved to 3 (share with WMI) */
1545         },
1546         {
1547                  ATH10K_HTC_SVC_ID_RSVD_CTRL,
1548                  PIPEDIR_IN,            /* in = DL = target -> host */
1549                  1,
1550         },
1551         {
1552                  ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS,    /* not currently used */
1553                  PIPEDIR_OUT,           /* out = UL = host -> target */
1554                  0,
1555         },
1556         {
1557                  ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS,    /* not currently used */
1558                  PIPEDIR_IN,            /* in = DL = target -> host */
1559                  1,
1560         },
1561         {
1562                  ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
1563                  PIPEDIR_OUT,           /* out = UL = host -> target */
1564                  4,
1565         },
1566         {
1567                  ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
1568                  PIPEDIR_IN,            /* in = DL = target -> host */
1569                  1,
1570         },
1571
1572         /* (Additions here) */
1573
1574         {                               /* Must be last */
1575                  0,
1576                  0,
1577                  0,
1578         },
1579 };
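/*
 * Illustrative sketch (not driver code): resolving the pipes for a
 * given service amounts to a linear scan of the table above, along the
 * lines of the following, with field names assumed from struct
 * service_to_pipe:
 *
 *	for (i = 0; i < ARRAY_SIZE(target_service_to_ce_map_wlan); i++) {
 *		entry = &target_service_to_ce_map_wlan[i];
 *		if (entry->service_id != service_id)
 *			continue;
 *		if (entry->pipedir == PIPEDIR_OUT)
 *			*ul_pipe = entry->pipenum;
 *		else if (entry->pipedir == PIPEDIR_IN)
 *			*dl_pipe = entry->pipenum;
 *	}
 *
 * The real lookup is done by ath10k_pci_hif_map_service_to_pipe().
 */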
1580
1581 /*
1582  * Send an interrupt to the device to wake up the Target CPU
1583  * so it has an opportunity to notice any changed state.
1584  */
1585 static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
1586 {
1587         int ret;
1588         u32 core_ctrl;
1589
1590         ret = ath10k_pci_diag_read_access(ar, SOC_CORE_BASE_ADDRESS |
1591                                               CORE_CTRL_ADDRESS,
1592                                           &core_ctrl);
1593         if (ret) {
1594                 ath10k_warn("Unable to read core ctrl\n");
1595                 return ret;
1596         }
1597
1598         /* A_INUM_FIRMWARE interrupt to Target CPU */
1599         core_ctrl |= CORE_CTRL_CPU_INTR_MASK;
1600
1601         ret = ath10k_pci_diag_write_access(ar, SOC_CORE_BASE_ADDRESS |
1602                                                CORE_CTRL_ADDRESS,
1603                                            core_ctrl);
1604         if (ret)
1605                 ath10k_warn("Unable to set interrupt mask\n");
1606
1607         return ret;
1608 }
1609
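/*
 * Push the host's interconnect configuration to the target through the
 * diagnostic window: the CE pipe configuration, the service-to-pipe map
 * above, PCIe config flags (with L1 disabled), the early IRAM bank
 * allocation, and finally the "early config done" flag that lets target
 * initialization proceed.
 */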
1610 static int ath10k_pci_init_config(struct ath10k *ar)
1611 {
1612         u32 interconnect_targ_addr;
1613         u32 pcie_state_targ_addr = 0;
1614         u32 pipe_cfg_targ_addr = 0;
1615         u32 svc_to_pipe_map = 0;
1616         u32 pcie_config_flags = 0;
1617         u32 ealloc_value;
1618         u32 ealloc_targ_addr;
1619         u32 flag2_value;
1620         u32 flag2_targ_addr;
1621         int ret = 0;
1622
1623         /* Download to Target the CE Config and the service-to-CE map */
1624         interconnect_targ_addr =
1625                 host_interest_item_address(HI_ITEM(hi_interconnect_state));
1626
1627         /* Supply Target-side CE configuration */
1628         ret = ath10k_pci_diag_read_access(ar, interconnect_targ_addr,
1629                                           &pcie_state_targ_addr);
1630         if (ret != 0) {
1631                 ath10k_err("Failed to get pcie state addr: %d\n", ret);
1632                 return ret;
1633         }
1634
1635         if (pcie_state_targ_addr == 0) {
1636                 ret = -EIO;
1637                 ath10k_err("Invalid pcie state addr\n");
1638                 return ret;
1639         }
1640
1641         ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
1642                                           offsetof(struct pcie_state,
1643                                                    pipe_cfg_addr),
1644                                           &pipe_cfg_targ_addr);
1645         if (ret != 0) {
1646                 ath10k_err("Failed to get pipe cfg addr: %d\n", ret);
1647                 return ret;
1648         }
1649
1650         if (pipe_cfg_targ_addr == 0) {
1651                 ret = -EIO;
1652                 ath10k_err("Invalid pipe cfg addr\n");
1653                 return ret;
1654         }
1655
1656         ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr,
1657                                  target_ce_config_wlan,
1658                                  sizeof(target_ce_config_wlan));
1659
1660         if (ret != 0) {
1661                 ath10k_err("Failed to write pipe cfg: %d\n", ret);
1662                 return ret;
1663         }
1664
1665         ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
1666                                           offsetof(struct pcie_state,
1667                                                    svc_to_pipe_map),
1668                                           &svc_to_pipe_map);
1669         if (ret != 0) {
1670                 ath10k_err("Failed to get svc/pipe map: %d\n", ret);
1671                 return ret;
1672         }
1673
1674         if (svc_to_pipe_map == 0) {
1675                 ret = -EIO;
1676                 ath10k_err("Invalid svc_to_pipe map\n");
1677                 return ret;
1678         }
1679
1680         ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map,
1681                                  target_service_to_ce_map_wlan,
1682                                  sizeof(target_service_to_ce_map_wlan));
1683         if (ret != 0) {
1684                 ath10k_err("Failed to write svc/pipe map: %d\n", ret);
1685                 return ret;
1686         }
1687
1688         ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
1689                                           offsetof(struct pcie_state,
1690                                                    config_flags),
1691                                           &pcie_config_flags);
1692         if (ret != 0) {
1693                 ath10k_err("Failed to get pcie config_flags: %d\n", ret);
1694                 return ret;
1695         }
1696
1697         pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;
1698
1699         ret = ath10k_pci_diag_write_mem(ar, pcie_state_targ_addr +
1700                                  offsetof(struct pcie_state, config_flags),
1701                                  &pcie_config_flags,
1702                                  sizeof(pcie_config_flags));
1703         if (ret != 0) {
1704                 ath10k_err("Failed to write pcie config_flags: %d\n", ret);
1705                 return ret;
1706         }
1707
1708         /* configure early allocation */
1709         ealloc_targ_addr = host_interest_item_address(HI_ITEM(hi_early_alloc));
1710
1711         ret = ath10k_pci_diag_read_access(ar, ealloc_targ_addr, &ealloc_value);
1712         if (ret != 0) {
1713                 ath10k_err("Failed to get early alloc val: %d\n", ret);
1714                 return ret;
1715         }
1716
1717         /* first bank is switched to IRAM */
1718         ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
1719                          HI_EARLY_ALLOC_MAGIC_MASK);
1720         ealloc_value |= ((1 << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
1721                          HI_EARLY_ALLOC_IRAM_BANKS_MASK);
1722
1723         ret = ath10k_pci_diag_write_access(ar, ealloc_targ_addr, ealloc_value);
1724         if (ret != 0) {
1725                 ath10k_err("Failed to set early alloc val: %d\n", ret);
1726                 return ret;
1727         }
1728
1729         /* Tell Target to proceed with initialization */
1730         flag2_targ_addr = host_interest_item_address(HI_ITEM(hi_option_flag2));
1731
1732         ret = ath10k_pci_diag_read_access(ar, flag2_targ_addr, &flag2_value);
1733         if (ret != 0) {
1734                 ath10k_err("Failed to get option val: %d\n", ret);
1735                 return ret;
1736         }
1737
1738         flag2_value |= HI_OPTION_EARLY_CFG_DONE;
1739
1740         ret = ath10k_pci_diag_write_access(ar, flag2_targ_addr, flag2_value);
1741         if (ret != 0) {
1742                 ath10k_err("Failed to set option val: %d\n", ret);
1743                 return ret;
1744         }
1745
1746         return 0;
1747 }
1748
1749
1750
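/*
 * Bring up one copy engine per entry in host_ce_config_wlan. The last
 * CE is reserved for the diagnostic window rather than regular traffic,
 * and the BMI send/receive callbacks above are installed on the BMI
 * pipes until the BMI phase is over.
 */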
1751 static int ath10k_pci_ce_init(struct ath10k *ar)
1752 {
1753         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1754         struct ath10k_pci_pipe *pipe_info;
1755         const struct ce_attr *attr;
1756         int pipe_num;
1757
1758         for (pipe_num = 0; pipe_num < ar_pci->ce_count; pipe_num++) {
1759                 pipe_info = &ar_pci->pipe_info[pipe_num];
1760                 pipe_info->pipe_num = pipe_num;
1761                 pipe_info->hif_ce_state = ar;
1762                 attr = &host_ce_config_wlan[pipe_num];
1763
1764                 pipe_info->ce_hdl = ath10k_ce_init(ar, pipe_num, attr);
1765                 if (pipe_info->ce_hdl == NULL) {
1766                         ath10k_err("Unable to initialize CE for pipe: %d\n",
1767                                    pipe_num);
1768
1769                         /* It is safe to call ath10k_pci_ce_deinit() here:
1770                          * it checks whether ce_hdl is valid for each pipe */
1771                         ath10k_pci_ce_deinit(ar);
1772                         return -ENOMEM;
1773                 }
1774
1775                 if (pipe_num == ar_pci->ce_count - 1) {
1776                         /*
1777                          * Reserve the last CE for
1778                          * diagnostic window support
1779                          */
1780                         ar_pci->ce_diag =
1781                         ar_pci->pipe_info[ar_pci->ce_count - 1].ce_hdl;
1782                         continue;
1783                 }
1784
1785                 pipe_info->buf_sz = (size_t) (attr->src_sz_max);
1786         }
1787
1788         /*
1789          * Initially, establish CE completion handlers for use with BMI.
1790          * These are overwritten with generic handlers after we exit BMI phase.
1791          */
1792         pipe_info = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
1793         ath10k_ce_send_cb_register(pipe_info->ce_hdl,
1794                                    ath10k_pci_bmi_send_done, 0);
1795
1796         pipe_info = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
1797         ath10k_ce_recv_cb_register(pipe_info->ce_hdl,
1798                                    ath10k_pci_bmi_recv_data);
1799
1800         return 0;
1801 }
1802
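/*
 * Service a firmware interrupt: acknowledge the target-side pending
 * event and, if the interface has already been started, dump the
 * firmware crash area; an event arriving any earlier only warrants a
 * warning.
 */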
1803 static void ath10k_pci_fw_interrupt_handler(struct ath10k *ar)
1804 {
1805         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1806         u32 fw_indicator_address, fw_indicator;
1807
1808         ath10k_pci_wake(ar);
1809
1810         fw_indicator_address = ar_pci->fw_indicator_address;
1811         fw_indicator = ath10k_pci_read32(ar, fw_indicator_address);
1812
1813         if (fw_indicator & FW_IND_EVENT_PENDING) {
1814                 /* ACK: clear Target-side pending event */
1815                 ath10k_pci_write32(ar, fw_indicator_address,
1816                                    fw_indicator & ~FW_IND_EVENT_PENDING);
1817
1818                 if (ar_pci->started) {
1819                         ath10k_pci_hif_dump_area(ar);
1820                 } else {
1821                         /*
1822                          * Probable Target failure before we're prepared
1823                          * to handle it.  Generally unexpected.
1824                          */
1825                         ath10k_warn("early firmware event indicated\n");
1826                 }
1827         }
1828
1829         ath10k_pci_sleep(ar);
1830 }
1831
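/*
 * HIF power-up sequence: start interrupt handling, reset the target
 * into a known state, optionally keep the SoC awake, then initialize
 * the copy engines and target-side configuration before kicking the
 * target CPU.
 */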
1832 static int ath10k_pci_hif_power_up(struct ath10k *ar)
1833 {
1834         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1835         int ret;
1836
1837         ret = ath10k_pci_start_intr(ar);
1838         if (ret) {
1839                 ath10k_err("could not start interrupt handling (%d)\n", ret);
1840                 goto err;
1841         }
1842
1843         /*
1844          * Bring the target up cleanly.
1845          *
1846          * The target may be in an undefined state with an AUX-powered Target
1847          * and a Host in WoW mode. If the Host crashes, loses power, or is
1848          * restarted (without unloading the driver) then the Target is left
1849          * (aux) powered and running. On a subsequent driver load, the Target
1850          * is in an unexpected state. We try to catch that here in order to
1851          * reset the Target and retry the probe.
1852          */
1853         ath10k_pci_device_reset(ar);
1854
1855         ret = ath10k_pci_reset_target(ar);
1856         if (ret)
1857                 goto err_irq;
1858
1859         if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
1860                 /* Force AWAKE forever */
1861                 ath10k_do_pci_wake(ar);
1862
1863         ret = ath10k_pci_ce_init(ar);
1864         if (ret)
1865                 goto err_ps;
1866
1867         ret = ath10k_pci_init_config(ar);
1868         if (ret)
1869                 goto err_ce;
1870
1871         ret = ath10k_pci_wake_target_cpu(ar);
1872         if (ret) {
1873                 ath10k_err("could not wake up target CPU (%d)\n", ret);
1874                 goto err_ce;
1875         }
1876
1877         return 0;
1878
1879 err_ce:
1880         ath10k_pci_ce_deinit(ar);
1881 err_ps:
1882         if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
1883                 ath10k_do_pci_sleep(ar);
1884 err_irq:
1885         ath10k_pci_stop_intr(ar);
1886 err:
1887         return ret;
1888 }
1889
1890 static void ath10k_pci_hif_power_down(struct ath10k *ar)
1891 {
1892         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1893
1894         ath10k_pci_stop_intr(ar);
1895
1896         ath10k_pci_ce_deinit(ar);
1897         if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
1898                 ath10k_do_pci_sleep(ar);
1899 }
1900
1901 #ifdef CONFIG_PM
1902
1903 #define ATH10K_PCI_PM_CONTROL 0x44
1904
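/*
 * Suspend/resume drive the standard PCI power management
 * control/status word directly: suspend saves config space and puts
 * the device into D3hot (power state field 0x3), resume restores D0
 * and the saved config space.
 */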
1905 static int ath10k_pci_hif_suspend(struct ath10k *ar)
1906 {
1907         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1908         struct pci_dev *pdev = ar_pci->pdev;
1909         u32 val;
1910
1911         pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
1912
1913         if ((val & 0x000000ff) != 0x3) {
1914                 pci_save_state(pdev);
1915                 pci_disable_device(pdev);
1916                 pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
1917                                        (val & 0xffffff00) | 0x03);
1918         }
1919
1920         return 0;
1921 }
1922
1923 static int ath10k_pci_hif_resume(struct ath10k *ar)
1924 {
1925         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1926         struct pci_dev *pdev = ar_pci->pdev;
1927         u32 val;
1928
1929         pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
1930
1931         if ((val & 0x000000ff) != 0) {
1932                 pci_restore_state(pdev);
1933                 pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
1934                                        val & 0xffffff00);
1935                 /*
1936                  * Suspend/Resume resets the PCI configuration space,
1937                  * so we have to re-disable the RETRY_TIMEOUT register (0x41)
1938                  * to keep PCI Tx retries from interfering with C3 CPU state
1939                  */
1940                 pci_read_config_dword(pdev, 0x40, &val);
1941
1942                 if ((val & 0x0000ff00) != 0)
1943                         pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
1944         }
1945
1946         return 0;
1947 }
1948 #endif
1949
1950 static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
1951         .send_head              = ath10k_pci_hif_send_head,
1952         .exchange_bmi_msg       = ath10k_pci_hif_exchange_bmi_msg,
1953         .start                  = ath10k_pci_hif_start,
1954         .stop                   = ath10k_pci_hif_stop,
1955         .map_service_to_pipe    = ath10k_pci_hif_map_service_to_pipe,
1956         .get_default_pipe       = ath10k_pci_hif_get_default_pipe,
1957         .send_complete_check    = ath10k_pci_hif_send_complete_check,
1958         .set_callbacks          = ath10k_pci_hif_set_callbacks,
1959         .get_free_queue_number  = ath10k_pci_hif_get_free_queue_number,
1960         .power_up               = ath10k_pci_hif_power_up,
1961         .power_down             = ath10k_pci_hif_power_down,
1962 #ifdef CONFIG_PM
1963         .suspend                = ath10k_pci_hif_suspend,
1964         .resume                 = ath10k_pci_hif_resume,
1965 #endif
1966 };
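/*
 * A sketch of how the core layer is expected to drive these ops (an
 * illustration only; the ath10k_hif_*() wrapper names are assumed from
 * hif.h):
 *
 *	ret = ath10k_hif_power_up(ar);	calls ath10k_pci_hif_power_up()
 *	ret = ath10k_hif_start(ar);	calls ath10k_pci_hif_start()
 *	...
 *	ath10k_hif_stop(ar);		calls ath10k_pci_hif_stop()
 *	ath10k_hif_power_down(ar);	calls ath10k_pci_hif_power_down()
 */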
1967
1968 static void ath10k_pci_ce_tasklet(unsigned long ptr)
1969 {
1970         struct ath10k_pci_pipe *pipe = (struct ath10k_pci_pipe *)ptr;
1971         struct ath10k_pci *ar_pci = pipe->ar_pci;
1972
1973         ath10k_ce_per_engine_service(ar_pci->ar, pipe->pipe_num);
1974 }
1975
1976 static void ath10k_msi_err_tasklet(unsigned long data)
1977 {
1978         struct ath10k *ar = (struct ath10k *)data;
1979
1980         ath10k_pci_fw_interrupt_handler(ar);
1981 }
1982
1983 /*
1984  * Handler for a per-engine interrupt on a PARTICULAR CE.
1985  * This is used in cases where each CE has a private MSI interrupt.
1986  */
1987 static irqreturn_t ath10k_pci_per_engine_handler(int irq, void *arg)
1988 {
1989         struct ath10k *ar = arg;
1990         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1991         int ce_id = irq - ar_pci->pdev->irq - MSI_ASSIGN_CE_INITIAL;
1992
1993         if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_pci->pipe_info)) {
1994                 ath10k_warn("unexpected/invalid irq %d ce_id %d\n", irq, ce_id);
1995                 return IRQ_HANDLED;
1996         }
1997
1998         /*
1999          * NOTE: We are able to derive ce_id from irq because we
2000          * use a one-to-one mapping for CEs 0..5.
2001          * CEs 6 & 7 do not use interrupts at all.
2002          *
2003          * This mapping must be kept in sync with the mapping
2004          * used by firmware.
2005          */
2006         tasklet_schedule(&ar_pci->pipe_info[ce_id].intr);
2007         return IRQ_HANDLED;
2008 }
2009
2010 static irqreturn_t ath10k_pci_msi_fw_handler(int irq, void *arg)
2011 {
2012         struct ath10k *ar = arg;
2013         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2014
2015         tasklet_schedule(&ar_pci->msi_fw_err);
2016         return IRQ_HANDLED;
2017 }
2018
2019 /*
2020  * Top-level interrupt handler for all PCI interrupts from a Target.
2021  * When a block of MSI interrupts is allocated, this top-level handler
2022  * is not used; instead, we directly call the correct sub-handler.
2023  */
2024 static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
2025 {
2026         struct ath10k *ar = arg;
2027         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2028
2029         if (ar_pci->num_msi_intrs == 0) {
2030                 /*
2031                  * IMPORTANT: the INTR_CLR register must be written after
2032                  * INTR_ENABLE is set to 0; otherwise the interrupt is not
2033                  * actually cleared.
2034                  */
2035                 iowrite32(0, ar_pci->mem +
2036                           (SOC_CORE_BASE_ADDRESS |
2037                            PCIE_INTR_ENABLE_ADDRESS));
2038                 iowrite32(PCIE_INTR_FIRMWARE_MASK |
2039                           PCIE_INTR_CE_MASK_ALL,
2040                           ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
2041                                          PCIE_INTR_CLR_ADDRESS));
2042                 /*
2043                  * IMPORTANT: this extra read transaction is required to
2044                  * flush the posted write buffer.
2045                  */
2046                 (void) ioread32(ar_pci->mem +
2047                                 (SOC_CORE_BASE_ADDRESS |
2048                                  PCIE_INTR_ENABLE_ADDRESS));
2049         }
2050
2051         tasklet_schedule(&ar_pci->intr_tq);
2052
2053         return IRQ_HANDLED;
2054 }
2055
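/*
 * Bottom half shared by the legacy and single-MSI paths: service
 * firmware events and all copy engines, then re-enable the legacy line
 * interrupts that the top-level handler masked off.
 */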
2056 static void ath10k_pci_tasklet(unsigned long data)
2057 {
2058         struct ath10k *ar = (struct ath10k *)data;
2059         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2060
2061         ath10k_pci_fw_interrupt_handler(ar); /* FIXME: Handle FW error */
2062         ath10k_ce_per_engine_service_any(ar);
2063
2064         if (ar_pci->num_msi_intrs == 0) {
2065                 /* Enable Legacy PCI line interrupts */
2066                 iowrite32(PCIE_INTR_FIRMWARE_MASK |
2067                           PCIE_INTR_CE_MASK_ALL,
2068                           ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
2069                                          PCIE_INTR_ENABLE_ADDRESS));
2070                 /*
2071                  * IMPORTANT: this extra read transaction is required to
2072                  * flush the posted write buffer
2073                  */
2074                 (void) ioread32(ar_pci->mem +
2075                                 (SOC_CORE_BASE_ADDRESS |
2076                                  PCIE_INTR_ENABLE_ADDRESS));
2077         }
2078 }
2079
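/*
 * Request a block of MSI vectors: one (MSI_ASSIGN_FW) for firmware
 * events plus one per copy engine. On any failure, every IRQ requested
 * so far is released and MSI is disabled again.
 */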
2080 static int ath10k_pci_start_intr_msix(struct ath10k *ar, int num)
2081 {
2082         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2083         int ret;
2084         int i;
2085
2086         ret = pci_enable_msi_block(ar_pci->pdev, num);
2087         if (ret)
2088                 return ret;
2089
2090         ret = request_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW,
2091                           ath10k_pci_msi_fw_handler,
2092                           IRQF_SHARED, "ath10k_pci", ar);
2093         if (ret) {
2094                 ath10k_warn("request_irq(%d) failed %d\n",
2095                             ar_pci->pdev->irq + MSI_ASSIGN_FW, ret);
2096
2097                 pci_disable_msi(ar_pci->pdev);
2098                 return ret;
2099         }
2100
2101         for (i = MSI_ASSIGN_CE_INITIAL; i <= MSI_ASSIGN_CE_MAX; i++) {
2102                 ret = request_irq(ar_pci->pdev->irq + i,
2103                                   ath10k_pci_per_engine_handler,
2104                                   IRQF_SHARED, "ath10k_pci", ar);
2105                 if (ret) {
2106                         ath10k_warn("request_irq(%d) failed %d\n",
2107                                     ar_pci->pdev->irq + i, ret);
2108
2109                         for (i--; i >= MSI_ASSIGN_CE_INITIAL; i--)
2110                                 free_irq(ar_pci->pdev->irq + i, ar);
2111
2112                         free_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW, ar);
2113                         pci_disable_msi(ar_pci->pdev);
2114                         return ret;
2115                 }
2116         }
2117
2118         ath10k_info("MSI-X interrupt handling (%d intrs)\n", num);
2119         return 0;
2120 }
2121
2122 static int ath10k_pci_start_intr_msi(struct ath10k *ar)
2123 {
2124         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2125         int ret;
2126
2127         ret = pci_enable_msi(ar_pci->pdev);
2128         if (ret < 0)
2129                 return ret;
2130
2131         ret = request_irq(ar_pci->pdev->irq,
2132                           ath10k_pci_interrupt_handler,
2133                           IRQF_SHARED, "ath10k_pci", ar);
2134         if (ret < 0) {
2135                 pci_disable_msi(ar_pci->pdev);
2136                 return ret;
2137         }
2138
2139         ath10k_info("MSI interrupt handling\n");
2140         return 0;
2141 }
2142
2143 static int ath10k_pci_start_intr_legacy(struct ath10k *ar)
2144 {
2145         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2146         int ret;
2147
2148         ret = request_irq(ar_pci->pdev->irq,
2149                           ath10k_pci_interrupt_handler,
2150                           IRQF_SHARED, "ath10k_pci", ar);
2151         if (ret < 0)
2152                 return ret;
2153
2154         /*
2155          * Make sure to wake the Target before enabling Legacy
2156          * Interrupt.
2157          */
2158         iowrite32(PCIE_SOC_WAKE_V_MASK,
2159                   ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
2160                   PCIE_SOC_WAKE_ADDRESS);
2161
2162         ret = ath10k_pci_wait(ar);
2163         if (ret) {
2164                 ath10k_warn("Failed to enable legacy interrupt, target did not wake up: %d\n",
2165                             ret);
2166                 free_irq(ar_pci->pdev->irq, ar);
2167                 return ret;
2168         }
2169
2170         /*
2171          * A potential race exists here: the CORE_BASE write below
2172          * depends on the target correctly decoding the AXI address,
2173          * but the host has no way of knowing when the target has
2174          * written BAR to CORE_CTRL. If the target has not yet done so,
2175          * this write may be lost. For now, work around the race by
2176          * repeating the write during the synchronization check below.
2177          */
2178         iowrite32(PCIE_INTR_FIRMWARE_MASK |
2179                   PCIE_INTR_CE_MASK_ALL,
2180                   ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
2181                                  PCIE_INTR_ENABLE_ADDRESS));
2182         iowrite32(PCIE_SOC_WAKE_RESET,
2183                   ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
2184                   PCIE_SOC_WAKE_ADDRESS);
2185
2186         ath10k_info("legacy interrupt handling\n");
2187         return 0;
2188 }
2189
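/*
 * Set up interrupt delivery, preferring a block of MSI vectors, then a
 * single MSI vector, then the legacy INTx line; ar_pci->num_msi_intrs
 * records which mode was established.
 */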
2190 static int ath10k_pci_start_intr(struct ath10k *ar)
2191 {
2192         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2193         int num = MSI_NUM_REQUEST;
2194         int ret;
2195         int i;
2196
2197         tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long) ar);
2198         tasklet_init(&ar_pci->msi_fw_err, ath10k_msi_err_tasklet,
2199                      (unsigned long) ar);
2200
2201         for (i = 0; i < CE_COUNT; i++) {
2202                 ar_pci->pipe_info[i].ar_pci = ar_pci;
2203                 tasklet_init(&ar_pci->pipe_info[i].intr,
2204                              ath10k_pci_ce_tasklet,
2205                              (unsigned long)&ar_pci->pipe_info[i]);
2206         }
2207
2208         if (!test_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features))
2209                 num = 1;
2210
2211         if (num > 1) {
2212                 ret = ath10k_pci_start_intr_msix(ar, num);
2213                 if (ret == 0)
2214                         goto exit;
2215
2216                 ath10k_warn("MSI-X didn't succeed (%d), trying MSI\n", ret);
2217                 num = 1;
2218         }
2219
2220         if (num == 1) {
2221                 ret = ath10k_pci_start_intr_msi(ar);
2222                 if (ret == 0)
2223                         goto exit;
2224
2225                 ath10k_warn("MSI didn't succeed (%d), trying legacy INTR\n",
2226                             ret);
2227                 num = 0;
2228         }
2229
2230         ret = ath10k_pci_start_intr_legacy(ar);
2231         if (ret) {
2232                 ath10k_warn("Failed to start legacy interrupts: %d\n", ret);
2233                 return ret;
2234         }
2235
2236 exit:
2237         ar_pci->num_msi_intrs = num;
2238         ar_pci->ce_count = CE_COUNT;
2239         return ret;
2240 }
2241
2242 static void ath10k_pci_stop_intr(struct ath10k *ar)
2243 {
2244         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2245         int i;
2246
2247         /* There's at least one interrupt regardless of whether it's legacy
2248          * INTR, MSI or MSI-X */
2249         for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
2250                 free_irq(ar_pci->pdev->irq + i, ar);
2251
2252         if (ar_pci->num_msi_intrs > 0)
2253                 pci_disable_msi(ar_pci->pdev);
2254 }
2255
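/*
 * Wait (up to roughly 3 seconds, polling every 10 ms) for the target
 * firmware to report FW_IND_INITIALIZED, keeping the SoC awake for the
 * duration of the wait.
 */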
2256 static int ath10k_pci_reset_target(struct ath10k *ar)
2257 {
2258         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2259         int wait_limit = 300; /* 3 sec */
2260         int ret;
2261
2262         /* Wait for Target to finish initialization before we proceed. */
2263         iowrite32(PCIE_SOC_WAKE_V_MASK,
2264                   ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
2265                   PCIE_SOC_WAKE_ADDRESS);
2266
2267         ret = ath10k_pci_wait(ar);
2268         if (ret) {
2269                 ath10k_warn("Failed to reset target, target did not wake up: %d\n",
2270                             ret);
2271                 return ret;
2272         }
2273
2274         while (wait_limit-- &&
2275                !(ioread32(ar_pci->mem + FW_INDICATOR_ADDRESS) &
2276                  FW_IND_INITIALIZED)) {
2277                 if (ar_pci->num_msi_intrs == 0)
2278                         /* Fix potential race by repeating CORE_BASE writes */
2279                         iowrite32(PCIE_INTR_FIRMWARE_MASK |
2280                                   PCIE_INTR_CE_MASK_ALL,
2281                                   ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
2282                                                  PCIE_INTR_ENABLE_ADDRESS));
2283                 mdelay(10);
2284         }
2285
2286         if (wait_limit < 0) {
2287                 ath10k_err("Target stalled\n");
2288                 iowrite32(PCIE_SOC_WAKE_RESET,
2289                           ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
2290                           PCIE_SOC_WAKE_ADDRESS);
2291                 return -EIO;
2292         }
2293
2294         iowrite32(PCIE_SOC_WAKE_RESET,
2295                   ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
2296                   PCIE_SOC_WAKE_ADDRESS);
2297
2298         return 0;
2299 }
2300
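/*
 * Cold-reset the target: wake the SoC, assert the global SoC reset bit
 * (which also resets PCIe), wait for RTC_STATE to report cold reset,
 * then deassert the bit and wait for the target to come back out.
 */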
2301 static void ath10k_pci_device_reset(struct ath10k *ar)
2302 {
2303         int i;
2304         u32 val;
2305
2306         if (!SOC_GLOBAL_RESET_ADDRESS)
2307                 return;
2308
2309         ath10k_pci_reg_write32(ar, PCIE_SOC_WAKE_ADDRESS,
2310                                PCIE_SOC_WAKE_V_MASK);
2311         for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
2312                 if (ath10k_pci_target_is_awake(ar))
2313                         break;
2314                 msleep(1);
2315         }
2316
2317         /* Put Target, including PCIe, into RESET. */
2318         val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS);
2319         val |= 1;
2320         ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
2321
2322         for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
2323                 if (ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
2324                                           RTC_STATE_COLD_RESET_MASK)
2325                         break;
2326                 msleep(1);
2327         }
2328
2329         /* Pull Target, including PCIe, out of RESET. */
2330         val &= ~1;
2331         ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
2332
2333         for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
2334                 if (!(ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
2335                                             RTC_STATE_COLD_RESET_MASK))
2336                         break;
2337                 msleep(1);
2338         }
2339
2340         ath10k_pci_reg_write32(ar, PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
2341 }
2342
2343 static void ath10k_pci_dump_features(struct ath10k_pci *ar_pci)
2344 {
2345         int i;
2346
2347         for (i = 0; i < ATH10K_PCI_FEATURE_COUNT; i++) {
2348                 if (!test_bit(i, ar_pci->features))
2349                         continue;
2350
2351                 switch (i) {
2352                 case ATH10K_PCI_FEATURE_MSI_X:
2353                         ath10k_dbg(ATH10K_DBG_BOOT, "device supports MSI-X\n");
2354                         break;
2355                 case ATH10K_PCI_FEATURE_SOC_POWER_SAVE:
2356                         ath10k_dbg(ATH10K_DBG_BOOT, "QCA98XX SoC power save enabled\n");
2357                         break;
2358                 }
2359         }
2360 }
2361
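/*
 * PCI probe: allocate per-device state, map BAR_NUM, constrain DMA to
 * 32 bits, read the chip id with the SoC temporarily woken, and
 * register the device with the core. Error paths unwind in strict
 * reverse order of setup.
 */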
2362 static int ath10k_pci_probe(struct pci_dev *pdev,
2363                             const struct pci_device_id *pci_dev)
2364 {
2365         void __iomem *mem;
2366         int ret = 0;
2367         struct ath10k *ar;
2368         struct ath10k_pci *ar_pci;
2369         u32 lcr_val, chip_id;
2370
2371         ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
2372
2373         ar_pci = kzalloc(sizeof(*ar_pci), GFP_KERNEL);
2374         if (ar_pci == NULL)
2375                 return -ENOMEM;
2376
2377         ar_pci->pdev = pdev;
2378         ar_pci->dev = &pdev->dev;
2379
2380         switch (pci_dev->device) {
2381         case QCA988X_2_0_DEVICE_ID:
2382                 set_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features);
2383                 break;
2384         default:
2385                 ret = -ENODEV;
2386                 ath10k_err("Unknown device ID: %d\n", pci_dev->device);
2387                 goto err_ar_pci;
2388         }
2389
2390         if (ath10k_target_ps)
2391                 set_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features);
2392
2393         ath10k_pci_dump_features(ar_pci);
2394
2395         ar = ath10k_core_create(ar_pci, ar_pci->dev, &ath10k_pci_hif_ops);
2396         if (!ar) {
2397                 ath10k_err("ath10k_core_create failed!\n");
2398                 ret = -EINVAL;
2399                 goto err_ar_pci;
2400         }
2401
2402         ar_pci->ar = ar;
2403         ar_pci->fw_indicator_address = FW_INDICATOR_ADDRESS;
2404         atomic_set(&ar_pci->keep_awake_count, 0);
2405
2406         pci_set_drvdata(pdev, ar);
2407
2408         /*
2409          * Without any knowledge of the Host, the Target may have been reset or
2410          * power cycled and its Config Space may no longer reflect the PCI
2411          * address space that was assigned earlier by the PCI infrastructure.
2412          * Refresh it now.
2413          */
2414         ret = pci_assign_resource(pdev, BAR_NUM);
2415         if (ret) {
2416                 ath10k_err("cannot assign PCI space: %d\n", ret);
2417                 goto err_ar;
2418         }
2419
2420         ret = pci_enable_device(pdev);
2421         if (ret) {
2422                 ath10k_err("cannot enable PCI device: %d\n", ret);
2423                 goto err_ar;
2424         }
2425
2426         /* Request MMIO resources */
2427         ret = pci_request_region(pdev, BAR_NUM, "ath");
2428         if (ret) {
2429                 ath10k_err("PCI MMIO reservation error: %d\n", ret);
2430                 goto err_device;
2431         }
2432
2433         /*
2434          * Target structures have a limit of 32 bit DMA pointers.
2435          * DMA pointers can be wider than 32 bits by default on some systems.
2436          */
2437         ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2438         if (ret) {
2439                 ath10k_err("32-bit DMA not available: %d\n", ret);
2440                 goto err_region;
2441         }
2442
2443         ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
2444         if (ret) {
2445                 ath10k_err("cannot enable 32-bit consistent DMA: %d\n", ret);
2446                 goto err_region;
2447         }
2448
2449         /* Set bus master bit in PCI_COMMAND to enable DMA */
2450         pci_set_master(pdev);
2451
2452         /*
2453          * Temporary FIX: disable ASPM
2454          * Will be removed after the OTP is programmed
2455          */
2456         pci_read_config_dword(pdev, 0x80, &lcr_val);
2457         pci_write_config_dword(pdev, 0x80, (lcr_val & 0xffffff00));
2458
2459         /* Arrange for access to Target SoC registers. */
2460         mem = pci_iomap(pdev, BAR_NUM, 0);
2461         if (!mem) {
2462                 ath10k_err("PCI iomap error\n");
2463                 ret = -EIO;
2464                 goto err_master;
2465         }
2466
2467         ar_pci->mem = mem;
2468
2469         spin_lock_init(&ar_pci->ce_lock);
2470
2471         ret = ath10k_do_pci_wake(ar);
2472         if (ret) {
2473                 ath10k_err("Failed to get chip id: %d\n", ret);
2474                 goto err_iomap;
2475         }
2476
2477         chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
2478
2479         ath10k_do_pci_sleep(ar);
2480
2481         ath10k_dbg(ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem);
2482
2483         ret = ath10k_core_register(ar, chip_id);
2484         if (ret) {
2485                 ath10k_err("could not register driver core (%d)\n", ret);
2486                 goto err_iomap;
2487         }
2488
2489         return 0;
2490
2491 err_iomap:
2492         pci_iounmap(pdev, mem);
2493 err_master:
2494         pci_clear_master(pdev);
2495 err_region:
2496         pci_release_region(pdev, BAR_NUM);
2497 err_device:
2498         pci_disable_device(pdev);
2499 err_ar:
2500         ath10k_core_destroy(ar);
2501 err_ar_pci:
2502         /* call HIF PCI free here */
2503         kfree(ar_pci);
2504
2505         return ret;
2506 }
2507
2508 static void ath10k_pci_remove(struct pci_dev *pdev)
2509 {
2510         struct ath10k *ar = pci_get_drvdata(pdev);
2511         struct ath10k_pci *ar_pci;
2512
2513         ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
2514
2515         if (!ar)
2516                 return;
2517
2518         ar_pci = ath10k_pci_priv(ar);
2519
2520         if (!ar_pci)
2521                 return;
2522
2523         tasklet_kill(&ar_pci->msi_fw_err);
2524
2525         ath10k_core_unregister(ar);
2526
2527         pci_iounmap(pdev, ar_pci->mem);
2528         pci_release_region(pdev, BAR_NUM);
2529         pci_clear_master(pdev);
2530         pci_disable_device(pdev);
2531
2532         ath10k_core_destroy(ar);
2533         kfree(ar_pci);
2534 }
2535
2536 MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);
2537
2538 static struct pci_driver ath10k_pci_driver = {
2539         .name = "ath10k_pci",
2540         .id_table = ath10k_pci_id_table,
2541         .probe = ath10k_pci_probe,
2542         .remove = ath10k_pci_remove,
2543 };
2544
2545 static int __init ath10k_pci_init(void)
2546 {
2547         int ret;
2548
2549         ret = pci_register_driver(&ath10k_pci_driver);
2550         if (ret)
2551                 ath10k_err("pci_register_driver failed: %d\n", ret);
2552
2553         return ret;
2554 }
2555 module_init(ath10k_pci_init);
2556
2557 static void __exit ath10k_pci_exit(void)
2558 {
2559         pci_unregister_driver(&ath10k_pci_driver);
2560 }
2561
2562 module_exit(ath10k_pci_exit);
2563
2564 MODULE_AUTHOR("Qualcomm Atheros");
2565 MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices");
2566 MODULE_LICENSE("Dual BSD/GPL");
2567 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_FILE);
2568 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_OTP_FILE);
2569 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);