/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>

#include "core.h"
#include "debug.h"

#include "targaddrs.h"
#include "bmi.h"

#include "hif.h"
#include "htc.h"

#include "ce.h"
#include "pci.h"

static unsigned int ath10k_target_ps;
module_param(ath10k_target_ps, uint, 0644);
MODULE_PARM_DESC(ath10k_target_ps, "Enable ath10k Target (SoC) PS option");

#define QCA988X_2_0_DEVICE_ID	(0x003c)

static DEFINE_PCI_DEVICE_TABLE(ath10k_pci_id_table) = {
	{ PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
	{0}
};

static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
				       u32 *data);

static void ath10k_pci_process_ce(struct ath10k *ar);
static int ath10k_pci_post_rx(struct ath10k *ar);
static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
				   int num);
static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info);
static void ath10k_pci_stop_ce(struct ath10k *ar);
static void ath10k_pci_device_reset(struct ath10k *ar);
static int ath10k_pci_wait_for_target_init(struct ath10k *ar);
static int ath10k_pci_start_intr(struct ath10k *ar);
static void ath10k_pci_stop_intr(struct ath10k *ar);

static const struct ce_attr host_ce_config_wlan[] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 16,
		.src_sz_max = 256,
		.dest_nentries = 0,
	},

	/* CE1: target->host HTT + HTC control */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 512,
		.dest_nentries = 512,
	},

	/* CE2: target->host WMI */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 2048,
		.dest_nentries = 32,
	},

	/* CE3: host->target WMI */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 32,
		.src_sz_max = 2048,
		.dest_nentries = 0,
	},

	/* CE4: host->target HTT */
	{
		.flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
		.src_nentries = CE_HTT_H2T_MSG_SRC_NENTRIES,
		.src_sz_max = 256,
		.dest_nentries = 0,
	},

	/* CE5: unused */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE6: target autonomous hif_memcpy */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 0,
		.src_sz_max = 0,
		.dest_nentries = 0,
	},

	/* CE7: ce_diag, the Diagnostic Window */
	{
		.flags = CE_ATTR_FLAGS,
		.src_nentries = 2,
		.src_sz_max = DIAG_TRANSFER_LIMIT,
		.dest_nentries = 2,
	},
};
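
/*
 * Note: in each ce_attr above, src_nentries sizes the host->target (send)
 * ring and dest_nentries the target->host (receive) ring, so a zero count
 * leaves that direction unused; e.g. CE4 (host->target HTT) has no receive
 * ring at all.
 */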

/* Target firmware's Copy Engine configuration. */
static const struct ce_pipe_config target_ce_config_wlan[] = {
	/* CE0: host->target HTC control and raw streams */
	{
		.pipenum = 0,
		.pipedir = PIPEDIR_OUT,
		.nentries = 32,
		.nbytes_max = 256,
		.flags = CE_ATTR_FLAGS,
		.reserved = 0,
	},

	/* CE1: target->host HTT + HTC control */
	{
		.pipenum = 1,
		.pipedir = PIPEDIR_IN,
		.nentries = 32,
		.nbytes_max = 512,
		.flags = CE_ATTR_FLAGS,
		.reserved = 0,
	},

	/* CE2: target->host WMI */
	{
		.pipenum = 2,
		.pipedir = PIPEDIR_IN,
		.nentries = 32,
		.nbytes_max = 2048,
		.flags = CE_ATTR_FLAGS,
		.reserved = 0,
	},

	/* CE3: host->target WMI */
	{
		.pipenum = 3,
		.pipedir = PIPEDIR_OUT,
		.nentries = 32,
		.nbytes_max = 2048,
		.flags = CE_ATTR_FLAGS,
		.reserved = 0,
	},

	/* CE4: host->target HTT */
	{
		.pipenum = 4,
		.pipedir = PIPEDIR_OUT,
		.nentries = 256,
		.nbytes_max = 256,
		.flags = CE_ATTR_FLAGS,
		.reserved = 0,
	},

	/* NB: 50% of src nentries, since tx has 2 frags */

	/* CE5: unused */
	{
		.pipenum = 5,
		.pipedir = PIPEDIR_OUT,
		.nentries = 32,
		.nbytes_max = 2048,
		.flags = CE_ATTR_FLAGS,
		.reserved = 0,
	},

	/* CE6: Reserved for target autonomous hif_memcpy */
	{
		.pipenum = 6,
		.pipedir = PIPEDIR_INOUT,
		.nentries = 32,
		.nbytes_max = 4096,
		.flags = CE_ATTR_FLAGS,
		.reserved = 0,
	},

	/* CE7 used only by Host */
};
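
/*
 * As with target_service_to_ce_map_wlan below, pipedir here is named from
 * the host's perspective: PIPEDIR_OUT is host->target and PIPEDIR_IN is
 * target->host.
 */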

/*
 * Diagnostic read/write access is provided for startup/config/debug usage.
 * Caller must guarantee proper alignment, when applicable, and single user
 * at any moment.
 */
static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
				    int nbytes)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret = 0;
	u32 buf;
	unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
	unsigned int id;
	unsigned int flags;
	struct ath10k_ce_pipe *ce_diag;
	/* Host buffer address in CE space */
	u32 ce_data;
	dma_addr_t ce_data_base = 0;
	void *data_buf = NULL;
	int i;

	/*
	 * This code cannot handle reads to non-memory space. Redirect to the
	 * register read fn but preserve the multi word read capability of
	 * this fn.
	 */
	if (address < DRAM_BASE_ADDRESS) {
		if (!IS_ALIGNED(address, 4) ||
		    !IS_ALIGNED((unsigned long)data, 4))
			return -EIO;

		while ((nbytes >= 4) && ((ret = ath10k_pci_diag_read_access(
					  ar, address, (u32 *)data)) == 0)) {
			nbytes -= sizeof(u32);
			address += sizeof(u32);
			data += sizeof(u32);
		}
		return ret;
	}

	ce_diag = ar_pci->ce_diag;

	/*
	 * Allocate a temporary bounce buffer to hold caller's data
	 * to be DMA'ed from Target. This guarantees
	 *   1) 4-byte alignment
	 *   2) Buffer in DMA-able space
	 */
	orig_nbytes = nbytes;
	data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev,
							 orig_nbytes,
							 &ce_data_base);

	if (!data_buf) {
		ret = -ENOMEM;
		goto done;
	}
	memset(data_buf, 0, orig_nbytes);

	remaining_bytes = orig_nbytes;
	ce_data = ce_data_base;
	while (remaining_bytes) {
		nbytes = min_t(unsigned int, remaining_bytes,
			       DIAG_TRANSFER_LIMIT);

		ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, ce_data);
		if (ret != 0)
			goto done;

		/* Request CE to send from Target(!) address to Host buffer */
		/*
		 * The address supplied by the caller is in the
		 * Target CPU virtual address space.
		 *
		 * In order to use this address with the diagnostic CE,
		 * convert it from Target CPU virtual address space
		 * to CE address space
		 */
		ath10k_pci_wake(ar);
		address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem,
						     address);
		ath10k_pci_sleep(ar);

		ret = ath10k_ce_send(ce_diag, NULL, (u32)address, nbytes, 0,
				     0);
		if (ret)
			goto done;

		i = 0;
		while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
						     &completed_nbytes,
						     &id) != 0) {
			mdelay(1);
			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}

		if (buf != (u32) address) {
			ret = -EIO;
			goto done;
		}

		i = 0;
		while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
						     &completed_nbytes,
						     &id, &flags) != 0) {
			mdelay(1);

			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}

		if (buf != ce_data) {
			ret = -EIO;
			goto done;
		}

		remaining_bytes -= nbytes;
		address += nbytes;
		ce_data += nbytes;
	}

done:
	if (ret == 0) {
		/* Copy data from allocated DMA buf to caller's buf */
		WARN_ON_ONCE(orig_nbytes & 3);
		for (i = 0; i < orig_nbytes / sizeof(__le32); i++) {
			((u32 *)data)[i] =
				__le32_to_cpu(((__le32 *)data_buf)[i]);
		}
	} else
		ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n",
			   __func__, address);

	if (data_buf)
		pci_free_consistent(ar_pci->pdev, orig_nbytes,
				    data_buf, ce_data_base);

	return ret;
}
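
/*
 * Typical usage, as in ath10k_pci_hif_dump_area() below: fetch a pointer
 * sized word from the host interest area, then read the block it points at:
 *
 *	host_addr = host_interest_item_address(HI_ITEM(hi_failure_state));
 *	ath10k_pci_diag_read_mem(ar, host_addr, &reg_dump_area, sizeof(u32));
 *	ath10k_pci_diag_read_mem(ar, reg_dump_area, &reg_dump_values[0],
 *				 REG_DUMP_COUNT_QCA988X * sizeof(u32));
 */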

/* Read 4-byte aligned data from Target memory or register */
static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
				       u32 *data)
{
	/* Assume range doesn't cross this boundary */
	if (address >= DRAM_BASE_ADDRESS)
		return ath10k_pci_diag_read_mem(ar, address, data, sizeof(u32));

	ath10k_pci_wake(ar);
	*data = ath10k_pci_read32(ar, address);
	ath10k_pci_sleep(ar);
	return 0;
}

static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
				     const void *data, int nbytes)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret = 0;
	u32 buf;
	unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
	unsigned int id;
	unsigned int flags;
	struct ath10k_ce_pipe *ce_diag;
	void *data_buf = NULL;
	u32 ce_data;	/* Host buffer address in CE space */
	dma_addr_t ce_data_base = 0;
	int i;

	ce_diag = ar_pci->ce_diag;

	/*
	 * Allocate a temporary bounce buffer to hold caller's data
	 * to be DMA'ed to Target. This guarantees
	 *   1) 4-byte alignment
	 *   2) Buffer in DMA-able space
	 */
	orig_nbytes = nbytes;
	data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev,
							 orig_nbytes,
							 &ce_data_base);
	if (!data_buf) {
		ret = -ENOMEM;
		goto done;
	}

	/* Copy caller's data to allocated DMA buf */
	WARN_ON_ONCE(orig_nbytes & 3);
	for (i = 0; i < orig_nbytes / sizeof(__le32); i++)
		((__le32 *)data_buf)[i] = __cpu_to_le32(((u32 *)data)[i]);

	/*
	 * The address supplied by the caller is in the
	 * Target CPU virtual address space.
	 *
	 * In order to use this address with the diagnostic CE,
	 * convert it from
	 *    Target CPU virtual address space
	 * to
	 *    CE address space
	 */
	ath10k_pci_wake(ar);
	address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem, address);
	ath10k_pci_sleep(ar);

	remaining_bytes = orig_nbytes;
	ce_data = ce_data_base;
	while (remaining_bytes) {
		/* FIXME: check cast */
		nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);

		/* Set up to receive directly into Target(!) address */
		ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, address);
		if (ret != 0)
			goto done;

		/*
		 * Request CE to send caller-supplied data that
		 * was copied to bounce buffer to Target(!) address.
		 */
		ret = ath10k_ce_send(ce_diag, NULL, (u32) ce_data,
				     nbytes, 0, 0);
		if (ret != 0)
			goto done;

		i = 0;
		while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
						     &completed_nbytes,
						     &id) != 0) {
			mdelay(1);

			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}

		if (buf != ce_data) {
			ret = -EIO;
			goto done;
		}

		i = 0;
		while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
						     &completed_nbytes,
						     &id, &flags) != 0) {
			mdelay(1);

			if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
				ret = -EBUSY;
				goto done;
			}
		}

		if (nbytes != completed_nbytes) {
			ret = -EIO;
			goto done;
		}

		if (buf != address) {
			ret = -EIO;
			goto done;
		}

		remaining_bytes -= nbytes;
		address += nbytes;
		ce_data += nbytes;
	}

done:
	if (data_buf) {
		pci_free_consistent(ar_pci->pdev, orig_nbytes, data_buf,
				    ce_data_base);
	}

	if (ret != 0)
		ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n", __func__,
			   address);

	return ret;
}

/* Write 4B data to Target memory or register */
static int ath10k_pci_diag_write_access(struct ath10k *ar, u32 address,
					u32 data)
{
	/* Assume range doesn't cross this boundary */
	if (address >= DRAM_BASE_ADDRESS)
		return ath10k_pci_diag_write_mem(ar, address, &data,
						 sizeof(u32));

	ath10k_pci_wake(ar);
	ath10k_pci_write32(ar, address, data);
	ath10k_pci_sleep(ar);
	return 0;
}

static bool ath10k_pci_target_is_awake(struct ath10k *ar)
{
	void __iomem *mem = ath10k_pci_priv(ar)->mem;
	u32 val;

	val = ioread32(mem + PCIE_LOCAL_BASE_ADDRESS +
		       RTC_STATE_ADDRESS);
	return (RTC_STATE_V_GET(val) == RTC_STATE_V_ON);
}

static int ath10k_pci_wait(struct ath10k *ar)
{
	int n = 100;

	while (n-- && !ath10k_pci_target_is_awake(ar))
		msleep(10);

	if (n < 0) {
		ath10k_warn("Unable to wakeup target\n");
		return -ETIMEDOUT;
	}

	return 0;
}

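/*
 * Wake/sleep is reference counted via keep_awake_count: the first
 * ath10k_do_pci_wake() forces the SoC awake, every later call just bumps
 * the count, and only the ath10k_do_pci_sleep() that drops the count back
 * to zero lets the chip sleep again, so calls must always be paired.
 */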
int ath10k_do_pci_wake(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	void __iomem *pci_addr = ar_pci->mem;
	int tot_delay = 0;
	int curr_delay = 5;

	if (atomic_read(&ar_pci->keep_awake_count) == 0) {
		/* Force AWAKE */
		iowrite32(PCIE_SOC_WAKE_V_MASK,
			  pci_addr + PCIE_LOCAL_BASE_ADDRESS +
			  PCIE_SOC_WAKE_ADDRESS);
	}
	atomic_inc(&ar_pci->keep_awake_count);

	if (ar_pci->verified_awake)
		return 0;

	for (;;) {
		if (ath10k_pci_target_is_awake(ar)) {
			ar_pci->verified_awake = true;
			return 0;
		}

		if (tot_delay > PCIE_WAKE_TIMEOUT) {
			ath10k_warn("target took longer than %d us to wake up (awake count %d)\n",
				    PCIE_WAKE_TIMEOUT,
				    atomic_read(&ar_pci->keep_awake_count));
			return -ETIMEDOUT;
		}

		udelay(curr_delay);
		tot_delay += curr_delay;

		if (curr_delay < 50)
			curr_delay += 5;
	}
}

void ath10k_do_pci_sleep(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	void __iomem *pci_addr = ar_pci->mem;

	if (atomic_dec_and_test(&ar_pci->keep_awake_count)) {
		/* Allow sleep */
		ar_pci->verified_awake = false;
		iowrite32(PCIE_SOC_WAKE_RESET,
			  pci_addr + PCIE_LOCAL_BASE_ADDRESS +
			  PCIE_SOC_WAKE_ADDRESS);
	}
}
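
/*
 * The ath10k_pci_wake()/ath10k_pci_sleep() calls used throughout this file
 * are thin wrappers (see pci.h) around the two helpers above; they take
 * effect only when target power save is enabled via the ath10k_target_ps
 * module parameter.
 */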

/*
 * FIXME: Handle OOM properly.
 */
static inline
struct ath10k_pci_compl *get_free_compl(struct ath10k_pci_pipe *pipe_info)
{
	struct ath10k_pci_compl *compl = NULL;

	spin_lock_bh(&pipe_info->pipe_lock);
	if (list_empty(&pipe_info->compl_free)) {
		ath10k_warn("Completion buffers are full\n");
		goto exit;
	}
	compl = list_first_entry(&pipe_info->compl_free,
				 struct ath10k_pci_compl, list);
	list_del(&compl->list);
exit:
	spin_unlock_bh(&pipe_info->pipe_lock);
	return compl;
}

/* Called by lower (CE) layer when a send to Target completes. */
static void ath10k_pci_ce_send_done(struct ath10k_ce_pipe *ce_state)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
	struct ath10k_pci_compl *compl;
	void *transfer_context;
	u32 ce_data;
	unsigned int nbytes;
	unsigned int transfer_id;

	while (ath10k_ce_completed_send_next(ce_state, &transfer_context,
					     &ce_data, &nbytes,
					     &transfer_id) == 0) {
		compl = get_free_compl(pipe_info);
		if (!compl)
			break;

		compl->state = ATH10K_PCI_COMPL_SEND;
		compl->ce_state = ce_state;
		compl->pipe_info = pipe_info;
		compl->skb = transfer_context;
		compl->nbytes = nbytes;
		compl->transfer_id = transfer_id;
		compl->flags = 0;

		/*
		 * Add the completion to the processing queue.
		 */
		spin_lock_bh(&ar_pci->compl_lock);
		list_add_tail(&compl->list, &ar_pci->compl_process);
		spin_unlock_bh(&ar_pci->compl_lock);
	}

	ath10k_pci_process_ce(ar);
}

/* Called by lower (CE) layer when data is received from the Target. */
static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state)
{
	struct ath10k *ar = ce_state->ar;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
	struct ath10k_pci_compl *compl;
	struct sk_buff *skb;
	void *transfer_context;
	u32 ce_data;
	unsigned int nbytes;
	unsigned int transfer_id;
	unsigned int flags;

	while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
					     &ce_data, &nbytes, &transfer_id,
					     &flags) == 0) {
		compl = get_free_compl(pipe_info);
		if (!compl)
			break;

		compl->state = ATH10K_PCI_COMPL_RECV;
		compl->ce_state = ce_state;
		compl->pipe_info = pipe_info;
		compl->skb = transfer_context;
		compl->nbytes = nbytes;
		compl->transfer_id = transfer_id;
		compl->flags = flags;

		skb = transfer_context;
		dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
				 skb->len + skb_tailroom(skb),
				 DMA_FROM_DEVICE);
		/*
		 * Add the completion to the processing queue.
		 */
		spin_lock_bh(&ar_pci->compl_lock);
		list_add_tail(&compl->list, &ar_pci->compl_process);
		spin_unlock_bh(&ar_pci->compl_lock);
	}

	ath10k_pci_process_ce(ar);
}

/* Send the first nbytes bytes of the buffer */
static int ath10k_pci_hif_send_head(struct ath10k *ar, u8 pipe_id,
				    unsigned int transfer_id,
				    unsigned int bytes, struct sk_buff *nbuf)
{
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(nbuf);
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pipe_info = &(ar_pci->pipe_info[pipe_id]);
	struct ath10k_ce_pipe *ce_hdl = pipe_info->ce_hdl;
	unsigned int len;
	u32 flags = 0;
	int ret;

	len = min(bytes, nbuf->len);
	bytes -= len;

	if (len & 3)
		ath10k_warn("skb not aligned to 4-byte boundary (%d)\n", len);

	ath10k_dbg(ATH10K_DBG_PCI,
		   "pci send data vaddr %p paddr 0x%llx len %d as %d bytes\n",
		   nbuf->data, (unsigned long long) skb_cb->paddr,
		   nbuf->len, len);
	ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL,
			"ath10k tx: data: ",
			nbuf->data, nbuf->len);

	ret = ath10k_ce_send(ce_hdl, nbuf, skb_cb->paddr, len, transfer_id,
			     flags);
	if (ret)
		ath10k_warn("CE send failed: %p\n", nbuf);

	return ret;
}

static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	return ath10k_ce_num_free_src_entries(ar_pci->pipe_info[pipe].ce_hdl);
}

static void ath10k_pci_hif_dump_area(struct ath10k *ar)
{
	u32 reg_dump_area = 0;
	u32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
	u32 host_addr;
	int ret;
	u32 i;

	ath10k_err("firmware crashed!\n");
	ath10k_err("hardware name %s version 0x%x\n",
		   ar->hw_params.name, ar->target_version);
	ath10k_err("firmware version: %u.%u.%u.%u\n", ar->fw_version_major,
		   ar->fw_version_minor, ar->fw_version_release,
		   ar->fw_version_build);

	host_addr = host_interest_item_address(HI_ITEM(hi_failure_state));
	if (ath10k_pci_diag_read_mem(ar, host_addr,
				     &reg_dump_area, sizeof(u32)) != 0) {
		ath10k_warn("could not read hi_failure_state\n");
		return;
	}

	ath10k_err("target register Dump Location: 0x%08X\n", reg_dump_area);

	ret = ath10k_pci_diag_read_mem(ar, reg_dump_area,
				       &reg_dump_values[0],
				       REG_DUMP_COUNT_QCA988X * sizeof(u32));
	if (ret != 0) {
		ath10k_err("could not dump FW Dump Area\n");
		return;
	}

	BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4);

	ath10k_err("target Register Dump\n");
	for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4)
		ath10k_err("[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
			   i,
			   reg_dump_values[i],
			   reg_dump_values[i + 1],
			   reg_dump_values[i + 2],
			   reg_dump_values[i + 3]);

	queue_work(ar->workqueue, &ar->restart_work);
}

static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
					       int force)
{
	if (!force) {
		int resources;
		/*
		 * Decide whether to actually poll for completions, or just
		 * wait for a later chance.
		 * If there seem to be plenty of resources left, then just wait
		 * since checking involves reading a CE register, which is a
		 * relatively expensive operation.
		 */
		resources = ath10k_pci_hif_get_free_queue_number(ar, pipe);

		/*
		 * If at least 50% of the total resources are still available,
		 * don't bother checking again yet.
		 */
		if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
			return;
	}
	ath10k_ce_per_engine_service(ar, pipe);
}

static void ath10k_pci_hif_set_callbacks(struct ath10k *ar,
					 struct ath10k_hif_cb *callbacks)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);

	memcpy(&ar_pci->msg_callbacks_current, callbacks,
	       sizeof(ar_pci->msg_callbacks_current));
}

static int ath10k_pci_start_ce(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_pipe *ce_diag = ar_pci->ce_diag;
	const struct ce_attr *attr;
	struct ath10k_pci_pipe *pipe_info;
	struct ath10k_pci_compl *compl;
	int i, pipe_num, completions, disable_interrupts;

	spin_lock_init(&ar_pci->compl_lock);
	INIT_LIST_HEAD(&ar_pci->compl_process);

	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
		pipe_info = &ar_pci->pipe_info[pipe_num];

		spin_lock_init(&pipe_info->pipe_lock);
		INIT_LIST_HEAD(&pipe_info->compl_free);

		/* Handle Diagnostic CE specially */
		if (pipe_info->ce_hdl == ce_diag)
			continue;

		attr = &host_ce_config_wlan[pipe_num];
		completions = 0;

		if (attr->src_nentries) {
			disable_interrupts = attr->flags & CE_ATTR_DIS_INTR;
			ath10k_ce_send_cb_register(pipe_info->ce_hdl,
						   ath10k_pci_ce_send_done,
						   disable_interrupts);
			completions += attr->src_nentries;
		}

		if (attr->dest_nentries) {
			ath10k_ce_recv_cb_register(pipe_info->ce_hdl,
						   ath10k_pci_ce_recv_data);
			completions += attr->dest_nentries;
		}

		if (completions == 0)
			continue;

		for (i = 0; i < completions; i++) {
			compl = kmalloc(sizeof(*compl), GFP_KERNEL);
			if (!compl) {
				ath10k_warn("No memory for completion state\n");
				ath10k_pci_stop_ce(ar);
				return -ENOMEM;
			}

			compl->state = ATH10K_PCI_COMPL_FREE;
			list_add_tail(&compl->list, &pipe_info->compl_free);
		}
	}

	return 0;
}

static void ath10k_pci_kill_tasklet(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int i;

	tasklet_kill(&ar_pci->intr_tq);
	tasklet_kill(&ar_pci->msi_fw_err);

	for (i = 0; i < CE_COUNT; i++)
		tasklet_kill(&ar_pci->pipe_info[i].intr);
}

static void ath10k_pci_stop_ce(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_compl *compl;
	struct sk_buff *skb;

	ath10k_ce_disable_interrupts(ar);
	ath10k_pci_kill_tasklet(ar);

	/* Mark pending completions as aborted, so that upper layers free up
	 * their associated resources */
	spin_lock_bh(&ar_pci->compl_lock);
	list_for_each_entry(compl, &ar_pci->compl_process, list) {
		skb = compl->skb;
		ATH10K_SKB_CB(skb)->is_aborted = true;
	}
	spin_unlock_bh(&ar_pci->compl_lock);
}

static void ath10k_pci_cleanup_ce(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_compl *compl, *tmp;
	struct ath10k_pci_pipe *pipe_info;
	struct sk_buff *netbuf;
	int pipe_num;

	/* Free pending completions. */
	spin_lock_bh(&ar_pci->compl_lock);
	if (!list_empty(&ar_pci->compl_process))
		ath10k_warn("pending completions still present! possible memory leaks.\n");

	list_for_each_entry_safe(compl, tmp, &ar_pci->compl_process, list) {
		list_del(&compl->list);
		netbuf = compl->skb;
		dev_kfree_skb_any(netbuf);
		kfree(compl);
	}
	spin_unlock_bh(&ar_pci->compl_lock);

	/* Free unused completions for each pipe. */
	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
		pipe_info = &ar_pci->pipe_info[pipe_num];

		spin_lock_bh(&pipe_info->pipe_lock);
		list_for_each_entry_safe(compl, tmp,
					 &pipe_info->compl_free, list) {
			list_del(&compl->list);
			kfree(compl);
		}
		spin_unlock_bh(&pipe_info->pipe_lock);
	}
}

static void ath10k_pci_process_ce(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ar->hif.priv;
	struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
	struct ath10k_pci_compl *compl;
	struct sk_buff *skb;
	unsigned int nbytes;
	int ret, send_done = 0;

	/* Upper layers aren't ready to handle tx/rx completions in parallel so
	 * we must serialize all completion processing. */

	spin_lock_bh(&ar_pci->compl_lock);
	if (ar_pci->compl_processing) {
		spin_unlock_bh(&ar_pci->compl_lock);
		return;
	}
	ar_pci->compl_processing = true;
	spin_unlock_bh(&ar_pci->compl_lock);

	for (;;) {
		spin_lock_bh(&ar_pci->compl_lock);
		if (list_empty(&ar_pci->compl_process)) {
			spin_unlock_bh(&ar_pci->compl_lock);
			break;
		}
		compl = list_first_entry(&ar_pci->compl_process,
					 struct ath10k_pci_compl, list);
		list_del(&compl->list);
		spin_unlock_bh(&ar_pci->compl_lock);

		switch (compl->state) {
		case ATH10K_PCI_COMPL_SEND:
			cb->tx_completion(ar,
					  compl->skb,
					  compl->transfer_id);
			send_done = 1;
			break;
		case ATH10K_PCI_COMPL_RECV:
			ret = ath10k_pci_post_rx_pipe(compl->pipe_info, 1);
			if (ret) {
				ath10k_warn("Unable to post recv buffer for pipe: %d\n",
					    compl->pipe_info->pipe_num);
				break;
			}

			skb = compl->skb;
			nbytes = compl->nbytes;

			ath10k_dbg(ATH10K_DBG_PCI,
				   "ath10k_pci_ce_recv_data netbuf=%p  nbytes=%d\n",
				   skb, nbytes);
			ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL,
					"ath10k rx: ", skb->data, nbytes);

			if (skb->len + skb_tailroom(skb) >= nbytes) {
				skb_trim(skb, 0);
				skb_put(skb, nbytes);
				cb->rx_completion(ar, skb,
						  compl->pipe_info->pipe_num);
			} else {
				ath10k_warn("rxed more than expected (nbytes %d, max %d)",
					    nbytes,
					    skb->len + skb_tailroom(skb));
			}
			break;
		case ATH10K_PCI_COMPL_FREE:
			ath10k_warn("free completion cannot be processed\n");
			break;
		default:
			ath10k_warn("invalid completion state (%d)\n",
				    compl->state);
			break;
		}

		compl->state = ATH10K_PCI_COMPL_FREE;

		/*
		 * Add completion back to the pipe's free list.
		 */
		spin_lock_bh(&compl->pipe_info->pipe_lock);
		list_add_tail(&compl->list, &compl->pipe_info->compl_free);
		spin_unlock_bh(&compl->pipe_info->pipe_lock);
	}

	spin_lock_bh(&ar_pci->compl_lock);
	ar_pci->compl_processing = false;
	spin_unlock_bh(&ar_pci->compl_lock);
}

/* TODO - temporary mapping while we have too few CE's */
static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar,
					      u16 service_id, u8 *ul_pipe,
					      u8 *dl_pipe, int *ul_is_polled,
					      int *dl_is_polled)
{
	int ret = 0;

	/* polling for received messages not supported */
	*dl_is_polled = 0;

	switch (service_id) {
	case ATH10K_HTC_SVC_ID_HTT_DATA_MSG:
		/*
		 * Host->target HTT gets its own pipe, so it can be polled
		 * while other pipes are interrupt driven.
		 */
		*ul_pipe = 4;
		/*
		 * Use the same target->host pipe for HTC ctrl, HTC raw
		 * streams, and HTT.
		 */
		*dl_pipe = 1;
		break;

	case ATH10K_HTC_SVC_ID_RSVD_CTRL:
	case ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS:
		/*
		 * Note: HTC_RAW_STREAMS_SVC is currently unused, and
		 * HTC_CTRL_RSVD_SVC could share the same pipe as the
		 * WMI services.  So, if another CE is needed, change
		 * this to *ul_pipe = 3, which frees up CE 0.
		 */
		/* *ul_pipe = 3; */
		*ul_pipe = 0;
		*dl_pipe = 1;
		break;

	case ATH10K_HTC_SVC_ID_WMI_DATA_BK:
	case ATH10K_HTC_SVC_ID_WMI_DATA_BE:
	case ATH10K_HTC_SVC_ID_WMI_DATA_VI:
	case ATH10K_HTC_SVC_ID_WMI_DATA_VO:

	case ATH10K_HTC_SVC_ID_WMI_CONTROL:
		*ul_pipe = 3;
		*dl_pipe = 2;
		break;

		/* pipe 5 unused   */
		/* pipe 6 reserved */
		/* pipe 7 reserved */

	default:
		ret = -1;
		break;
	}
	*ul_is_polled =
		(host_ce_config_wlan[*ul_pipe].flags & CE_ATTR_DIS_INTR) != 0;

	return ret;
}
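
/*
 * The pipe numbers chosen above must stay in sync with
 * target_service_to_ce_map_wlan below, which publishes the same
 * service-to-pipe mapping to the firmware.
 */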

static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
					    u8 *ul_pipe, u8 *dl_pipe)
{
	int ul_is_polled, dl_is_polled;

	(void)ath10k_pci_hif_map_service_to_pipe(ar,
						 ATH10K_HTC_SVC_ID_RSVD_CTRL,
						 ul_pipe,
						 dl_pipe,
						 &ul_is_polled,
						 &dl_is_polled);
}

static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
				   int num)
{
	struct ath10k *ar = pipe_info->hif_ce_state;
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_ce_pipe *ce_state = pipe_info->ce_hdl;
	struct sk_buff *skb;
	dma_addr_t ce_data;
	int i, ret = 0;

	if (pipe_info->buf_sz == 0)
		return 0;

	for (i = 0; i < num; i++) {
		skb = dev_alloc_skb(pipe_info->buf_sz);
		if (!skb) {
			ath10k_warn("could not allocate skbuff for pipe %d\n",
				    num);
			ret = -ENOMEM;
			goto err;
		}

		WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");

		ce_data = dma_map_single(ar->dev, skb->data,
					 skb->len + skb_tailroom(skb),
					 DMA_FROM_DEVICE);

		if (unlikely(dma_mapping_error(ar->dev, ce_data))) {
			ath10k_warn("could not dma map skbuff\n");
			dev_kfree_skb_any(skb);
			ret = -EIO;
			goto err;
		}

		ATH10K_SKB_CB(skb)->paddr = ce_data;

		pci_dma_sync_single_for_device(ar_pci->pdev, ce_data,
					       pipe_info->buf_sz,
					       PCI_DMA_FROMDEVICE);

		ret = ath10k_ce_recv_buf_enqueue(ce_state, (void *)skb,
						 ce_data);
		if (ret) {
			ath10k_warn("could not enqueue to pipe %d (%d)\n",
				    num, ret);
			goto err;
		}
	}

	return ret;

err:
	ath10k_pci_rx_pipe_cleanup(pipe_info);
	return ret;
}

static int ath10k_pci_post_rx(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pipe_info;
	const struct ce_attr *attr;
	int pipe_num, ret = 0;

	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
		pipe_info = &ar_pci->pipe_info[pipe_num];
		attr = &host_ce_config_wlan[pipe_num];

		if (attr->dest_nentries == 0)
			continue;

		ret = ath10k_pci_post_rx_pipe(pipe_info,
					      attr->dest_nentries - 1);
		if (ret) {
			ath10k_warn("Unable to replenish recv buffers for pipe: %d\n",
				    pipe_num);

			for (; pipe_num >= 0; pipe_num--) {
				pipe_info = &ar_pci->pipe_info[pipe_num];
				ath10k_pci_rx_pipe_cleanup(pipe_info);
			}
			return ret;
		}
	}

	return 0;
}

static int ath10k_pci_hif_start(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int ret;

	ret = ath10k_pci_start_ce(ar);
	if (ret) {
		ath10k_warn("could not start CE (%d)\n", ret);
		return ret;
	}

	/* Post buffers once to start things off. */
	ret = ath10k_pci_post_rx(ar);
	if (ret) {
		ath10k_warn("could not post rx pipes (%d)\n", ret);
		return ret;
	}

	ar_pci->started = 1;
	return 0;
}

static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
{
	struct ath10k *ar;
	struct ath10k_pci *ar_pci;
	struct ath10k_ce_pipe *ce_hdl;
	u32 buf_sz;
	struct sk_buff *netbuf;
	u32 ce_data;

	buf_sz = pipe_info->buf_sz;

	/* Unused Copy Engine */
	if (buf_sz == 0)
		return;

	ar = pipe_info->hif_ce_state;
	ar_pci = ath10k_pci_priv(ar);

	if (!ar_pci->started)
		return;

	ce_hdl = pipe_info->ce_hdl;

	while (ath10k_ce_revoke_recv_next(ce_hdl, (void **)&netbuf,
					  &ce_data) == 0) {
		dma_unmap_single(ar->dev, ATH10K_SKB_CB(netbuf)->paddr,
				 netbuf->len + skb_tailroom(netbuf),
				 DMA_FROM_DEVICE);
		dev_kfree_skb_any(netbuf);
	}
}

static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
{
	struct ath10k *ar;
	struct ath10k_pci *ar_pci;
	struct ath10k_ce_pipe *ce_hdl;
	struct sk_buff *netbuf;
	u32 ce_data;
	unsigned int nbytes;
	unsigned int id;
	u32 buf_sz;

	buf_sz = pipe_info->buf_sz;

	/* Unused Copy Engine */
	if (buf_sz == 0)
		return;

	ar = pipe_info->hif_ce_state;
	ar_pci = ath10k_pci_priv(ar);

	if (!ar_pci->started)
		return;

	ce_hdl = pipe_info->ce_hdl;

	while (ath10k_ce_cancel_send_next(ce_hdl, (void **)&netbuf,
					  &ce_data, &nbytes, &id) == 0) {
		/*
		 * Indicate the completion to higher layers to free
		 * the buffer.
		 */
		ATH10K_SKB_CB(netbuf)->is_aborted = true;
		ar_pci->msg_callbacks_current.tx_completion(ar,
							    netbuf,
							    id);
	}
}

/*
 * Cleanup residual buffers for device shutdown:
 *    buffers that were enqueued for receive
 *    buffers that were to be sent
 * Note: Buffers that had completed but which were
 * not yet processed are on a completion queue. They
 * are handled when the completion thread shuts down.
 */
static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int pipe_num;

	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
		struct ath10k_pci_pipe *pipe_info;

		pipe_info = &ar_pci->pipe_info[pipe_num];
		ath10k_pci_rx_pipe_cleanup(pipe_info);
		ath10k_pci_tx_pipe_cleanup(pipe_info);
	}
}

static void ath10k_pci_ce_deinit(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pipe_info;
	int pipe_num;

	for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
		pipe_info = &ar_pci->pipe_info[pipe_num];
		if (pipe_info->ce_hdl) {
			ath10k_ce_deinit(pipe_info->ce_hdl);
			pipe_info->ce_hdl = NULL;
			pipe_info->buf_sz = 0;
		}
	}
}

static void ath10k_pci_disable_irqs(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	int i;

	for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
		disable_irq(ar_pci->pdev->irq + i);
}

static void ath10k_pci_hif_stop(struct ath10k *ar)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

	ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);

	/* Irqs are never explicitly re-enabled. They are implicitly re-enabled
	 * by ath10k_pci_start_intr(). */
	ath10k_pci_disable_irqs(ar);

	ath10k_pci_stop_ce(ar);

	/* At this point, asynchronous threads are stopped, the target should
	 * not DMA nor interrupt. We process the leftovers and then free
	 * everything else up. */

	ath10k_pci_process_ce(ar);
	ath10k_pci_cleanup_ce(ar);
	ath10k_pci_buffer_cleanup(ar);

	ar_pci->started = 0;
}

static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
					   void *req, u32 req_len,
					   void *resp, u32 *resp_len)
{
	struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
	struct ath10k_pci_pipe *pci_tx = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
	struct ath10k_pci_pipe *pci_rx = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
	struct ath10k_ce_pipe *ce_tx = pci_tx->ce_hdl;
	struct ath10k_ce_pipe *ce_rx = pci_rx->ce_hdl;
	dma_addr_t req_paddr = 0;
	dma_addr_t resp_paddr = 0;
	struct bmi_xfer xfer = {};
	void *treq, *tresp = NULL;
	int ret = 0;

	if (resp && !resp_len)
		return -EINVAL;

	if (resp && resp_len && *resp_len == 0)
		return -EINVAL;

	treq = kmemdup(req, req_len, GFP_KERNEL);
	if (!treq)
		return -ENOMEM;

	req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE);
	ret = dma_mapping_error(ar->dev, req_paddr);
	if (ret)
		goto err_dma;

	if (resp && resp_len) {
		tresp = kzalloc(*resp_len, GFP_KERNEL);
		if (!tresp) {
			ret = -ENOMEM;
			goto err_req;
		}

		resp_paddr = dma_map_single(ar->dev, tresp, *resp_len,
					    DMA_FROM_DEVICE);
		ret = dma_mapping_error(ar->dev, resp_paddr);
		if (ret)
			goto err_req;

		xfer.wait_for_resp = true;
		xfer.resp_len = 0;

		ath10k_ce_recv_buf_enqueue(ce_rx, &xfer, resp_paddr);
	}

	init_completion(&xfer.done);

	ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0);
	if (ret)
		goto err_resp;

	ret = wait_for_completion_timeout(&xfer.done,
					  BMI_COMMUNICATION_TIMEOUT_HZ);
	if (ret <= 0) {
		u32 unused_buffer;
		unsigned int unused_nbytes;
		unsigned int unused_id;

		ret = -ETIMEDOUT;
		ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer,
					   &unused_nbytes, &unused_id);
	} else {
		/* non-zero means we did not time out */
		ret = 0;
	}

err_resp:
	if (resp) {
		u32 unused_buffer;

		ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer);
		dma_unmap_single(ar->dev, resp_paddr,
				 *resp_len, DMA_FROM_DEVICE);
	}
err_req:
	dma_unmap_single(ar->dev, req_paddr, req_len, DMA_TO_DEVICE);

	if (ret == 0 && resp_len) {
		*resp_len = min(*resp_len, xfer.resp_len);
		memcpy(resp, tresp, xfer.resp_len);
	}
err_dma:
	kfree(treq);
	kfree(tresp);

	return ret;
}
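
/*
 * The BMI exchange above is synchronous: the request is bounce-buffered,
 * DMA-mapped and sent on the BMI_CE_NUM_TO_TARG pipe, an optional response
 * buffer is posted on BMI_CE_NUM_TO_HOST, and xfer.done is completed from
 * the two CE callbacks below.
 */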

static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state)
{
	struct bmi_xfer *xfer;
	u32 ce_data;
	unsigned int nbytes;
	unsigned int transfer_id;

	if (ath10k_ce_completed_send_next(ce_state, (void **)&xfer, &ce_data,
					  &nbytes, &transfer_id))
		return;

	if (xfer->wait_for_resp)
		return;

	complete(&xfer->done);
}

static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state)
{
	struct bmi_xfer *xfer;
	u32 ce_data;
	unsigned int nbytes;
	unsigned int transfer_id;
	unsigned int flags;

	if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer, &ce_data,
					  &nbytes, &transfer_id, &flags))
		return;

	if (!xfer->wait_for_resp) {
		ath10k_warn("unexpected: BMI data received; ignoring\n");
		return;
	}

	xfer->resp_len = nbytes;
	complete(&xfer->done);
}

/*
 * Map from service/endpoint to Copy Engine.
 * This table is derived from the CE_PCI TABLE, above.
 * It is passed to the Target at startup for use by firmware.
 */
static const struct service_to_pipe target_service_to_ce_map_wlan[] = {
	{
		 ATH10K_HTC_SVC_ID_WMI_DATA_VO,
		 PIPEDIR_OUT,		/* out = UL = host -> target */
		 3,
	},
	{
		 ATH10K_HTC_SVC_ID_WMI_DATA_VO,
		 PIPEDIR_IN,		/* in = DL = target -> host */
		 2,
	},
	{
		 ATH10K_HTC_SVC_ID_WMI_DATA_BK,
		 PIPEDIR_OUT,		/* out = UL = host -> target */
		 3,
	},
	{
		 ATH10K_HTC_SVC_ID_WMI_DATA_BK,
		 PIPEDIR_IN,		/* in = DL = target -> host */
		 2,
	},
	{
		 ATH10K_HTC_SVC_ID_WMI_DATA_BE,
		 PIPEDIR_OUT,		/* out = UL = host -> target */
		 3,
	},
	{
		 ATH10K_HTC_SVC_ID_WMI_DATA_BE,
		 PIPEDIR_IN,		/* in = DL = target -> host */
		 2,
	},
	{
		 ATH10K_HTC_SVC_ID_WMI_DATA_VI,
		 PIPEDIR_OUT,		/* out = UL = host -> target */
		 3,
	},
	{
		 ATH10K_HTC_SVC_ID_WMI_DATA_VI,
		 PIPEDIR_IN,		/* in = DL = target -> host */
		 2,
	},
	{
		 ATH10K_HTC_SVC_ID_WMI_CONTROL,
		 PIPEDIR_OUT,		/* out = UL = host -> target */
		 3,
	},
	{
		 ATH10K_HTC_SVC_ID_WMI_CONTROL,
		 PIPEDIR_IN,		/* in = DL = target -> host */
		 2,
	},
	{
		 ATH10K_HTC_SVC_ID_RSVD_CTRL,
		 PIPEDIR_OUT,		/* out = UL = host -> target */
		 0,		/* could be moved to 3 (share with WMI) */
	},
	{
		 ATH10K_HTC_SVC_ID_RSVD_CTRL,
		 PIPEDIR_IN,		/* in = DL = target -> host */
		 1,
	},
	{
		 ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS,	/* not currently used */
		 PIPEDIR_OUT,		/* out = UL = host -> target */
		 0,
	},
	{
		 ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS,	/* not currently used */
		 PIPEDIR_IN,		/* in = DL = target -> host */
		 1,
	},
	{
		 ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
		 PIPEDIR_OUT,		/* out = UL = host -> target */
		 4,
	},
	{
		 ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
		 PIPEDIR_IN,		/* in = DL = target -> host */
		 1,
	},

	/* (Additions here) */

	{				/* Must be last */
		 0,
		 0,
1583                  0,
1584         },
1585 };
1586
1587 /*
1588  * Send an interrupt to the device to wake up the Target CPU
1589  * so it has an opportunity to notice any changed state.
1590  */
1591 static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
1592 {
1593         int ret;
1594         u32 core_ctrl;
1595
1596         ret = ath10k_pci_diag_read_access(ar, SOC_CORE_BASE_ADDRESS |
1597                                               CORE_CTRL_ADDRESS,
1598                                           &core_ctrl);
1599         if (ret) {
1600                 ath10k_warn("Unable to read core ctrl\n");
1601                 return ret;
1602         }
1603
1604         /* A_INUM_FIRMWARE interrupt to Target CPU */
1605         core_ctrl |= CORE_CTRL_CPU_INTR_MASK;
1606
1607         ret = ath10k_pci_diag_write_access(ar, SOC_CORE_BASE_ADDRESS |
1608                                                CORE_CTRL_ADDRESS,
1609                                            core_ctrl);
1610         if (ret)
1611                 ath10k_warn("Unable to set interrupt mask\n");
1612
1613         return ret;
1614 }
1615
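/*
 * Download the target-side CE configuration and service-to-pipe map
 * through the diagnostic window, disable PCIe L1, set up early IRAM
 * allocation and finally tell the firmware that early configuration
 * is done.
 */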
1616 static int ath10k_pci_init_config(struct ath10k *ar)
1617 {
1618         u32 interconnect_targ_addr;
1619         u32 pcie_state_targ_addr = 0;
1620         u32 pipe_cfg_targ_addr = 0;
1621         u32 svc_to_pipe_map = 0;
1622         u32 pcie_config_flags = 0;
1623         u32 ealloc_value;
1624         u32 ealloc_targ_addr;
1625         u32 flag2_value;
1626         u32 flag2_targ_addr;
1627         int ret = 0;
1628
1629         /* Download to Target the CE Config and the service-to-CE map */
1630         interconnect_targ_addr =
1631                 host_interest_item_address(HI_ITEM(hi_interconnect_state));
1632
1633         /* Supply Target-side CE configuration */
1634         ret = ath10k_pci_diag_read_access(ar, interconnect_targ_addr,
1635                                           &pcie_state_targ_addr);
1636         if (ret != 0) {
1637                 ath10k_err("Failed to get pcie state addr: %d\n", ret);
1638                 return ret;
1639         }
1640
1641         if (pcie_state_targ_addr == 0) {
1642                 ret = -EIO;
1643                 ath10k_err("Invalid pcie state addr\n");
1644                 return ret;
1645         }
1646
1647         ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
1648                                           offsetof(struct pcie_state,
1649                                                    pipe_cfg_addr),
1650                                           &pipe_cfg_targ_addr);
1651         if (ret != 0) {
1652                 ath10k_err("Failed to get pipe cfg addr: %d\n", ret);
1653                 return ret;
1654         }
1655
1656         if (pipe_cfg_targ_addr == 0) {
1657                 ret = -EIO;
1658                 ath10k_err("Invalid pipe cfg addr\n");
1659                 return ret;
1660         }
1661
1662         ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr,
1663                                  target_ce_config_wlan,
1664                                  sizeof(target_ce_config_wlan));
1665
1666         if (ret != 0) {
1667                 ath10k_err("Failed to write pipe cfg: %d\n", ret);
1668                 return ret;
1669         }
1670
1671         ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
1672                                           offsetof(struct pcie_state,
1673                                                    svc_to_pipe_map),
1674                                           &svc_to_pipe_map);
1675         if (ret != 0) {
1676                 ath10k_err("Failed to get svc/pipe map: %d\n", ret);
1677                 return ret;
1678         }
1679
1680         if (svc_to_pipe_map == 0) {
1681                 ret = -EIO;
1682                 ath10k_err("Invalid svc_to_pipe map\n");
1683                 return ret;
1684         }
1685
1686         ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map,
1687                                  target_service_to_ce_map_wlan,
1688                                  sizeof(target_service_to_ce_map_wlan));
1689         if (ret != 0) {
1690                 ath10k_err("Failed to write svc/pipe map: %d\n", ret);
1691                 return ret;
1692         }
1693
1694         ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
1695                                           offsetof(struct pcie_state,
1696                                                    config_flags),
1697                                           &pcie_config_flags);
1698         if (ret != 0) {
1699                 ath10k_err("Failed to get pcie config_flags: %d\n", ret);
1700                 return ret;
1701         }
1702
1703         pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;
1704
1705         ret = ath10k_pci_diag_write_mem(ar, pcie_state_targ_addr +
1706                                  offsetof(struct pcie_state, config_flags),
1707                                  &pcie_config_flags,
1708                                  sizeof(pcie_config_flags));
1709         if (ret != 0) {
1710                 ath10k_err("Failed to write pcie config_flags: %d\n", ret);
1711                 return ret;
1712         }
1713
1714         /* configure early allocation */
1715         ealloc_targ_addr = host_interest_item_address(HI_ITEM(hi_early_alloc));
1716
1717         ret = ath10k_pci_diag_read_access(ar, ealloc_targ_addr, &ealloc_value);
1718         if (ret != 0) {
1719                 ath10k_err("Failed to get early alloc val: %d\n", ret);
1720                 return ret;
1721         }
1722
1723         /* first bank is switched to IRAM */
1724         ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
1725                          HI_EARLY_ALLOC_MAGIC_MASK);
1726         ealloc_value |= ((1 << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
1727                          HI_EARLY_ALLOC_IRAM_BANKS_MASK);
1728
1729         ret = ath10k_pci_diag_write_access(ar, ealloc_targ_addr, ealloc_value);
1730         if (ret != 0) {
1731                 ath10k_err("Failed to set early alloc val: %d\n", ret);
1732                 return ret;
1733         }
1734
1735         /* Tell Target to proceed with initialization */
1736         flag2_targ_addr = host_interest_item_address(HI_ITEM(hi_option_flag2));
1737
1738         ret = ath10k_pci_diag_read_access(ar, flag2_targ_addr, &flag2_value);
1739         if (ret != 0) {
1740                 ath10k_err("Failed to get option val: %d\n", ret);
1741                 return ret;
1742         }
1743
1744         flag2_value |= HI_OPTION_EARLY_CFG_DONE;
1745
1746         ret = ath10k_pci_diag_write_access(ar, flag2_targ_addr, flag2_value);
1747         if (ret != 0) {
1748                 ath10k_err("Failed to set option val: %d\n", ret);
1749                 return ret;
1750         }
1751
1752         return 0;
1753 }
1754
1755
1756
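/*
 * Bring up one Copy Engine per entry in host_ce_config_wlan. The last
 * CE is reserved for the diagnostic window rather than used as a
 * regular HIF pipe.
 */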
1757 static int ath10k_pci_ce_init(struct ath10k *ar)
1758 {
1759         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1760         struct ath10k_pci_pipe *pipe_info;
1761         const struct ce_attr *attr;
1762         int pipe_num;
1763
1764         for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
1765                 pipe_info = &ar_pci->pipe_info[pipe_num];
1766                 pipe_info->pipe_num = pipe_num;
1767                 pipe_info->hif_ce_state = ar;
1768                 attr = &host_ce_config_wlan[pipe_num];
1769
1770                 pipe_info->ce_hdl = ath10k_ce_init(ar, pipe_num, attr);
1771                 if (pipe_info->ce_hdl == NULL) {
1772                         ath10k_err("Unable to initialize CE for pipe: %d\n",
1773                                    pipe_num);
1774
1775                         /* Calling ath10k_pci_ce_deinit() here is safe;
1776                          * it checks whether ce_hdl is valid for each pipe */
1777                         ath10k_pci_ce_deinit(ar);
1778                         return -ENOMEM;
1779                 }
1780
1781                 if (pipe_num == CE_COUNT - 1) {
1782                         /*
1783                          * Reserve the ultimate CE for
1784                          * diagnostic Window support
1785                          */
1786                         ar_pci->ce_diag = pipe_info->ce_hdl;
1787                         continue;
1788                 }
1789
1790                 pipe_info->buf_sz = (size_t) (attr->src_sz_max);
1791         }
1792
1793         /*
1794          * Initially, establish CE completion handlers for use with BMI.
1795          * These are overwritten with generic handlers after we exit the BMI phase.
1796          */
1797         pipe_info = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
1798         ath10k_ce_send_cb_register(pipe_info->ce_hdl,
1799                                    ath10k_pci_bmi_send_done, 0);
1800
1801         pipe_info = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
1802         ath10k_ce_recv_cb_register(pipe_info->ce_hdl,
1803                                    ath10k_pci_bmi_recv_data);
1804
1805         return 0;
1806 }
1807
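/*
 * Check for and acknowledge a firmware event. Once the HIF has been
 * started this dumps the firmware crash area; before that, all we can
 * do is warn about the unexpectedly early event.
 */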
1808 static void ath10k_pci_fw_interrupt_handler(struct ath10k *ar)
1809 {
1810         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1811         u32 fw_indicator_address, fw_indicator;
1812
1813         ath10k_pci_wake(ar);
1814
1815         fw_indicator_address = ar_pci->fw_indicator_address;
1816         fw_indicator = ath10k_pci_read32(ar, fw_indicator_address);
1817
1818         if (fw_indicator & FW_IND_EVENT_PENDING) {
1819                 /* ACK: clear Target-side pending event */
1820                 ath10k_pci_write32(ar, fw_indicator_address,
1821                                    fw_indicator & ~FW_IND_EVENT_PENDING);
1822
1823                 if (ar_pci->started) {
1824                         ath10k_pci_hif_dump_area(ar);
1825                 } else {
1826                         /*
1827                          * Probable Target failure before we're prepared
1828                          * to handle it.  Generally unexpected.
1829                          */
1830                         ath10k_warn("early firmware event indicated\n");
1831                 }
1832         }
1833
1834         ath10k_pci_sleep(ar);
1835 }
1836
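/*
 * Power-up sequence: start interrupt handling, cold-reset the target,
 * wait for its firmware to finish booting, then bring up the copy
 * engines, download the pipe configuration and wake the target CPU.
 */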
1837 static int ath10k_pci_hif_power_up(struct ath10k *ar)
1838 {
1839         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1840         int ret;
1841
1842         ret = ath10k_pci_start_intr(ar);
1843         if (ret) {
1844                 ath10k_err("could not start interrupt handling (%d)\n", ret);
1845                 goto err;
1846         }
1847
1848         /*
1849          * Bring the target up cleanly.
1850          *
1851          * The target may be in an undefined state with an AUX-powered Target
1852          * and a Host in WoW mode. If the Host crashes, loses power, or is
1853          * restarted (without unloading the driver) then the Target is left
1854          * (aux) powered and running. On a subsequent driver load, the Target
1855          * is in an unexpected state. We try to catch that here in order to
1856          * reset the Target and retry the probe.
1857          */
1858         ath10k_pci_device_reset(ar);
1859
1860         ret = ath10k_pci_wait_for_target_init(ar);
1861         if (ret)
1862                 goto err_irq;
1863
1864         if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
1865                 /* Force AWAKE forever */
1866                 ath10k_do_pci_wake(ar);
1867
1868         ret = ath10k_pci_ce_init(ar);
1869         if (ret)
1870                 goto err_ps;
1871
1872         ret = ath10k_pci_init_config(ar);
1873         if (ret)
1874                 goto err_ce;
1875
1876         ret = ath10k_pci_wake_target_cpu(ar);
1877         if (ret) {
1878                 ath10k_err("could not wake up target CPU (%d)\n", ret);
1879                 goto err_ce;
1880         }
1881
1882         return 0;
1883
1884 err_ce:
1885         ath10k_pci_ce_deinit(ar);
1886 err_ps:
1887         if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
1888                 ath10k_do_pci_sleep(ar);
1889 err_irq:
1890         ath10k_pci_stop_intr(ar);
1891 err:
1892         return ret;
1893 }
1894
1895 static void ath10k_pci_hif_power_down(struct ath10k *ar)
1896 {
1897         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1898
1899         ath10k_pci_stop_intr(ar);
1900
1901         ath10k_pci_ce_deinit(ar);
1902         if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
1903                 ath10k_do_pci_sleep(ar);
1904 }
1905
1906 #ifdef CONFIG_PM
1907
1908 #define ATH10K_PCI_PM_CONTROL 0x44
1909
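/*
 * Suspend/resume toggle the power state by hand: the driver treats
 * the low byte of the config register at offset 0x44 as the PM
 * power-state field (0x3 == D3hot) instead of going through
 * pci_set_power_state().
 */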
1910 static int ath10k_pci_hif_suspend(struct ath10k *ar)
1911 {
1912         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1913         struct pci_dev *pdev = ar_pci->pdev;
1914         u32 val;
1915
1916         pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
1917
1918         if ((val & 0x000000ff) != 0x3) {
1919                 pci_save_state(pdev);
1920                 pci_disable_device(pdev);
1921                 pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
1922                                        (val & 0xffffff00) | 0x03);
1923         }
1924
1925         return 0;
1926 }
1927
1928 static int ath10k_pci_hif_resume(struct ath10k *ar)
1929 {
1930         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1931         struct pci_dev *pdev = ar_pci->pdev;
1932         u32 val;
1933
1934         pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
1935
1936         if ((val & 0x000000ff) != 0) {
1937                 pci_restore_state(pdev);
1938                 pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
1939                                        val & 0xffffff00);
1940                 /*
1941                  * Suspend/Resume resets the PCI configuration space,
1942                  * so we have to re-disable the RETRY_TIMEOUT register (0x41)
1943                  * to keep PCI Tx retries from interfering with C3 CPU state
1944                  */
1945                 pci_read_config_dword(pdev, 0x40, &val);
1946
1947                 if ((val & 0x0000ff00) != 0)
1948                         pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
1949         }
1950
1951         return 0;
1952 }
1953 #endif
1954
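/* HIF callbacks through which the ath10k core drives this transport. */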
1955 static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
1956         .send_head              = ath10k_pci_hif_send_head,
1957         .exchange_bmi_msg       = ath10k_pci_hif_exchange_bmi_msg,
1958         .start                  = ath10k_pci_hif_start,
1959         .stop                   = ath10k_pci_hif_stop,
1960         .map_service_to_pipe    = ath10k_pci_hif_map_service_to_pipe,
1961         .get_default_pipe       = ath10k_pci_hif_get_default_pipe,
1962         .send_complete_check    = ath10k_pci_hif_send_complete_check,
1963         .set_callbacks          = ath10k_pci_hif_set_callbacks,
1964         .get_free_queue_number  = ath10k_pci_hif_get_free_queue_number,
1965         .power_up               = ath10k_pci_hif_power_up,
1966         .power_down             = ath10k_pci_hif_power_down,
1967 #ifdef CONFIG_PM
1968         .suspend                = ath10k_pci_hif_suspend,
1969         .resume                 = ath10k_pci_hif_resume,
1970 #endif
1971 };
1972
1973 static void ath10k_pci_ce_tasklet(unsigned long ptr)
1974 {
1975         struct ath10k_pci_pipe *pipe = (struct ath10k_pci_pipe *)ptr;
1976         struct ath10k_pci *ar_pci = pipe->ar_pci;
1977
1978         ath10k_ce_per_engine_service(ar_pci->ar, pipe->pipe_num);
1979 }
1980
1981 static void ath10k_msi_err_tasklet(unsigned long data)
1982 {
1983         struct ath10k *ar = (struct ath10k *)data;
1984
1985         ath10k_pci_fw_interrupt_handler(ar);
1986 }
1987
1988 /*
1989  * Handler for a per-engine interrupt on a PARTICULAR CE.
1990  * This is used in cases where each CE has a private MSI interrupt.
1991  */
1992 static irqreturn_t ath10k_pci_per_engine_handler(int irq, void *arg)
1993 {
1994         struct ath10k *ar = arg;
1995         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1996         int ce_id = irq - ar_pci->pdev->irq - MSI_ASSIGN_CE_INITIAL;
1997
1998         if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_pci->pipe_info)) {
1999                 ath10k_warn("unexpected/invalid irq %d ce_id %d\n", irq, ce_id);
2000                 return IRQ_HANDLED;
2001         }
2002
2003         /*
2004          * NOTE: We are able to derive ce_id from irq because we
2005          * use a one-to-one mapping for CE's 0..5.
2006          * CE's 6 & 7 do not use interrupts at all.
2007          *
2008          * This mapping must be kept in sync with the mapping
2009          * used by firmware.
2010          */
2011         tasklet_schedule(&ar_pci->pipe_info[ce_id].intr);
2012         return IRQ_HANDLED;
2013 }
2014
2015 static irqreturn_t ath10k_pci_msi_fw_handler(int irq, void *arg)
2016 {
2017         struct ath10k *ar = arg;
2018         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2019
2020         tasklet_schedule(&ar_pci->msi_fw_err);
2021         return IRQ_HANDLED;
2022 }
2023
2024 /*
2025  * Top-level interrupt handler for all PCI interrupts from a Target.
2026  * When a block of MSI interrupts is allocated, this top-level handler
2027  * is not used; instead, we directly call the correct sub-handler.
2028  */
2029 static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
2030 {
2031         struct ath10k *ar = arg;
2032         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2033
2034         if (ar_pci->num_msi_intrs == 0) {
2035                 /*
2036                  * IMPORTANT: INTR_CLR register has to be set after
2037                  * INTR_ENABLE is set to 0, otherwise the interrupt
2038                  * cannot be cleared properly.
2039                  */
2040                 iowrite32(0, ar_pci->mem +
2041                           (SOC_CORE_BASE_ADDRESS |
2042                            PCIE_INTR_ENABLE_ADDRESS));
2043                 iowrite32(PCIE_INTR_FIRMWARE_MASK |
2044                           PCIE_INTR_CE_MASK_ALL,
2045                           ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
2046                                          PCIE_INTR_CLR_ADDRESS));
2047                 /*
2048                  * IMPORTANT: this extra read transaction is required to
2049                  * flush the posted write buffer.
2050                  */
2051                 (void) ioread32(ar_pci->mem +
2052                                 (SOC_CORE_BASE_ADDRESS |
2053                                  PCIE_INTR_ENABLE_ADDRESS));
2054         }
2055
2056         tasklet_schedule(&ar_pci->intr_tq);
2057
2058         return IRQ_HANDLED;
2059 }
2060
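/*
 * Bottom half shared by the single-MSI and legacy interrupt paths:
 * handle firmware indications, service all copy engines and, on the
 * legacy path, re-enable the interrupt sources masked by the top
 * half.
 */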
2061 static void ath10k_pci_tasklet(unsigned long data)
2062 {
2063         struct ath10k *ar = (struct ath10k *)data;
2064         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2065
2066         ath10k_pci_fw_interrupt_handler(ar); /* FIXME: Handle FW error */
2067         ath10k_ce_per_engine_service_any(ar);
2068
2069         if (ar_pci->num_msi_intrs == 0) {
2070                 /* Enable Legacy PCI line interrupts */
2071                 iowrite32(PCIE_INTR_FIRMWARE_MASK |
2072                           PCIE_INTR_CE_MASK_ALL,
2073                           ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
2074                                          PCIE_INTR_ENABLE_ADDRESS));
2075                 /*
2076                  * IMPORTANT: this extra read transaction is required to
2077                  * flush the posted write buffer
2078                  */
2079                 (void) ioread32(ar_pci->mem +
2080                                 (SOC_CORE_BASE_ADDRESS |
2081                                  PCIE_INTR_ENABLE_ADDRESS));
2082         }
2083 }
2084
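/*
 * Despite the _msix suffix this allocates a block of plain MSI
 * vectors via pci_enable_msi_block(): one for firmware indications
 * plus one per interrupt-capable copy engine.
 */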
2085 static int ath10k_pci_start_intr_msix(struct ath10k *ar, int num)
2086 {
2087         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2088         int ret;
2089         int i;
2090
2091         ret = pci_enable_msi_block(ar_pci->pdev, num);
2092         if (ret)
2093                 return ret;
2094
2095         ret = request_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW,
2096                           ath10k_pci_msi_fw_handler,
2097                           IRQF_SHARED, "ath10k_pci", ar);
2098         if (ret) {
2099                 ath10k_warn("request_irq(%d) failed %d\n",
2100                             ar_pci->pdev->irq + MSI_ASSIGN_FW, ret);
2101
2102                 pci_disable_msi(ar_pci->pdev);
2103                 return ret;
2104         }
2105
2106         for (i = MSI_ASSIGN_CE_INITIAL; i <= MSI_ASSIGN_CE_MAX; i++) {
2107                 ret = request_irq(ar_pci->pdev->irq + i,
2108                                   ath10k_pci_per_engine_handler,
2109                                   IRQF_SHARED, "ath10k_pci", ar);
2110                 if (ret) {
2111                         ath10k_warn("request_irq(%d) failed %d\n",
2112                                     ar_pci->pdev->irq + i, ret);
2113
2114                         for (i--; i >= MSI_ASSIGN_CE_INITIAL; i--)
2115                                 free_irq(ar_pci->pdev->irq + i, ar);
2116
2117                         free_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW, ar);
2118                         pci_disable_msi(ar_pci->pdev);
2119                         return ret;
2120                 }
2121         }
2122
2123         ath10k_info("MSI-X interrupt handling (%d intrs)\n", num);
2124         return 0;
2125 }
2126
2127 static int ath10k_pci_start_intr_msi(struct ath10k *ar)
2128 {
2129         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2130         int ret;
2131
2132         ret = pci_enable_msi(ar_pci->pdev);
2133         if (ret < 0)
2134                 return ret;
2135
2136         ret = request_irq(ar_pci->pdev->irq,
2137                           ath10k_pci_interrupt_handler,
2138                           IRQF_SHARED, "ath10k_pci", ar);
2139         if (ret < 0) {
2140                 pci_disable_msi(ar_pci->pdev);
2141                 return ret;
2142         }
2143
2144         ath10k_info("MSI interrupt handling\n");
2145         return 0;
2146 }
2147
2148 static int ath10k_pci_start_intr_legacy(struct ath10k *ar)
2149 {
2150         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2151         int ret;
2152
2153         ret = request_irq(ar_pci->pdev->irq,
2154                           ath10k_pci_interrupt_handler,
2155                           IRQF_SHARED, "ath10k_pci", ar);
2156         if (ret < 0)
2157                 return ret;
2158
2159         /*
2160          * Make sure to wake the Target before enabling Legacy
2161          * Interrupt.
2162          */
2163         iowrite32(PCIE_SOC_WAKE_V_MASK,
2164                   ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
2165                   PCIE_SOC_WAKE_ADDRESS);
2166
2167         ret = ath10k_pci_wait(ar);
2168         if (ret) {
2169                 ath10k_warn("Failed to enable legacy interrupt, target did not wake up: %d\n",
2170                             ret);
2171                 free_irq(ar_pci->pdev->irq, ar);
2172                 return ret;
2173         }
2174
2175         /*
2176          * A potential race occurs here: the CORE_BASE write
2177          * depends on the target correctly decoding the AXI address,
2178          * but the host cannot know when the target has written its
2179          * BAR to CORE_CTRL, so this write may be lost. For now, work
2180          * around the race by repeating the write in the wait loop of
2181          * ath10k_pci_wait_for_target_init().
2182          */
2183         iowrite32(PCIE_INTR_FIRMWARE_MASK |
2184                   PCIE_INTR_CE_MASK_ALL,
2185                   ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
2186                                  PCIE_INTR_ENABLE_ADDRESS));
2187         iowrite32(PCIE_SOC_WAKE_RESET,
2188                   ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
2189                   PCIE_SOC_WAKE_ADDRESS);
2190
2191         ath10k_info("legacy interrupt handling\n");
2192         return 0;
2193 }
2194
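/*
 * Interrupt bring-up ladder: try a block of MSI vectors first (when
 * the hardware supports it), fall back to a single MSI, and finally
 * to a shared legacy interrupt line.
 */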
2195 static int ath10k_pci_start_intr(struct ath10k *ar)
2196 {
2197         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2198         int num = MSI_NUM_REQUEST;
2199         int ret;
2200         int i;
2201
2202         tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long) ar);
2203         tasklet_init(&ar_pci->msi_fw_err, ath10k_msi_err_tasklet,
2204                      (unsigned long) ar);
2205
2206         for (i = 0; i < CE_COUNT; i++) {
2207                 ar_pci->pipe_info[i].ar_pci = ar_pci;
2208                 tasklet_init(&ar_pci->pipe_info[i].intr,
2209                              ath10k_pci_ce_tasklet,
2210                              (unsigned long)&ar_pci->pipe_info[i]);
2211         }
2212
2213         if (!test_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features))
2214                 num = 1;
2215
2216         if (num > 1) {
2217                 ret = ath10k_pci_start_intr_msix(ar, num);
2218                 if (ret == 0)
2219                         goto exit;
2220
2221                 ath10k_warn("MSI-X didn't succeed (%d), trying MSI\n", ret);
2222                 num = 1;
2223         }
2224
2225         if (num == 1) {
2226                 ret = ath10k_pci_start_intr_msi(ar);
2227                 if (ret == 0)
2228                         goto exit;
2229
2230                 ath10k_warn("MSI didn't succeed (%d), trying legacy INTR\n",
2231                             ret);
2232                 num = 0;
2233         }
2234
2235         ret = ath10k_pci_start_intr_legacy(ar);
2236         if (ret) {
2237                 ath10k_warn("Failed to start legacy interrupts: %d\n", ret);
2238                 return ret;
2239         }
2240
2241 exit:
2242         ar_pci->num_msi_intrs = num;
2243         return ret;
2244 }
2245
2246 static void ath10k_pci_stop_intr(struct ath10k *ar)
2247 {
2248         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2249         int i;
2250
2251         /* There's at least one interrupt regardless of whether it's legacy
2252          * INTR, MSI or MSI-X */
2253         for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
2254                 free_irq(ar_pci->pdev->irq + i, ar);
2255
2256         if (ar_pci->num_msi_intrs > 0)
2257                 pci_disable_msi(ar_pci->pdev);
2258 }
2259
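/*
 * Poll the firmware indicator register for FW_IND_INITIALIZED, up to
 * 3 seconds in 10 ms steps, while holding the SoC awake.
 */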
2260 static int ath10k_pci_wait_for_target_init(struct ath10k *ar)
2261 {
2262         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2263         int wait_limit = 300; /* 3 sec */
2264         int ret;
2265
2266         /* Wait for Target to finish initialization before we proceed. */
2267         iowrite32(PCIE_SOC_WAKE_V_MASK,
2268                   ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
2269                   PCIE_SOC_WAKE_ADDRESS);
2270
2271         ret = ath10k_pci_wait(ar);
2272         if (ret) {
2273                 ath10k_warn("Failed to reset target, target did not wake up: %d\n",
2274                             ret);
2275                 return ret;
2276         }
2277
2278         while (wait_limit-- &&
2279                !(ioread32(ar_pci->mem + FW_INDICATOR_ADDRESS) &
2280                  FW_IND_INITIALIZED)) {
2281                 if (ar_pci->num_msi_intrs == 0)
2282                         /* Fix potential race by repeating CORE_BASE writes */
2283                         iowrite32(PCIE_INTR_FIRMWARE_MASK |
2284                                   PCIE_INTR_CE_MASK_ALL,
2285                                   ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
2286                                                  PCIE_INTR_ENABLE_ADDRESS));
2287                 mdelay(10);
2288         }
2289
2290         if (wait_limit < 0) {
2291                 ath10k_err("Target stalled\n");
2292                 iowrite32(PCIE_SOC_WAKE_RESET,
2293                           ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
2294                           PCIE_SOC_WAKE_ADDRESS);
2295                 return -EIO;
2296         }
2297
2298         iowrite32(PCIE_SOC_WAKE_RESET,
2299                   ar_pci->mem + PCIE_LOCAL_BASE_ADDRESS +
2300                   PCIE_SOC_WAKE_ADDRESS);
2301
2302         return 0;
2303 }
2304
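/*
 * Cold-reset the SoC: wake it, assert SOC_GLOBAL_RESET, wait for the
 * RTC to report the cold reset, then deassert and wait for the reset
 * to clear.
 */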
2305 static void ath10k_pci_device_reset(struct ath10k *ar)
2306 {
2307         int i;
2308         u32 val;
2309
2310         ath10k_pci_reg_write32(ar, PCIE_SOC_WAKE_ADDRESS,
2311                                PCIE_SOC_WAKE_V_MASK);
2312         for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
2313                 if (ath10k_pci_target_is_awake(ar))
2314                         break;
2315                 msleep(1);
2316         }
2317
2318         /* Put Target, including PCIe, into RESET. */
2319         val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS);
2320         val |= 1;
2321         ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
2322
2323         for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
2324                 if (ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
2325                                           RTC_STATE_COLD_RESET_MASK)
2326                         break;
2327                 msleep(1);
2328         }
2329
2330         /* Pull Target, including PCIe, out of RESET. */
2331         val &= ~1;
2332         ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
2333
2334         for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
2335                 if (!(ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
2336                                             RTC_STATE_COLD_RESET_MASK))
2337                         break;
2338                 msleep(1);
2339         }
2340
2341         ath10k_pci_reg_write32(ar, PCIE_SOC_WAKE_ADDRESS, PCIE_SOC_WAKE_RESET);
2342 }
2343
2344 static void ath10k_pci_dump_features(struct ath10k_pci *ar_pci)
2345 {
2346         int i;
2347
2348         for (i = 0; i < ATH10K_PCI_FEATURE_COUNT; i++) {
2349                 if (!test_bit(i, ar_pci->features))
2350                         continue;
2351
2352                 switch (i) {
2353                 case ATH10K_PCI_FEATURE_MSI_X:
2354                         ath10k_dbg(ATH10K_DBG_BOOT, "device supports MSI-X\n");
2355                         break;
2356                 case ATH10K_PCI_FEATURE_SOC_POWER_SAVE:
2357                         ath10k_dbg(ATH10K_DBG_BOOT, "QCA98XX SoC power save enabled\n");
2358                         break;
2359                 }
2360         }
2361 }
2362
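/*
 * PCI probe: allocate driver state, map the register BAR, restrict
 * DMA to 32-bit addresses, read the chip id and hand the device over
 * to the ath10k core.
 */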
2363 static int ath10k_pci_probe(struct pci_dev *pdev,
2364                             const struct pci_device_id *pci_dev)
2365 {
2366         void __iomem *mem;
2367         int ret = 0;
2368         struct ath10k *ar;
2369         struct ath10k_pci *ar_pci;
2370         u32 lcr_val, chip_id;
2371
2372         ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
2373
2374         ar_pci = kzalloc(sizeof(*ar_pci), GFP_KERNEL);
2375         if (ar_pci == NULL)
2376                 return -ENOMEM;
2377
2378         ar_pci->pdev = pdev;
2379         ar_pci->dev = &pdev->dev;
2380
2381         switch (pci_dev->device) {
2382         case QCA988X_2_0_DEVICE_ID:
2383                 set_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features);
2384                 break;
2385         default:
2386                 ret = -ENODEV;
2387                 ath10k_err("Unknown device ID: %d\n", pci_dev->device);
2388                 goto err_ar_pci;
2389         }
2390
2391         if (ath10k_target_ps)
2392                 set_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features);
2393
2394         ath10k_pci_dump_features(ar_pci);
2395
2396         ar = ath10k_core_create(ar_pci, ar_pci->dev, &ath10k_pci_hif_ops);
2397         if (!ar) {
2398                 ath10k_err("ath10k_core_create failed!\n");
2399                 ret = -EINVAL;
2400                 goto err_ar_pci;
2401         }
2402
2403         ar_pci->ar = ar;
2404         ar_pci->fw_indicator_address = FW_INDICATOR_ADDRESS;
2405         atomic_set(&ar_pci->keep_awake_count, 0);
2406
2407         pci_set_drvdata(pdev, ar);
2408
2409         /*
2410          * Without any knowledge of the Host, the Target may have been reset or
2411          * power cycled and its Config Space may no longer reflect the PCI
2412          * address space that was assigned earlier by the PCI infrastructure.
2413          * Refresh it now.
2414          */
2415         ret = pci_assign_resource(pdev, BAR_NUM);
2416         if (ret) {
2417                 ath10k_err("cannot assign PCI space: %d\n", ret);
2418                 goto err_ar;
2419         }
2420
2421         ret = pci_enable_device(pdev);
2422         if (ret) {
2423                 ath10k_err("cannot enable PCI device: %d\n", ret);
2424                 goto err_ar;
2425         }
2426
2427         /* Request MMIO resources */
2428         ret = pci_request_region(pdev, BAR_NUM, "ath");
2429         if (ret) {
2430                 ath10k_err("PCI MMIO reservation error: %d\n", ret);
2431                 goto err_device;
2432         }
2433
2434         /*
2435          * Target structures have a limit of 32 bit DMA pointers.
2436          * DMA pointers can be wider than 32 bits by default on some systems.
2437          */
2438         ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2439         if (ret) {
2440                 ath10k_err("32-bit DMA not available: %d\n", ret);
2441                 goto err_region;
2442         }
2443
2444         ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
2445         if (ret) {
2446                 ath10k_err("cannot enable 32-bit consistent DMA: %d\n", ret);
2447                 goto err_region;
2448         }
2449
2450         /* Set bus master bit in PCI_COMMAND to enable DMA */
2451         pci_set_master(pdev);
2452
2453         /*
2454          * Temporary FIX: disable ASPM
2455          * Will be removed after the OTP is programmed
2456          */
2457         pci_read_config_dword(pdev, 0x80, &lcr_val);
2458         pci_write_config_dword(pdev, 0x80, (lcr_val & 0xffffff00));
2459
2460         /* Arrange for access to Target SoC registers. */
2461         mem = pci_iomap(pdev, BAR_NUM, 0);
2462         if (!mem) {
2463                 ath10k_err("PCI iomap error\n");
2464                 ret = -EIO;
2465                 goto err_master;
2466         }
2467
2468         ar_pci->mem = mem;
2469
2470         spin_lock_init(&ar_pci->ce_lock);
2471
2472         ret = ath10k_do_pci_wake(ar);
2473         if (ret) {
2474                 ath10k_err("Failed to get chip id: %d\n", ret);
2475                 goto err_iomap;
2476         }
2477
2478         chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
2479
2480         ath10k_do_pci_sleep(ar);
2481
2482         ath10k_dbg(ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem);
2483
2484         ret = ath10k_core_register(ar, chip_id);
2485         if (ret) {
2486                 ath10k_err("could not register driver core (%d)\n", ret);
2487                 goto err_iomap;
2488         }
2489
2490         return 0;
2491
2492 err_iomap:
2493         pci_iounmap(pdev, mem);
2494 err_master:
2495         pci_clear_master(pdev);
2496 err_region:
2497         pci_release_region(pdev, BAR_NUM);
2498 err_device:
2499         pci_disable_device(pdev);
2500 err_ar:
2501         ath10k_core_destroy(ar);
2502 err_ar_pci:
2503         /* call HIF PCI free here */
2504         kfree(ar_pci);
2505
2506         return ret;
2507 }
2508
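/* Tear down everything probe set up, in roughly reverse order. */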
2509 static void ath10k_pci_remove(struct pci_dev *pdev)
2510 {
2511         struct ath10k *ar = pci_get_drvdata(pdev);
2512         struct ath10k_pci *ar_pci;
2513
2514         ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
2515
2516         if (!ar)
2517                 return;
2518
2519         ar_pci = ath10k_pci_priv(ar);
2520
2521         if (!ar_pci)
2522                 return;
2523
2524         tasklet_kill(&ar_pci->msi_fw_err);
2525
2526         ath10k_core_unregister(ar);
2527
2528         pci_iounmap(pdev, ar_pci->mem);
2529         pci_release_region(pdev, BAR_NUM);
2530         pci_clear_master(pdev);
2531         pci_disable_device(pdev);
2532
2533         ath10k_core_destroy(ar);
2534         kfree(ar_pci);
2535 }
2536
2537 MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);
2538
2539 static struct pci_driver ath10k_pci_driver = {
2540         .name = "ath10k_pci",
2541         .id_table = ath10k_pci_id_table,
2542         .probe = ath10k_pci_probe,
2543         .remove = ath10k_pci_remove,
2544 };
2545
2546 static int __init ath10k_pci_init(void)
2547 {
2548         int ret;
2549
2550         ret = pci_register_driver(&ath10k_pci_driver);
2551         if (ret)
2552                 ath10k_err("pci_register_driver failed: %d\n", ret);
2553
2554         return ret;
2555 }
2556 module_init(ath10k_pci_init);
2557
2558 static void __exit ath10k_pci_exit(void)
2559 {
2560         pci_unregister_driver(&ath10k_pci_driver);
2561 }
2562
2563 module_exit(ath10k_pci_exit);
2564
2565 MODULE_AUTHOR("Qualcomm Atheros");
2566 MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices");
2567 MODULE_LICENSE("Dual BSD/GPL");
2568 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_FILE);
2569 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_OTP_FILE);
2570 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);