ath10k: split up pci irq code
drivers/net/wireless/ath/ath10k/pci.c
/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/bitops.h>

#include "core.h"
#include "debug.h"

#include "targaddrs.h"
#include "bmi.h"

#include "hif.h"
#include "htc.h"

#include "ce.h"
#include "pci.h"

static unsigned int ath10k_target_ps;
module_param(ath10k_target_ps, uint, 0644);
MODULE_PARM_DESC(ath10k_target_ps, "Enable ath10k Target (SoC) PS option");
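
/*
 * Usage sketch (illustrative; the module name ath10k_pci is assumed):
 * the option can be given at load time, or toggled at runtime since the
 * parameter is created with mode 0644:
 *
 *     modprobe ath10k_pci ath10k_target_ps=1
 *     echo 1 > /sys/module/ath10k_pci/parameters/ath10k_target_ps
 */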

#define QCA988X_2_0_DEVICE_ID   (0x003c)

static DEFINE_PCI_DEVICE_TABLE(ath10k_pci_id_table) = {
        { PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
        {0}
};

static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
                                       u32 *data);

static void ath10k_pci_process_ce(struct ath10k *ar);
static int ath10k_pci_post_rx(struct ath10k *ar);
static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
                                   int num);
static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info);
static void ath10k_pci_stop_ce(struct ath10k *ar);
static int ath10k_pci_device_reset(struct ath10k *ar);
static int ath10k_pci_wait_for_target_init(struct ath10k *ar);
static int ath10k_pci_init_irq(struct ath10k *ar);
static int ath10k_pci_deinit_irq(struct ath10k *ar);
static int ath10k_pci_request_irq(struct ath10k *ar);
static void ath10k_pci_free_irq(struct ath10k *ar);

static const struct ce_attr host_ce_config_wlan[] = {
        /* CE0: host->target HTC control and raw streams */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 16,
                .src_sz_max = 256,
                .dest_nentries = 0,
        },

        /* CE1: target->host HTT + HTC control */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
                .src_sz_max = 512,
                .dest_nentries = 512,
        },

        /* CE2: target->host WMI */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
                .src_sz_max = 2048,
                .dest_nentries = 32,
        },

        /* CE3: host->target WMI */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 32,
                .src_sz_max = 2048,
                .dest_nentries = 0,
        },

        /* CE4: host->target HTT */
        {
                .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
                .src_nentries = CE_HTT_H2T_MSG_SRC_NENTRIES,
                .src_sz_max = 256,
                .dest_nentries = 0,
        },

        /* CE5: unused */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
                .src_sz_max = 0,
                .dest_nentries = 0,
        },

        /* CE6: target autonomous hif_memcpy */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 0,
                .src_sz_max = 0,
                .dest_nentries = 0,
        },

        /* CE7: ce_diag, the Diagnostic Window */
        {
                .flags = CE_ATTR_FLAGS,
                .src_nentries = 2,
                .src_sz_max = DIAG_TRANSFER_LIMIT,
                .dest_nentries = 2,
        },
};

/* Target firmware's Copy Engine configuration. */
static const struct ce_pipe_config target_ce_config_wlan[] = {
        /* CE0: host->target HTC control and raw streams */
        {
                .pipenum = 0,
                .pipedir = PIPEDIR_OUT,
                .nentries = 32,
                .nbytes_max = 256,
                .flags = CE_ATTR_FLAGS,
                .reserved = 0,
        },

        /* CE1: target->host HTT + HTC control */
        {
                .pipenum = 1,
                .pipedir = PIPEDIR_IN,
                .nentries = 32,
                .nbytes_max = 512,
                .flags = CE_ATTR_FLAGS,
                .reserved = 0,
        },

        /* CE2: target->host WMI */
        {
                .pipenum = 2,
                .pipedir = PIPEDIR_IN,
                .nentries = 32,
                .nbytes_max = 2048,
                .flags = CE_ATTR_FLAGS,
                .reserved = 0,
        },

        /* CE3: host->target WMI */
        {
                .pipenum = 3,
                .pipedir = PIPEDIR_OUT,
                .nentries = 32,
                .nbytes_max = 2048,
                .flags = CE_ATTR_FLAGS,
                .reserved = 0,
        },

        /* CE4: host->target HTT */
        {
                .pipenum = 4,
                .pipedir = PIPEDIR_OUT,
                .nentries = 256,
                .nbytes_max = 256,
                .flags = CE_ATTR_FLAGS,
                .reserved = 0,
        },

        /* NB: 50% of src nentries, since tx has 2 frags */

        /* CE5: unused */
        {
                .pipenum = 5,
                .pipedir = PIPEDIR_OUT,
                .nentries = 32,
                .nbytes_max = 2048,
                .flags = CE_ATTR_FLAGS,
                .reserved = 0,
        },

        /* CE6: Reserved for target autonomous hif_memcpy */
        {
                .pipenum = 6,
                .pipedir = PIPEDIR_INOUT,
                .nentries = 32,
                .nbytes_max = 4096,
                .flags = CE_ATTR_FLAGS,
                .reserved = 0,
        },

        /* CE7 used only by Host */
};
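
/*
 * The two tables above must stay index-aligned: host_ce_config_wlan
 * holds the host-side ring attributes for each Copy Engine, while
 * target_ce_config_wlan is the per-pipe view handed to target firmware
 * so both sides agree on each pipe's direction and buffer limits.
 */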

static bool ath10k_pci_irq_pending(struct ath10k *ar)
{
        u32 cause;

        /* Check if the shared legacy irq is for us */
        cause = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
                                  PCIE_INTR_CAUSE_ADDRESS);
        if (cause & (PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL))
                return true;

        return false;
}
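
/*
 * Typical use (a minimal sketch; the call site is not part of this
 * excerpt): the shared legacy interrupt handler checks ownership before
 * claiming the line:
 *
 *     if (!ath10k_pci_irq_pending(ar))
 *             return IRQ_NONE;
 */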

/*
 * Diagnostic read/write access is provided for startup/config/debug usage.
 * Caller must guarantee proper alignment, when applicable, and single user
 * at any moment.
 */
static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
                                    int nbytes)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ret = 0;
        u32 buf;
        unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
        unsigned int id;
        unsigned int flags;
        struct ath10k_ce_pipe *ce_diag;
        /* Host buffer address in CE space */
        u32 ce_data;
        dma_addr_t ce_data_base = 0;
        void *data_buf = NULL;
        int i;

        /*
         * This code cannot handle reads to non-memory space. Redirect to the
         * register read fn but preserve the multi word read capability of
         * this fn
         */
        if (address < DRAM_BASE_ADDRESS) {
                if (!IS_ALIGNED(address, 4) ||
                    !IS_ALIGNED((unsigned long)data, 4))
                        return -EIO;

                while ((nbytes >= 4) && ((ret = ath10k_pci_diag_read_access(
                                          ar, address, (u32 *)data)) == 0)) {
                        nbytes -= sizeof(u32);
                        address += sizeof(u32);
                        data += sizeof(u32);
                }
                return ret;
        }

        ce_diag = ar_pci->ce_diag;

        /*
         * Allocate a temporary bounce buffer to hold caller's data
         * to be DMA'ed from Target. This guarantees
         *   1) 4-byte alignment
         *   2) Buffer in DMA-able space
         */
        orig_nbytes = nbytes;
        data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev,
                                                         orig_nbytes,
                                                         &ce_data_base);

        if (!data_buf) {
                ret = -ENOMEM;
                goto done;
        }
        memset(data_buf, 0, orig_nbytes);

        remaining_bytes = orig_nbytes;
        ce_data = ce_data_base;
        while (remaining_bytes) {
                nbytes = min_t(unsigned int, remaining_bytes,
                               DIAG_TRANSFER_LIMIT);

                ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, ce_data);
                if (ret != 0)
                        goto done;

                /* Request CE to send from Target(!) address to Host buffer */
                /*
                 * The address supplied by the caller is in the
                 * Target CPU virtual address space.
                 *
                 * In order to use this address with the diagnostic CE,
                 * convert it from Target CPU virtual address space
                 * to CE address space
                 */
                ath10k_pci_wake(ar);
                address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem,
                                                     address);
                ath10k_pci_sleep(ar);

                ret = ath10k_ce_send(ce_diag, NULL, (u32)address, nbytes, 0,
                                     0);
                if (ret)
                        goto done;

                i = 0;
                while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
                                                     &completed_nbytes,
                                                     &id) != 0) {
                        mdelay(1);
                        if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
                                ret = -EBUSY;
                                goto done;
                        }
                }

                if (nbytes != completed_nbytes) {
                        ret = -EIO;
                        goto done;
                }

                if (buf != (u32)address) {
                        ret = -EIO;
                        goto done;
                }

                i = 0;
                while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
                                                     &completed_nbytes,
                                                     &id, &flags) != 0) {
                        mdelay(1);

                        if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
                                ret = -EBUSY;
                                goto done;
                        }
                }

                if (nbytes != completed_nbytes) {
                        ret = -EIO;
                        goto done;
                }

                if (buf != ce_data) {
                        ret = -EIO;
                        goto done;
                }

                remaining_bytes -= nbytes;
                address += nbytes;
                ce_data += nbytes;
        }

done:
        if (ret == 0) {
                /* Copy data from allocated DMA buf to caller's buf */
                WARN_ON_ONCE(orig_nbytes & 3);
                for (i = 0; i < orig_nbytes / sizeof(__le32); i++) {
                        ((u32 *)data)[i] =
                                __le32_to_cpu(((__le32 *)data_buf)[i]);
                }
        } else {
                ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n",
                           __func__, address);
        }

        if (data_buf)
                pci_free_consistent(ar_pci->pdev, orig_nbytes,
                                    data_buf, ce_data_base);

        return ret;
}
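
/*
 * Usage sketch (illustrative; address and size are hypothetical):
 * reading one word of target memory through the diagnostic window.
 *
 *     u32 val;
 *     int ret;
 *
 *     ret = ath10k_pci_diag_read_mem(ar, DRAM_BASE_ADDRESS, &val,
 *                                    sizeof(val));
 *     if (ret)
 *             return ret;
 */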

/* Read 4-byte aligned data from Target memory or register */
static int ath10k_pci_diag_read_access(struct ath10k *ar, u32 address,
                                       u32 *data)
{
        /* Assume range doesn't cross this boundary */
        if (address >= DRAM_BASE_ADDRESS)
                return ath10k_pci_diag_read_mem(ar, address, data, sizeof(u32));

        ath10k_pci_wake(ar);
        *data = ath10k_pci_read32(ar, address);
        ath10k_pci_sleep(ar);
        return 0;
}

static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
                                     const void *data, int nbytes)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ret = 0;
        u32 buf;
        unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
        unsigned int id;
        unsigned int flags;
        struct ath10k_ce_pipe *ce_diag;
        void *data_buf = NULL;
        u32 ce_data;    /* Host buffer address in CE space */
        dma_addr_t ce_data_base = 0;
        int i;

        ce_diag = ar_pci->ce_diag;

        /*
         * Allocate a temporary bounce buffer to hold caller's data
         * to be DMA'ed to Target. This guarantees
         *   1) 4-byte alignment
         *   2) Buffer in DMA-able space
         */
        orig_nbytes = nbytes;
        data_buf = (unsigned char *)pci_alloc_consistent(ar_pci->pdev,
                                                         orig_nbytes,
                                                         &ce_data_base);
        if (!data_buf) {
                ret = -ENOMEM;
                goto done;
        }

        /* Copy caller's data to allocated DMA buf */
        WARN_ON_ONCE(orig_nbytes & 3);
        for (i = 0; i < orig_nbytes / sizeof(__le32); i++)
                ((__le32 *)data_buf)[i] = __cpu_to_le32(((u32 *)data)[i]);

        /*
         * The address supplied by the caller is in the
         * Target CPU virtual address space.
         *
         * In order to use this address with the diagnostic CE,
         * convert it from
         *    Target CPU virtual address space
         * to
         *    CE address space
         */
        ath10k_pci_wake(ar);
        address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem, address);
        ath10k_pci_sleep(ar);

        remaining_bytes = orig_nbytes;
        ce_data = ce_data_base;
        while (remaining_bytes) {
                /* FIXME: check cast */
                nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);

                /* Set up to receive directly into Target(!) address */
                ret = ath10k_ce_recv_buf_enqueue(ce_diag, NULL, address);
                if (ret != 0)
                        goto done;

                /*
                 * Request CE to send caller-supplied data that
                 * was copied to bounce buffer to Target(!) address.
                 */
                ret = ath10k_ce_send(ce_diag, NULL, (u32)ce_data,
                                     nbytes, 0, 0);
                if (ret != 0)
                        goto done;

                i = 0;
                while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
                                                     &completed_nbytes,
                                                     &id) != 0) {
                        mdelay(1);

                        if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
                                ret = -EBUSY;
                                goto done;
                        }
                }

                if (nbytes != completed_nbytes) {
                        ret = -EIO;
                        goto done;
                }

                if (buf != ce_data) {
                        ret = -EIO;
                        goto done;
                }

                i = 0;
                while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
                                                     &completed_nbytes,
                                                     &id, &flags) != 0) {
                        mdelay(1);

                        if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
                                ret = -EBUSY;
                                goto done;
                        }
                }

                if (nbytes != completed_nbytes) {
                        ret = -EIO;
                        goto done;
                }

                if (buf != address) {
                        ret = -EIO;
                        goto done;
                }

                remaining_bytes -= nbytes;
                address += nbytes;
                ce_data += nbytes;
        }

done:
        if (data_buf) {
                pci_free_consistent(ar_pci->pdev, orig_nbytes, data_buf,
                                    ce_data_base);
        }

        if (ret != 0)
                ath10k_dbg(ATH10K_DBG_PCI, "%s failure (0x%x)\n", __func__,
                           address);

        return ret;
}

/* Write 4B data to Target memory or register */
static int ath10k_pci_diag_write_access(struct ath10k *ar, u32 address,
                                        u32 data)
{
        /* Assume range doesn't cross this boundary */
        if (address >= DRAM_BASE_ADDRESS)
                return ath10k_pci_diag_write_mem(ar, address, &data,
                                                 sizeof(u32));

        ath10k_pci_wake(ar);
        ath10k_pci_write32(ar, address, data);
        ath10k_pci_sleep(ar);
        return 0;
}

static bool ath10k_pci_target_is_awake(struct ath10k *ar)
{
        void __iomem *mem = ath10k_pci_priv(ar)->mem;
        u32 val;

        val = ioread32(mem + PCIE_LOCAL_BASE_ADDRESS +
                       RTC_STATE_ADDRESS);
        return (RTC_STATE_V_GET(val) == RTC_STATE_V_ON);
}

int ath10k_do_pci_wake(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        void __iomem *pci_addr = ar_pci->mem;
        int tot_delay = 0;
        int curr_delay = 5;

        if (atomic_read(&ar_pci->keep_awake_count) == 0) {
                /* Force AWAKE */
                iowrite32(PCIE_SOC_WAKE_V_MASK,
                          pci_addr + PCIE_LOCAL_BASE_ADDRESS +
                          PCIE_SOC_WAKE_ADDRESS);
        }
        atomic_inc(&ar_pci->keep_awake_count);

        if (ar_pci->verified_awake)
                return 0;

        for (;;) {
                if (ath10k_pci_target_is_awake(ar)) {
                        ar_pci->verified_awake = true;
                        return 0;
                }

                if (tot_delay > PCIE_WAKE_TIMEOUT) {
                        ath10k_warn("target took longer than %d us to wake up (awake count %d)\n",
                                    PCIE_WAKE_TIMEOUT,
                                    atomic_read(&ar_pci->keep_awake_count));
                        return -ETIMEDOUT;
                }

                udelay(curr_delay);
                tot_delay += curr_delay;

                if (curr_delay < 50)
                        curr_delay += 5;
        }
}

void ath10k_do_pci_sleep(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        void __iomem *pci_addr = ar_pci->mem;

        if (atomic_dec_and_test(&ar_pci->keep_awake_count)) {
                /* Allow sleep */
                ar_pci->verified_awake = false;
                iowrite32(PCIE_SOC_WAKE_RESET,
                          pci_addr + PCIE_LOCAL_BASE_ADDRESS +
                          PCIE_SOC_WAKE_ADDRESS);
        }
}
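
/*
 * Wake/sleep usage (a minimal sketch; addr is hypothetical): callers
 * bracket register access with a wake/sleep pair, and keep_awake_count
 * lets such pairs nest safely.
 *
 *     ath10k_pci_wake(ar);
 *     val = ath10k_pci_read32(ar, addr);
 *     ath10k_pci_sleep(ar);
 */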

/*
 * FIXME: Handle OOM properly.
 */
static inline
struct ath10k_pci_compl *get_free_compl(struct ath10k_pci_pipe *pipe_info)
{
        struct ath10k_pci_compl *compl = NULL;

        spin_lock_bh(&pipe_info->pipe_lock);
        if (list_empty(&pipe_info->compl_free)) {
                ath10k_warn("Completion buffers are full\n");
                goto exit;
        }
        compl = list_first_entry(&pipe_info->compl_free,
                                 struct ath10k_pci_compl, list);
        list_del(&compl->list);
exit:
        spin_unlock_bh(&pipe_info->pipe_lock);
        return compl;
}

/* Called by lower (CE) layer when a send to Target completes. */
static void ath10k_pci_ce_send_done(struct ath10k_ce_pipe *ce_state)
{
        struct ath10k *ar = ce_state->ar;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
        struct ath10k_pci_compl *compl;
        void *transfer_context;
        u32 ce_data;
        unsigned int nbytes;
        unsigned int transfer_id;

        while (ath10k_ce_completed_send_next(ce_state, &transfer_context,
                                             &ce_data, &nbytes,
                                             &transfer_id) == 0) {
                compl = get_free_compl(pipe_info);
                if (!compl)
                        break;

                compl->state = ATH10K_PCI_COMPL_SEND;
                compl->ce_state = ce_state;
                compl->pipe_info = pipe_info;
                compl->skb = transfer_context;
                compl->nbytes = nbytes;
                compl->transfer_id = transfer_id;
                compl->flags = 0;

                /*
                 * Add the completion to the processing queue.
                 */
                spin_lock_bh(&ar_pci->compl_lock);
                list_add_tail(&compl->list, &ar_pci->compl_process);
                spin_unlock_bh(&ar_pci->compl_lock);
        }

        ath10k_pci_process_ce(ar);
}

/* Called by lower (CE) layer when data is received from the Target. */
static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state)
{
        struct ath10k *ar = ce_state->ar;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
        struct ath10k_pci_compl *compl;
        struct sk_buff *skb;
        void *transfer_context;
        u32 ce_data;
        unsigned int nbytes;
        unsigned int transfer_id;
        unsigned int flags;

        while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
                                             &ce_data, &nbytes, &transfer_id,
                                             &flags) == 0) {
                compl = get_free_compl(pipe_info);
                if (!compl)
                        break;

                compl->state = ATH10K_PCI_COMPL_RECV;
                compl->ce_state = ce_state;
                compl->pipe_info = pipe_info;
                compl->skb = transfer_context;
                compl->nbytes = nbytes;
                compl->transfer_id = transfer_id;
                compl->flags = flags;

                skb = transfer_context;
                dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
                                 skb->len + skb_tailroom(skb),
                                 DMA_FROM_DEVICE);
                /*
                 * Add the completion to the processing queue.
                 */
                spin_lock_bh(&ar_pci->compl_lock);
                list_add_tail(&compl->list, &ar_pci->compl_process);
                spin_unlock_bh(&ar_pci->compl_lock);
        }

        ath10k_pci_process_ce(ar);
}

/* Send the first nbytes bytes of the buffer */
static int ath10k_pci_hif_send_head(struct ath10k *ar, u8 pipe_id,
                                    unsigned int transfer_id,
                                    unsigned int bytes, struct sk_buff *nbuf)
{
        struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(nbuf);
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_pipe *pipe_info = &(ar_pci->pipe_info[pipe_id]);
        struct ath10k_ce_pipe *ce_hdl = pipe_info->ce_hdl;
        unsigned int len;
        u32 flags = 0;
        int ret;

        len = min(bytes, nbuf->len);
        bytes -= len;

        if (len & 3)
                ath10k_warn("skb not aligned to 4-byte boundary (%d)\n", len);

        ath10k_dbg(ATH10K_DBG_PCI,
                   "pci send data vaddr %p paddr 0x%llx len %d as %d bytes\n",
                   nbuf->data, (unsigned long long)skb_cb->paddr,
                   nbuf->len, len);
        ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL,
                        "ath10k tx: data: ",
                        nbuf->data, nbuf->len);

        ret = ath10k_ce_send(ce_hdl, nbuf, skb_cb->paddr, len, transfer_id,
                             flags);
        if (ret)
                ath10k_warn("failed to send sk_buff to CE: %p\n", nbuf);

        return ret;
}

static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        return ath10k_ce_num_free_src_entries(ar_pci->pipe_info[pipe].ce_hdl);
}

static void ath10k_pci_hif_dump_area(struct ath10k *ar)
{
        u32 reg_dump_area = 0;
        u32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
        u32 host_addr;
        int ret;
        u32 i;

        ath10k_err("firmware crashed!\n");
        ath10k_err("hardware name %s version 0x%x\n",
                   ar->hw_params.name, ar->target_version);
        ath10k_err("firmware version: %u.%u.%u.%u\n", ar->fw_version_major,
                   ar->fw_version_minor, ar->fw_version_release,
                   ar->fw_version_build);

        host_addr = host_interest_item_address(HI_ITEM(hi_failure_state));
        ret = ath10k_pci_diag_read_mem(ar, host_addr,
                                       &reg_dump_area, sizeof(u32));
        if (ret) {
                ath10k_err("failed to read FW dump area address: %d\n", ret);
                return;
        }

        ath10k_err("target register dump location: 0x%08X\n", reg_dump_area);

        ret = ath10k_pci_diag_read_mem(ar, reg_dump_area,
                                       &reg_dump_values[0],
                                       REG_DUMP_COUNT_QCA988X * sizeof(u32));
        if (ret != 0) {
                ath10k_err("failed to read FW dump area: %d\n", ret);
                return;
        }

        BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4);

        ath10k_err("target register dump:\n");
        for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4)
                ath10k_err("[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
                           i,
                           reg_dump_values[i],
                           reg_dump_values[i + 1],
                           reg_dump_values[i + 2],
                           reg_dump_values[i + 3]);

        queue_work(ar->workqueue, &ar->restart_work);
}

static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
                                               int force)
{
        if (!force) {
                int resources;
                /*
                 * Decide whether to actually poll for completions, or just
                 * wait for a later chance.
                 * If there seem to be plenty of resources left, then just wait
                 * since checking involves reading a CE register, which is a
                 * relatively expensive operation.
                 */
                resources = ath10k_pci_hif_get_free_queue_number(ar, pipe);

                /*
                 * If at least 50% of the total resources are still available,
                 * don't bother checking again yet.
                 */
                if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
                        return;
        }
        ath10k_ce_per_engine_service(ar, pipe);
}
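
/*
 * Worked example with the table above: CE3 (host->target WMI) has 32
 * src entries, so the poll is skipped while more than 16 (50%) of them
 * remain free.
 */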

static void ath10k_pci_hif_set_callbacks(struct ath10k *ar,
                                         struct ath10k_hif_cb *callbacks)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);

        memcpy(&ar_pci->msg_callbacks_current, callbacks,
               sizeof(ar_pci->msg_callbacks_current));
}

static int ath10k_pci_start_ce(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_ce_pipe *ce_diag = ar_pci->ce_diag;
        const struct ce_attr *attr;
        struct ath10k_pci_pipe *pipe_info;
        struct ath10k_pci_compl *compl;
        int i, pipe_num, completions, disable_interrupts;

        spin_lock_init(&ar_pci->compl_lock);
        INIT_LIST_HEAD(&ar_pci->compl_process);

        for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
                pipe_info = &ar_pci->pipe_info[pipe_num];

                spin_lock_init(&pipe_info->pipe_lock);
                INIT_LIST_HEAD(&pipe_info->compl_free);

                /* Handle Diagnostic CE specially */
                if (pipe_info->ce_hdl == ce_diag)
                        continue;

                attr = &host_ce_config_wlan[pipe_num];
                completions = 0;

                if (attr->src_nentries) {
                        disable_interrupts = attr->flags & CE_ATTR_DIS_INTR;
                        ath10k_ce_send_cb_register(pipe_info->ce_hdl,
                                                   ath10k_pci_ce_send_done,
                                                   disable_interrupts);
                        completions += attr->src_nentries;
                }

                if (attr->dest_nentries) {
                        ath10k_ce_recv_cb_register(pipe_info->ce_hdl,
                                                   ath10k_pci_ce_recv_data);
                        completions += attr->dest_nentries;
                }

                if (completions == 0)
                        continue;

                for (i = 0; i < completions; i++) {
                        compl = kmalloc(sizeof(*compl), GFP_KERNEL);
                        if (!compl) {
                                ath10k_warn("No memory for completion state\n");
                                ath10k_pci_stop_ce(ar);
                                return -ENOMEM;
                        }

                        compl->state = ATH10K_PCI_COMPL_FREE;
                        list_add_tail(&compl->list, &pipe_info->compl_free);
                }
        }

        return 0;
}
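
/*
 * Note: each pipe preallocates one completion struct per ring entry
 * (src_nentries plus dest_nentries), which is why get_free_compl()
 * running dry is treated as a warning rather than a normal condition.
 */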

static void ath10k_pci_kill_tasklet(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int i;

        tasklet_kill(&ar_pci->intr_tq);
        tasklet_kill(&ar_pci->msi_fw_err);

        for (i = 0; i < CE_COUNT; i++)
                tasklet_kill(&ar_pci->pipe_info[i].intr);
}

static void ath10k_pci_stop_ce(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_compl *compl;
        struct sk_buff *skb;
        int ret;

        ret = ath10k_ce_disable_interrupts(ar);
        if (ret)
                ath10k_warn("failed to disable CE interrupts: %d\n", ret);

        ath10k_pci_kill_tasklet(ar);

        /* Mark pending completions as aborted, so that upper layers free up
         * their associated resources */
        spin_lock_bh(&ar_pci->compl_lock);
        list_for_each_entry(compl, &ar_pci->compl_process, list) {
                skb = compl->skb;
                ATH10K_SKB_CB(skb)->is_aborted = true;
        }
        spin_unlock_bh(&ar_pci->compl_lock);
}

static void ath10k_pci_cleanup_ce(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_compl *compl, *tmp;
        struct ath10k_pci_pipe *pipe_info;
        struct sk_buff *netbuf;
        int pipe_num;

        /* Free pending completions. */
        spin_lock_bh(&ar_pci->compl_lock);
        if (!list_empty(&ar_pci->compl_process))
                ath10k_warn("pending completions still present! possible memory leaks.\n");

        list_for_each_entry_safe(compl, tmp, &ar_pci->compl_process, list) {
                list_del(&compl->list);
                netbuf = compl->skb;
                dev_kfree_skb_any(netbuf);
                kfree(compl);
        }
        spin_unlock_bh(&ar_pci->compl_lock);

        /* Free unused completions for each pipe. */
        for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
                pipe_info = &ar_pci->pipe_info[pipe_num];

                spin_lock_bh(&pipe_info->pipe_lock);
                list_for_each_entry_safe(compl, tmp,
                                         &pipe_info->compl_free, list) {
                        list_del(&compl->list);
                        kfree(compl);
                }
                spin_unlock_bh(&pipe_info->pipe_lock);
        }
}

static void ath10k_pci_process_ce(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ar->hif.priv;
        struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
        struct ath10k_pci_compl *compl;
        struct sk_buff *skb;
        unsigned int nbytes;
        int ret, send_done = 0;

        /* Upper layers aren't ready to handle tx/rx completions in parallel so
         * we must serialize all completion processing. */

        spin_lock_bh(&ar_pci->compl_lock);
        if (ar_pci->compl_processing) {
                spin_unlock_bh(&ar_pci->compl_lock);
                return;
        }
        ar_pci->compl_processing = true;
        spin_unlock_bh(&ar_pci->compl_lock);

        for (;;) {
                spin_lock_bh(&ar_pci->compl_lock);
                if (list_empty(&ar_pci->compl_process)) {
                        spin_unlock_bh(&ar_pci->compl_lock);
                        break;
                }
                compl = list_first_entry(&ar_pci->compl_process,
                                         struct ath10k_pci_compl, list);
                list_del(&compl->list);
                spin_unlock_bh(&ar_pci->compl_lock);

                switch (compl->state) {
                case ATH10K_PCI_COMPL_SEND:
                        cb->tx_completion(ar,
                                          compl->skb,
                                          compl->transfer_id);
                        send_done = 1;
                        break;
                case ATH10K_PCI_COMPL_RECV:
                        ret = ath10k_pci_post_rx_pipe(compl->pipe_info, 1);
                        if (ret) {
                                ath10k_warn("failed to post RX buffer for pipe %d: %d\n",
                                            compl->pipe_info->pipe_num, ret);
                                break;
                        }

                        skb = compl->skb;
                        nbytes = compl->nbytes;

                        ath10k_dbg(ATH10K_DBG_PCI,
                                   "ath10k_pci_ce_recv_data netbuf=%p nbytes=%d\n",
                                   skb, nbytes);
                        ath10k_dbg_dump(ATH10K_DBG_PCI_DUMP, NULL,
                                        "ath10k rx: ", skb->data, nbytes);

                        if (skb->len + skb_tailroom(skb) >= nbytes) {
                                skb_trim(skb, 0);
                                skb_put(skb, nbytes);
                                cb->rx_completion(ar, skb,
                                                  compl->pipe_info->pipe_num);
                        } else {
                                ath10k_warn("rxed more than expected (nbytes %d, max %d)\n",
                                            nbytes,
                                            skb->len + skb_tailroom(skb));
                        }
                        break;
                case ATH10K_PCI_COMPL_FREE:
                        ath10k_warn("free completion cannot be processed\n");
                        break;
                default:
                        ath10k_warn("invalid completion state (%d)\n",
                                    compl->state);
                        break;
                }

                compl->state = ATH10K_PCI_COMPL_FREE;

                /*
                 * Add completion back to the pipe's free list.
                 */
                spin_lock_bh(&compl->pipe_info->pipe_lock);
                list_add_tail(&compl->list, &compl->pipe_info->compl_free);
                spin_unlock_bh(&compl->pipe_info->pipe_lock);
        }

        spin_lock_bh(&ar_pci->compl_lock);
        ar_pci->compl_processing = false;
        spin_unlock_bh(&ar_pci->compl_lock);
}

/* TODO - temporary mapping while we have too few CE's */
static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar,
                                              u16 service_id, u8 *ul_pipe,
                                              u8 *dl_pipe, int *ul_is_polled,
                                              int *dl_is_polled)
{
        int ret = 0;

        /* polling for received messages not supported */
        *dl_is_polled = 0;

        switch (service_id) {
        case ATH10K_HTC_SVC_ID_HTT_DATA_MSG:
                /*
                 * Host->target HTT gets its own pipe, so it can be polled
                 * while other pipes are interrupt driven.
                 */
                *ul_pipe = 4;
                /*
                 * Use the same target->host pipe for HTC ctrl, HTC raw
                 * streams, and HTT.
                 */
                *dl_pipe = 1;
                break;

        case ATH10K_HTC_SVC_ID_RSVD_CTRL:
        case ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS:
                /*
                 * Note: HTC_RAW_STREAMS_SVC is currently unused, and
                 * HTC_CTRL_RSVD_SVC could share the same pipe as the
                 * WMI services.  So, if another CE is needed, change
                 * this to *ul_pipe = 3, which frees up CE 0.
                 */
                /* *ul_pipe = 3; */
                *ul_pipe = 0;
                *dl_pipe = 1;
                break;

        case ATH10K_HTC_SVC_ID_WMI_DATA_BK:
        case ATH10K_HTC_SVC_ID_WMI_DATA_BE:
        case ATH10K_HTC_SVC_ID_WMI_DATA_VI:
        case ATH10K_HTC_SVC_ID_WMI_DATA_VO:
        case ATH10K_HTC_SVC_ID_WMI_CONTROL:
                *ul_pipe = 3;
                *dl_pipe = 2;
                break;

                /* pipe 5 unused   */
                /* pipe 6 reserved */
                /* pipe 7 reserved */

        default:
                ret = -1;
                break;
        }
        *ul_is_polled =
                (host_ce_config_wlan[*ul_pipe].flags & CE_ATTR_DIS_INTR) != 0;

        return ret;
}
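
/*
 * Example resolution from the map above: ATH10K_HTC_SVC_ID_WMI_CONTROL
 * yields ul_pipe 3 (host->target WMI, interrupt driven) and dl_pipe 2
 * (target->host WMI).
 */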

static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
                                            u8 *ul_pipe, u8 *dl_pipe)
{
        int ul_is_polled, dl_is_polled;

        (void)ath10k_pci_hif_map_service_to_pipe(ar,
                                                 ATH10K_HTC_SVC_ID_RSVD_CTRL,
                                                 ul_pipe,
                                                 dl_pipe,
                                                 &ul_is_polled,
                                                 &dl_is_polled);
}

static int ath10k_pci_post_rx_pipe(struct ath10k_pci_pipe *pipe_info,
                                   int num)
{
        struct ath10k *ar = pipe_info->hif_ce_state;
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_ce_pipe *ce_state = pipe_info->ce_hdl;
        struct sk_buff *skb;
        dma_addr_t ce_data;
        int i, ret = 0;

        if (pipe_info->buf_sz == 0)
                return 0;

        for (i = 0; i < num; i++) {
                skb = dev_alloc_skb(pipe_info->buf_sz);
                if (!skb) {
                        ath10k_warn("failed to allocate skbuff for pipe %d\n",
                                    pipe_info->pipe_num);
                        ret = -ENOMEM;
                        goto err;
                }

                WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");

                ce_data = dma_map_single(ar->dev, skb->data,
                                         skb->len + skb_tailroom(skb),
                                         DMA_FROM_DEVICE);

                if (unlikely(dma_mapping_error(ar->dev, ce_data))) {
                        ath10k_warn("failed to DMA map sk_buff\n");
                        dev_kfree_skb_any(skb);
                        ret = -EIO;
                        goto err;
                }

                ATH10K_SKB_CB(skb)->paddr = ce_data;

                pci_dma_sync_single_for_device(ar_pci->pdev, ce_data,
                                               pipe_info->buf_sz,
                                               PCI_DMA_FROMDEVICE);

                ret = ath10k_ce_recv_buf_enqueue(ce_state, (void *)skb,
                                                 ce_data);
                if (ret) {
                        ath10k_warn("failed to enqueue to pipe %d: %d\n",
                                    pipe_info->pipe_num, ret);
                        goto err;
                }
        }

        return ret;

err:
        ath10k_pci_rx_pipe_cleanup(pipe_info);
        return ret;
}

static int ath10k_pci_post_rx(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_pipe *pipe_info;
        const struct ce_attr *attr;
        int pipe_num, ret = 0;

        for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
                pipe_info = &ar_pci->pipe_info[pipe_num];
                attr = &host_ce_config_wlan[pipe_num];

                if (attr->dest_nentries == 0)
                        continue;

                ret = ath10k_pci_post_rx_pipe(pipe_info,
                                              attr->dest_nentries - 1);
                if (ret) {
                        ath10k_warn("failed to post RX buffer for pipe %d: %d\n",
                                    pipe_num, ret);

                        for (; pipe_num >= 0; pipe_num--) {
                                pipe_info = &ar_pci->pipe_info[pipe_num];
                                ath10k_pci_rx_pipe_cleanup(pipe_info);
                        }
                        return ret;
                }
        }

        return 0;
}

static int ath10k_pci_hif_start(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int ret;

        ret = ath10k_pci_start_ce(ar);
        if (ret) {
                ath10k_warn("failed to start CE: %d\n", ret);
                return ret;
        }

        /* Post buffers once to start things off. */
        ret = ath10k_pci_post_rx(ar);
        if (ret) {
                ath10k_warn("failed to post RX buffers for all pipes: %d\n",
                            ret);
                return ret;
        }

        ar_pci->started = 1;
        return 0;
}

static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
{
        struct ath10k *ar;
        struct ath10k_pci *ar_pci;
        struct ath10k_ce_pipe *ce_hdl;
        u32 buf_sz;
        struct sk_buff *netbuf;
        u32 ce_data;

        buf_sz = pipe_info->buf_sz;

        /* Unused Copy Engine */
        if (buf_sz == 0)
                return;

        ar = pipe_info->hif_ce_state;
        ar_pci = ath10k_pci_priv(ar);

        if (!ar_pci->started)
                return;

        ce_hdl = pipe_info->ce_hdl;

        while (ath10k_ce_revoke_recv_next(ce_hdl, (void **)&netbuf,
                                          &ce_data) == 0) {
                dma_unmap_single(ar->dev, ATH10K_SKB_CB(netbuf)->paddr,
                                 netbuf->len + skb_tailroom(netbuf),
                                 DMA_FROM_DEVICE);
                dev_kfree_skb_any(netbuf);
        }
}

static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
{
        struct ath10k *ar;
        struct ath10k_pci *ar_pci;
        struct ath10k_ce_pipe *ce_hdl;
        struct sk_buff *netbuf;
        u32 ce_data;
        unsigned int nbytes;
        unsigned int id;
        u32 buf_sz;

        buf_sz = pipe_info->buf_sz;

        /* Unused Copy Engine */
        if (buf_sz == 0)
                return;

        ar = pipe_info->hif_ce_state;
        ar_pci = ath10k_pci_priv(ar);

        if (!ar_pci->started)
                return;

        ce_hdl = pipe_info->ce_hdl;

        while (ath10k_ce_cancel_send_next(ce_hdl, (void **)&netbuf,
                                          &ce_data, &nbytes, &id) == 0) {
                /*
                 * Indicate the completion to higher layer to free
                 * the buffer
                 */

                if (!netbuf) {
                        ath10k_warn("invalid sk_buff on CE %d - NULL pointer. firmware crashed?\n",
                                    ce_hdl->id);
                        continue;
                }

                ATH10K_SKB_CB(netbuf)->is_aborted = true;
                ar_pci->msg_callbacks_current.tx_completion(ar,
                                                            netbuf,
                                                            id);
        }
}

/*
 * Cleanup residual buffers for device shutdown:
 *    buffers that were enqueued for receive
 *    buffers that were to be sent
 * Note: Buffers that had completed but which were
 * not yet processed are on a completion queue. They
 * are handled when the completion thread shuts down.
 */
static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int pipe_num;

        for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
                struct ath10k_pci_pipe *pipe_info;

                pipe_info = &ar_pci->pipe_info[pipe_num];
                ath10k_pci_rx_pipe_cleanup(pipe_info);
                ath10k_pci_tx_pipe_cleanup(pipe_info);
        }
}

static void ath10k_pci_ce_deinit(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_pipe *pipe_info;
        int pipe_num;

        for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
                pipe_info = &ar_pci->pipe_info[pipe_num];
                if (pipe_info->ce_hdl) {
                        ath10k_ce_deinit(pipe_info->ce_hdl);
                        pipe_info->ce_hdl = NULL;
                        pipe_info->buf_sz = 0;
                }
        }
}

static void ath10k_pci_disable_irqs(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        int i;

        for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
                disable_irq(ar_pci->pdev->irq + i);
}

static void ath10k_pci_hif_stop(struct ath10k *ar)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);

        ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);

        /* Irqs are never explicitly re-enabled here. They are implicitly
         * re-enabled upon power_up. */
        ath10k_pci_disable_irqs(ar);

        ath10k_pci_stop_ce(ar);

        /* At this point, asynchronous threads are stopped, the target should
         * not DMA nor interrupt. We process the leftovers and then free
         * everything else up. */

        ath10k_pci_process_ce(ar);
        ath10k_pci_cleanup_ce(ar);
        ath10k_pci_buffer_cleanup(ar);

        /* Make sure the device won't access any structures on the host by
         * resetting it. The device was fed with PCI CE ringbuffer
         * configuration during init. If ringbuffers are freed and the device
         * were to access them this could lead to memory corruption on the
         * host. */
        ath10k_pci_device_reset(ar);

        ar_pci->started = 0;
}

static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
                                           void *req, u32 req_len,
                                           void *resp, u32 *resp_len)
{
        struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
        struct ath10k_pci_pipe *pci_tx = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
        struct ath10k_pci_pipe *pci_rx = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
        struct ath10k_ce_pipe *ce_tx = pci_tx->ce_hdl;
        struct ath10k_ce_pipe *ce_rx = pci_rx->ce_hdl;
        dma_addr_t req_paddr = 0;
        dma_addr_t resp_paddr = 0;
        struct bmi_xfer xfer = {};
        void *treq, *tresp = NULL;
        int ret = 0;

        if (resp && !resp_len)
                return -EINVAL;

        if (resp && resp_len && *resp_len == 0)
                return -EINVAL;

        treq = kmemdup(req, req_len, GFP_KERNEL);
        if (!treq)
                return -ENOMEM;

        req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE);
        ret = dma_mapping_error(ar->dev, req_paddr);
        if (ret)
                goto err_dma;

        if (resp && resp_len) {
                tresp = kzalloc(*resp_len, GFP_KERNEL);
                if (!tresp) {
                        ret = -ENOMEM;
                        goto err_req;
                }

                resp_paddr = dma_map_single(ar->dev, tresp, *resp_len,
                                            DMA_FROM_DEVICE);
                ret = dma_mapping_error(ar->dev, resp_paddr);
                if (ret)
                        goto err_req;

                xfer.wait_for_resp = true;
                xfer.resp_len = 0;

                ath10k_ce_recv_buf_enqueue(ce_rx, &xfer, resp_paddr);
        }

        init_completion(&xfer.done);

        ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0);
        if (ret)
                goto err_resp;

        ret = wait_for_completion_timeout(&xfer.done,
                                          BMI_COMMUNICATION_TIMEOUT_HZ);
        if (ret <= 0) {
                u32 unused_buffer;
                unsigned int unused_nbytes;
                unsigned int unused_id;

                ret = -ETIMEDOUT;
                ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer,
                                           &unused_nbytes, &unused_id);
        } else {
                /* non-zero means we did not time out */
                ret = 0;
        }

err_resp:
        if (resp) {
                u32 unused_buffer;

                ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer);
                dma_unmap_single(ar->dev, resp_paddr,
                                 *resp_len, DMA_FROM_DEVICE);
        }
err_req:
        dma_unmap_single(ar->dev, req_paddr, req_len, DMA_TO_DEVICE);

        if (ret == 0 && resp_len) {
                *resp_len = min(*resp_len, xfer.resp_len);
                memcpy(resp, tresp, xfer.resp_len);
        }
err_dma:
        kfree(treq);
        kfree(tresp);

        return ret;
}
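
/*
 * BMI exchange flow, in brief: the request is bounced through a
 * DMA-safe copy of the caller's buffer, any response buffer is posted
 * on the RX CE before the send, and xfer.done is completed from the
 * two BMI CE callbacks below.
 */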
1473
1474 static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state)
1475 {
1476         struct bmi_xfer *xfer;
1477         u32 ce_data;
1478         unsigned int nbytes;
1479         unsigned int transfer_id;
1480
1481         if (ath10k_ce_completed_send_next(ce_state, (void **)&xfer, &ce_data,
1482                                           &nbytes, &transfer_id))
1483                 return;
1484
1485         if (xfer->wait_for_resp)
1486                 return;
1487
1488         complete(&xfer->done);
1489 }
1490
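/*
 * CE receive callback used during the BMI phase. Records the length of the
 * target's response and signals the waiting BMI exchange.
 */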
1491 static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state)
1492 {
1493         struct bmi_xfer *xfer;
1494         u32 ce_data;
1495         unsigned int nbytes;
1496         unsigned int transfer_id;
1497         unsigned int flags;
1498
1499         if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer, &ce_data,
1500                                           &nbytes, &transfer_id, &flags))
1501                 return;
1502
1503         if (!xfer->wait_for_resp) {
1504                 ath10k_warn("unexpected: BMI data received; ignoring\n");
1505                 return;
1506         }
1507
1508         xfer->resp_len = nbytes;
1509         complete(&xfer->done);
1510 }
1511
1512 /*
1513  * Map from service/endpoint to Copy Engine.
1514  * This table is derived from the CE_PCI TABLE, above.
1515  * It is passed to the Target at startup for use by firmware.
1516  */
1517 static const struct service_to_pipe target_service_to_ce_map_wlan[] = {
1518         {
1519                  ATH10K_HTC_SVC_ID_WMI_DATA_VO,
1520                  PIPEDIR_OUT,           /* out = UL = host -> target */
1521                  3,
1522         },
1523         {
1524                  ATH10K_HTC_SVC_ID_WMI_DATA_VO,
1525                  PIPEDIR_IN,            /* in = DL = target -> host */
1526                  2,
1527         },
1528         {
1529                  ATH10K_HTC_SVC_ID_WMI_DATA_BK,
1530                  PIPEDIR_OUT,           /* out = UL = host -> target */
1531                  3,
1532         },
1533         {
1534                  ATH10K_HTC_SVC_ID_WMI_DATA_BK,
1535                  PIPEDIR_IN,            /* in = DL = target -> host */
1536                  2,
1537         },
1538         {
1539                  ATH10K_HTC_SVC_ID_WMI_DATA_BE,
1540                  PIPEDIR_OUT,           /* out = UL = host -> target */
1541                  3,
1542         },
1543         {
1544                  ATH10K_HTC_SVC_ID_WMI_DATA_BE,
1545                  PIPEDIR_IN,            /* in = DL = target -> host */
1546                  2,
1547         },
1548         {
1549                  ATH10K_HTC_SVC_ID_WMI_DATA_VI,
1550                  PIPEDIR_OUT,           /* out = UL = host -> target */
1551                  3,
1552         },
1553         {
1554                  ATH10K_HTC_SVC_ID_WMI_DATA_VI,
1555                  PIPEDIR_IN,            /* in = DL = target -> host */
1556                  2,
1557         },
1558         {
1559                  ATH10K_HTC_SVC_ID_WMI_CONTROL,
1560                  PIPEDIR_OUT,           /* out = UL = host -> target */
1561                  3,
1562         },
1563         {
1564                  ATH10K_HTC_SVC_ID_WMI_CONTROL,
1565                  PIPEDIR_IN,            /* in = DL = target -> host */
1566                  2,
1567         },
1568         {
1569                  ATH10K_HTC_SVC_ID_RSVD_CTRL,
1570                  PIPEDIR_OUT,           /* out = UL = host -> target */
1571                  0,             /* could be moved to 3 (share with WMI) */
1572         },
1573         {
1574                  ATH10K_HTC_SVC_ID_RSVD_CTRL,
1575                  PIPEDIR_IN,            /* in = DL = target -> host */
1576                  1,
1577         },
1578         {
1579                  ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS,    /* not currently used */
1580                  PIPEDIR_OUT,           /* out = UL = host -> target */
1581                  0,
1582         },
1583         {
1584                  ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS,    /* not currently used */
1585                  PIPEDIR_IN,            /* in = DL = target -> host */
1586                  1,
1587         },
1588         {
1589                  ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
1590                  PIPEDIR_OUT,           /* out = UL = host -> target */
1591                  4,
1592         },
1593         {
1594                  ATH10K_HTC_SVC_ID_HTT_DATA_MSG,
1595                  PIPEDIR_IN,            /* in = DL = target -> host */
1596                  1,
1597         },
1598
1599         /* (Additions here) */
1600
1601         {                               /* Must be last */
1602                  0,
1603                  0,
1604                  0,
1605         },
1606 };
1607
1608 /*
1609  * Send an interrupt to the device to wake up the Target CPU
1610  * so it has an opportunity to notice any changed state.
1611  */
1612 static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
1613 {
1614         int ret;
1615         u32 core_ctrl;
1616
1617         ret = ath10k_pci_diag_read_access(ar, SOC_CORE_BASE_ADDRESS |
1618                                               CORE_CTRL_ADDRESS,
1619                                           &core_ctrl);
1620         if (ret) {
1621                 ath10k_warn("failed to read core_ctrl: %d\n", ret);
1622                 return ret;
1623         }
1624
1625         /* A_INUM_FIRMWARE interrupt to Target CPU */
1626         core_ctrl |= CORE_CTRL_CPU_INTR_MASK;
1627
1628         ret = ath10k_pci_diag_write_access(ar, SOC_CORE_BASE_ADDRESS |
1629                                                CORE_CTRL_ADDRESS,
1630                                            core_ctrl);
1631         if (ret) {
1632                 ath10k_warn("failed to set target CPU interrupt mask: %d\n",
1633                             ret);
1634                 return ret;
1635         }
1636
1637         return 0;
1638 }
1639
1640 static int ath10k_pci_init_config(struct ath10k *ar)
1641 {
1642         u32 interconnect_targ_addr;
1643         u32 pcie_state_targ_addr = 0;
1644         u32 pipe_cfg_targ_addr = 0;
1645         u32 svc_to_pipe_map = 0;
1646         u32 pcie_config_flags = 0;
1647         u32 ealloc_value;
1648         u32 ealloc_targ_addr;
1649         u32 flag2_value;
1650         u32 flag2_targ_addr;
1651         int ret = 0;
1652
1653         /* Download to Target the CE Config and the service-to-CE map */
1654         interconnect_targ_addr =
1655                 host_interest_item_address(HI_ITEM(hi_interconnect_state));
1656
1657         /* Supply Target-side CE configuration */
1658         ret = ath10k_pci_diag_read_access(ar, interconnect_targ_addr,
1659                                           &pcie_state_targ_addr);
1660         if (ret != 0) {
1661                 ath10k_err("Failed to get pcie state addr: %d\n", ret);
1662                 return ret;
1663         }
1664
1665         if (pcie_state_targ_addr == 0) {
1666                 ret = -EIO;
1667                 ath10k_err("Invalid pcie state addr\n");
1668                 return ret;
1669         }
1670
1671         ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
1672                                           offsetof(struct pcie_state,
1673                                                    pipe_cfg_addr),
1674                                           &pipe_cfg_targ_addr);
1675         if (ret != 0) {
1676                 ath10k_err("Failed to get pipe cfg addr: %d\n", ret);
1677                 return ret;
1678         }
1679
1680         if (pipe_cfg_targ_addr == 0) {
1681                 ret = -EIO;
1682                 ath10k_err("Invalid pipe cfg addr\n");
1683                 return ret;
1684         }
1685
1686         ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr,
1687                                  target_ce_config_wlan,
1688                                  sizeof(target_ce_config_wlan));
1689
1690         if (ret != 0) {
1691                 ath10k_err("Failed to write pipe cfg: %d\n", ret);
1692                 return ret;
1693         }
1694
1695         ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
1696                                           offsetof(struct pcie_state,
1697                                                    svc_to_pipe_map),
1698                                           &svc_to_pipe_map);
1699         if (ret != 0) {
1700                 ath10k_err("Failed to get svc/pipe map: %d\n", ret);
1701                 return ret;
1702         }
1703
1704         if (svc_to_pipe_map == 0) {
1705                 ret = -EIO;
1706                 ath10k_err("Invalid svc_to_pipe map\n");
1707                 return ret;
1708         }
1709
1710         ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map,
1711                                  target_service_to_ce_map_wlan,
1712                                  sizeof(target_service_to_ce_map_wlan));
1713         if (ret != 0) {
1714                 ath10k_err("Failed to write svc/pipe map: %d\n", ret);
1715                 return ret;
1716         }
1717
1718         ret = ath10k_pci_diag_read_access(ar, pcie_state_targ_addr +
1719                                           offsetof(struct pcie_state,
1720                                                    config_flags),
1721                                           &pcie_config_flags);
1722         if (ret != 0) {
1723                 ath10k_err("Failed to get pcie config_flags: %d\n", ret);
1724                 return ret;
1725         }
1726
1727         pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;
1728
1729         ret = ath10k_pci_diag_write_mem(ar, pcie_state_targ_addr +
1730                                  offsetof(struct pcie_state, config_flags),
1731                                  &pcie_config_flags,
1732                                  sizeof(pcie_config_flags));
1733         if (ret != 0) {
1734                 ath10k_err("Failed to write pcie config_flags: %d\n", ret);
1735                 return ret;
1736         }
1737
1738         /* configure early allocation */
1739         ealloc_targ_addr = host_interest_item_address(HI_ITEM(hi_early_alloc));
1740
1741         ret = ath10k_pci_diag_read_access(ar, ealloc_targ_addr, &ealloc_value);
1742         if (ret != 0) {
                ath10k_err("Failed to get early alloc val: %d\n", ret);
1744                 return ret;
1745         }
1746
1747         /* first bank is switched to IRAM */
1748         ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
1749                          HI_EARLY_ALLOC_MAGIC_MASK);
1750         ealloc_value |= ((1 << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
1751                          HI_EARLY_ALLOC_IRAM_BANKS_MASK);
1752
1753         ret = ath10k_pci_diag_write_access(ar, ealloc_targ_addr, ealloc_value);
1754         if (ret != 0) {
1755                 ath10k_err("Failed to set early alloc val: %d\n", ret);
1756                 return ret;
1757         }
1758
1759         /* Tell Target to proceed with initialization */
1760         flag2_targ_addr = host_interest_item_address(HI_ITEM(hi_option_flag2));
1761
1762         ret = ath10k_pci_diag_read_access(ar, flag2_targ_addr, &flag2_value);
1763         if (ret != 0) {
1764                 ath10k_err("Failed to get option val: %d\n", ret);
1765                 return ret;
1766         }
1767
1768         flag2_value |= HI_OPTION_EARLY_CFG_DONE;
1769
1770         ret = ath10k_pci_diag_write_access(ar, flag2_targ_addr, flag2_value);
1771         if (ret != 0) {
1772                 ath10k_err("Failed to set option val: %d\n", ret);
1773                 return ret;
1774         }
1775
1776         return 0;
1777 }
1778
1779
1780
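/*
 * Set up the host-side copy engine state for every pipe described by
 * host_ce_config_wlan. The last CE is reserved as the diagnostic window
 * (ar_pci->ce_diag) and is not used for regular traffic.
 */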
1781 static int ath10k_pci_ce_init(struct ath10k *ar)
1782 {
1783         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1784         struct ath10k_pci_pipe *pipe_info;
1785         const struct ce_attr *attr;
1786         int pipe_num;
1787
1788         for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
1789                 pipe_info = &ar_pci->pipe_info[pipe_num];
1790                 pipe_info->pipe_num = pipe_num;
1791                 pipe_info->hif_ce_state = ar;
1792                 attr = &host_ce_config_wlan[pipe_num];
1793
1794                 pipe_info->ce_hdl = ath10k_ce_init(ar, pipe_num, attr);
1795                 if (pipe_info->ce_hdl == NULL) {
1796                         ath10k_err("failed to initialize CE for pipe: %d\n",
1797                                    pipe_num);
1798
                        /* It is safe to call this here; it checks whether
                         * ce_hdl is valid for each pipe. */
                        ath10k_pci_ce_deinit(ar);
                        return -ENOMEM;
1803                 }
1804
1805                 if (pipe_num == CE_COUNT - 1) {
                        /*
                         * Reserve the last CE for
                         * diagnostic window support.
                         */
1810                         ar_pci->ce_diag = pipe_info->ce_hdl;
1811                         continue;
1812                 }
1813
1814                 pipe_info->buf_sz = (size_t) (attr->src_sz_max);
1815         }
1816
1817         return 0;
1818 }
1819
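/*
 * Check for and acknowledge a pending firmware event. If the device has
 * already been started, the firmware crash area is dumped for debugging.
 */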
1820 static void ath10k_pci_fw_interrupt_handler(struct ath10k *ar)
1821 {
1822         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1823         u32 fw_indicator_address, fw_indicator;
1824
1825         ath10k_pci_wake(ar);
1826
1827         fw_indicator_address = ar_pci->fw_indicator_address;
1828         fw_indicator = ath10k_pci_read32(ar, fw_indicator_address);
1829
1830         if (fw_indicator & FW_IND_EVENT_PENDING) {
1831                 /* ACK: clear Target-side pending event */
1832                 ath10k_pci_write32(ar, fw_indicator_address,
1833                                    fw_indicator & ~FW_IND_EVENT_PENDING);
1834
1835                 if (ar_pci->started) {
1836                         ath10k_pci_hif_dump_area(ar);
1837                 } else {
1838                         /*
1839                          * Probable Target failure before we're prepared
1840                          * to handle it.  Generally unexpected.
1841                          */
1842                         ath10k_warn("early firmware event indicated\n");
1843                 }
1844         }
1845
1846         ath10k_pci_sleep(ar);
1847 }
1848
1849 static void ath10k_pci_start_bmi(struct ath10k *ar)
1850 {
1851         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1852         struct ath10k_pci_pipe *pipe;
1853
1854         /*
1855          * Initially, establish CE completion handlers for use with BMI.
1856          * These are overwritten with generic handlers after we exit BMI phase.
1857          */
1858         pipe = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
1859         ath10k_ce_send_cb_register(pipe->ce_hdl, ath10k_pci_bmi_send_done, 0);
1860
1861         pipe = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
1862         ath10k_ce_recv_cb_register(pipe->ce_hdl, ath10k_pci_bmi_recv_data);
1863
1864         ath10k_dbg(ATH10K_DBG_BOOT, "boot start bmi\n");
1865 }
1866
1867 static int ath10k_pci_hif_power_up(struct ath10k *ar)
1868 {
1869         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1870         const char *irq_mode;
1871         int ret;
1872
1873         /*
1874          * Bring the target up cleanly.
1875          *
1876          * The target may be in an undefined state with an AUX-powered Target
1877          * and a Host in WoW mode. If the Host crashes, loses power, or is
1878          * restarted (without unloading the driver) then the Target is left
1879          * (aux) powered and running. On a subsequent driver load, the Target
1880          * is in an unexpected state. We try to catch that here in order to
1881          * reset the Target and retry the probe.
1882          */
1883         ret = ath10k_pci_device_reset(ar);
1884         if (ret) {
1885                 ath10k_err("failed to reset target: %d\n", ret);
1886                 goto err;
1887         }
1888
1889         if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
1890                 /* Force AWAKE forever */
1891                 ath10k_do_pci_wake(ar);
1892
1893         ret = ath10k_pci_ce_init(ar);
1894         if (ret) {
1895                 ath10k_err("failed to initialize CE: %d\n", ret);
1896                 goto err_ps;
1897         }
1898
1899         ret = ath10k_ce_disable_interrupts(ar);
1900         if (ret) {
1901                 ath10k_err("failed to disable CE interrupts: %d\n", ret);
1902                 goto err_ce;
1903         }
1904
1905         ret = ath10k_pci_init_irq(ar);
1906         if (ret) {
1907                 ath10k_err("failed to init irqs: %d\n", ret);
1908                 goto err_ce;
1909         }
1910
1911         ret = ath10k_pci_request_irq(ar);
1912         if (ret) {
1913                 ath10k_err("failed to request irqs: %d\n", ret);
1914                 goto err_deinit_irq;
1915         }
1916
1917         ret = ath10k_pci_wait_for_target_init(ar);
1918         if (ret) {
1919                 ath10k_err("failed to wait for target to init: %d\n", ret);
1920                 goto err_free_irq;
1921         }
1922
1923         ret = ath10k_ce_enable_err_irq(ar);
1924         if (ret) {
1925                 ath10k_err("failed to enable CE error irq: %d\n", ret);
1926                 goto err_free_irq;
1927         }
1928
1929         ret = ath10k_pci_init_config(ar);
1930         if (ret) {
1931                 ath10k_err("failed to setup init config: %d\n", ret);
1932                 goto err_free_irq;
1933         }
1934
1935         ret = ath10k_pci_wake_target_cpu(ar);
1936         if (ret) {
1937                 ath10k_err("could not wake up target CPU: %d\n", ret);
1938                 goto err_free_irq;
1939         }
1940
1941         ath10k_pci_start_bmi(ar);
1942
1943         if (ar_pci->num_msi_intrs > 1)
1944                 irq_mode = "MSI-X";
1945         else if (ar_pci->num_msi_intrs == 1)
1946                 irq_mode = "MSI";
1947         else
1948                 irq_mode = "legacy";
1949
1950         if (!test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags))
1951                 ath10k_info("pci irq %s\n", irq_mode);
1952
1953         return 0;
1954
1955 err_free_irq:
1956         ath10k_pci_free_irq(ar);
1957         ath10k_pci_kill_tasklet(ar);
1958         ath10k_pci_device_reset(ar);
1959 err_deinit_irq:
1960         ath10k_pci_deinit_irq(ar);
1961 err_ce:
1962         ath10k_pci_ce_deinit(ar);
1963 err_ps:
1964         if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
1965                 ath10k_do_pci_sleep(ar);
1966 err:
1967         return ret;
1968 }
1969
1970 static void ath10k_pci_hif_power_down(struct ath10k *ar)
1971 {
1972         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1973
1974         ath10k_pci_free_irq(ar);
1975         ath10k_pci_deinit_irq(ar);
1976         ath10k_pci_device_reset(ar);
1977
1978         ath10k_pci_ce_deinit(ar);
1979         if (!test_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features))
1980                 ath10k_do_pci_sleep(ar);
1981 }
1982
1983 #ifdef CONFIG_PM
1984
1985 #define ATH10K_PCI_PM_CONTROL 0x44
1986
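/*
 * Suspend by moving the device to D3hot: save PCI config space, disable
 * the device and write power state 0x3 into the low byte of the PM
 * control/status register.
 */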
1987 static int ath10k_pci_hif_suspend(struct ath10k *ar)
1988 {
1989         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1990         struct pci_dev *pdev = ar_pci->pdev;
1991         u32 val;
1992
1993         pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
1994
1995         if ((val & 0x000000ff) != 0x3) {
1996                 pci_save_state(pdev);
1997                 pci_disable_device(pdev);
1998                 pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
1999                                        (val & 0xffffff00) | 0x03);
2000         }
2001
2002         return 0;
2003 }
2004
2005 static int ath10k_pci_hif_resume(struct ath10k *ar)
2006 {
2007         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2008         struct pci_dev *pdev = ar_pci->pdev;
2009         u32 val;
2010
2011         pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
2012
2013         if ((val & 0x000000ff) != 0) {
2014                 pci_restore_state(pdev);
2015                 pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
2016                                        val & 0xffffff00);
2017                 /*
2018                  * Suspend/Resume resets the PCI configuration space,
2019                  * so we have to re-disable the RETRY_TIMEOUT register (0x41)
2020                  * to keep PCI Tx retries from interfering with C3 CPU state
2021                  */
2022                 pci_read_config_dword(pdev, 0x40, &val);
2023
2024                 if ((val & 0x0000ff00) != 0)
2025                         pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
2026         }
2027
2028         return 0;
2029 }
2030 #endif
2031
2032 static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
2033         .send_head              = ath10k_pci_hif_send_head,
2034         .exchange_bmi_msg       = ath10k_pci_hif_exchange_bmi_msg,
2035         .start                  = ath10k_pci_hif_start,
2036         .stop                   = ath10k_pci_hif_stop,
2037         .map_service_to_pipe    = ath10k_pci_hif_map_service_to_pipe,
2038         .get_default_pipe       = ath10k_pci_hif_get_default_pipe,
2039         .send_complete_check    = ath10k_pci_hif_send_complete_check,
2040         .set_callbacks          = ath10k_pci_hif_set_callbacks,
2041         .get_free_queue_number  = ath10k_pci_hif_get_free_queue_number,
2042         .power_up               = ath10k_pci_hif_power_up,
2043         .power_down             = ath10k_pci_hif_power_down,
2044 #ifdef CONFIG_PM
2045         .suspend                = ath10k_pci_hif_suspend,
2046         .resume                 = ath10k_pci_hif_resume,
2047 #endif
2048 };
2049
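/*
 * Per-CE tasklet: service completions on a single copy engine. Scheduled
 * from the per-engine MSI-X handler below.
 */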
2050 static void ath10k_pci_ce_tasklet(unsigned long ptr)
2051 {
2052         struct ath10k_pci_pipe *pipe = (struct ath10k_pci_pipe *)ptr;
2053         struct ath10k_pci *ar_pci = pipe->ar_pci;
2054
2055         ath10k_ce_per_engine_service(ar_pci->ar, pipe->pipe_num);
2056 }
2057
2058 static void ath10k_msi_err_tasklet(unsigned long data)
2059 {
2060         struct ath10k *ar = (struct ath10k *)data;
2061
2062         ath10k_pci_fw_interrupt_handler(ar);
2063 }
2064
2065 /*
2066  * Handler for a per-engine interrupt on a PARTICULAR CE.
2067  * This is used in cases where each CE has a private MSI interrupt.
2068  */
2069 static irqreturn_t ath10k_pci_per_engine_handler(int irq, void *arg)
2070 {
2071         struct ath10k *ar = arg;
2072         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2073         int ce_id = irq - ar_pci->pdev->irq - MSI_ASSIGN_CE_INITIAL;
2074
2075         if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_pci->pipe_info)) {
2076                 ath10k_warn("unexpected/invalid irq %d ce_id %d\n", irq, ce_id);
2077                 return IRQ_HANDLED;
2078         }
2079
2080         /*
2081          * NOTE: We are able to derive ce_id from irq because we
2082          * use a one-to-one mapping for CE's 0..5.
2083          * CE's 6 & 7 do not use interrupts at all.
2084          *
2085          * This mapping must be kept in sync with the mapping
2086          * used by firmware.
2087          */
2088         tasklet_schedule(&ar_pci->pipe_info[ce_id].intr);
2089         return IRQ_HANDLED;
2090 }
2091
2092 static irqreturn_t ath10k_pci_msi_fw_handler(int irq, void *arg)
2093 {
2094         struct ath10k *ar = arg;
2095         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2096
2097         tasklet_schedule(&ar_pci->msi_fw_err);
2098         return IRQ_HANDLED;
2099 }
2100
2101 /*
2102  * Top-level interrupt handler for all PCI interrupts from a Target.
2103  * When a block of MSI interrupts is allocated, this top-level handler
2104  * is not used; instead, we directly call the correct sub-handler.
2105  */
2106 static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
2107 {
2108         struct ath10k *ar = arg;
2109         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2110
2111         if (ar_pci->num_msi_intrs == 0) {
2112                 if (!ath10k_pci_irq_pending(ar))
2113                         return IRQ_NONE;
2114
                /*
                 * IMPORTANT: the INTR_CLR register has to be set after
                 * INTR_ENABLE is set to 0; otherwise the interrupt cannot
                 * really be cleared.
                 */
2120                 iowrite32(0, ar_pci->mem +
2121                           (SOC_CORE_BASE_ADDRESS |
2122                            PCIE_INTR_ENABLE_ADDRESS));
2123                 iowrite32(PCIE_INTR_FIRMWARE_MASK |
2124                           PCIE_INTR_CE_MASK_ALL,
2125                           ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
2126                                          PCIE_INTR_CLR_ADDRESS));
2127                 /*
2128                  * IMPORTANT: this extra read transaction is required to
2129                  * flush the posted write buffer.
2130                  */
2131                 (void) ioread32(ar_pci->mem +
2132                                 (SOC_CORE_BASE_ADDRESS |
2133                                  PCIE_INTR_ENABLE_ADDRESS));
2134         }
2135
2136         tasklet_schedule(&ar_pci->intr_tq);
2137
2138         return IRQ_HANDLED;
2139 }
2140
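/*
 * Bottom half for MSI and legacy interrupts: handle firmware events,
 * service all copy engines with pending completions and, in legacy mode,
 * re-enable the interrupt sources masked by the top-level handler.
 */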
2141 static void ath10k_pci_tasklet(unsigned long data)
2142 {
2143         struct ath10k *ar = (struct ath10k *)data;
2144         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2145
2146         ath10k_pci_fw_interrupt_handler(ar); /* FIXME: Handle FW error */
2147         ath10k_ce_per_engine_service_any(ar);
2148
2149         if (ar_pci->num_msi_intrs == 0) {
2150                 /* Enable Legacy PCI line interrupts */
2151                 iowrite32(PCIE_INTR_FIRMWARE_MASK |
2152                           PCIE_INTR_CE_MASK_ALL,
2153                           ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
2154                                          PCIE_INTR_ENABLE_ADDRESS));
2155                 /*
2156                  * IMPORTANT: this extra read transaction is required to
2157                  * flush the posted write buffer
2158                  */
2159                 (void) ioread32(ar_pci->mem +
2160                                 (SOC_CORE_BASE_ADDRESS |
2161                                  PCIE_INTR_ENABLE_ADDRESS));
2162         }
2163 }
2164
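/*
 * In MSI-X mode one vector (MSI_ASSIGN_FW) is dedicated to firmware
 * indications and the remaining vectors map one-to-one onto copy engines.
 */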
2165 static int ath10k_pci_request_irq_msix(struct ath10k *ar)
2166 {
2167         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2168         int ret, i;
2169
2170         ret = request_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW,
2171                           ath10k_pci_msi_fw_handler,
2172                           IRQF_SHARED, "ath10k_pci", ar);
2173         if (ret) {
2174                 ath10k_warn("failed to request MSI-X fw irq %d: %d\n",
2175                             ar_pci->pdev->irq + MSI_ASSIGN_FW, ret);
2176                 return ret;
2177         }
2178
2179         for (i = MSI_ASSIGN_CE_INITIAL; i <= MSI_ASSIGN_CE_MAX; i++) {
2180                 ret = request_irq(ar_pci->pdev->irq + i,
2181                                   ath10k_pci_per_engine_handler,
2182                                   IRQF_SHARED, "ath10k_pci", ar);
2183                 if (ret) {
2184                         ath10k_warn("failed to request MSI-X ce irq %d: %d\n",
2185                                     ar_pci->pdev->irq + i, ret);
2186
2187                         for (i--; i >= MSI_ASSIGN_CE_INITIAL; i--)
2188                                 free_irq(ar_pci->pdev->irq + i, ar);
2189
2190                         free_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW, ar);
2191                         return ret;
2192                 }
2193         }
2194
2195         return 0;
2196 }
2197
2198 static int ath10k_pci_request_irq_msi(struct ath10k *ar)
2199 {
2200         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2201         int ret;
2202
2203         ret = request_irq(ar_pci->pdev->irq,
2204                           ath10k_pci_interrupt_handler,
2205                           IRQF_SHARED, "ath10k_pci", ar);
2206         if (ret) {
2207                 ath10k_warn("failed to request MSI irq %d: %d\n",
2208                             ar_pci->pdev->irq, ret);
2209                 return ret;
2210         }
2211
2212         return 0;
2213 }
2214
2215 static int ath10k_pci_request_irq_legacy(struct ath10k *ar)
2216 {
2217         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2218         int ret;
2219
2220         ret = request_irq(ar_pci->pdev->irq,
2221                           ath10k_pci_interrupt_handler,
2222                           IRQF_SHARED, "ath10k_pci", ar);
2223         if (ret) {
2224                 ath10k_warn("failed to request legacy irq %d: %d\n",
2225                             ar_pci->pdev->irq, ret);
2226                 return ret;
2227         }
2228
2229         return 0;
2230 }
2231
2232 static int ath10k_pci_request_irq(struct ath10k *ar)
2233 {
2234         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2235
2236         switch (ar_pci->num_msi_intrs) {
2237         case 0:
2238                 return ath10k_pci_request_irq_legacy(ar);
2239         case 1:
2240                 return ath10k_pci_request_irq_msi(ar);
2241         case MSI_NUM_REQUEST:
2242                 return ath10k_pci_request_irq_msix(ar);
2243         }
2244
2245         ath10k_warn("unknown irq configuration upon request\n");
2246         return -EINVAL;
2247 }
2248
2249 static void ath10k_pci_free_irq(struct ath10k *ar)
2250 {
2251         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2252         int i;
2253
        /* There is at least one interrupt regardless of whether it is
         * legacy INTR, MSI or MSI-X. */
2256         for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
2257                 free_irq(ar_pci->pdev->irq + i, ar);
2258 }
2259
2260 static void ath10k_pci_init_irq_tasklets(struct ath10k *ar)
2261 {
2262         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2263         int i;
2264
2265         tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long)ar);
2266         tasklet_init(&ar_pci->msi_fw_err, ath10k_msi_err_tasklet,
2267                      (unsigned long)ar);
2268
2269         for (i = 0; i < CE_COUNT; i++) {
2270                 ar_pci->pipe_info[i].ar_pci = ar_pci;
2271                 tasklet_init(&ar_pci->pipe_info[i].intr, ath10k_pci_ce_tasklet,
2272                              (unsigned long)&ar_pci->pipe_info[i]);
2273         }
2274 }
2275
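/*
 * Pick an interrupt mode, preferring MSI-X, then single MSI, then legacy
 * INTx. For legacy interrupts the sources must also be unmasked via the
 * SoC's PCIE_INTR_ENABLE register.
 */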
2276 static int ath10k_pci_init_irq(struct ath10k *ar)
2277 {
2278         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2279         int ret;
2280
2281         ath10k_pci_init_irq_tasklets(ar);
2282
2283         if (!test_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features))
2284                 goto msi;
2285
2286         /* Try MSI-X */
2287         ar_pci->num_msi_intrs = MSI_NUM_REQUEST;
2288         ret = pci_enable_msi_block(ar_pci->pdev, ar_pci->num_msi_intrs);
2289         if (ret == 0)
2290                 return 0;
2291         if (ret > 0)
2292                 pci_disable_msi(ar_pci->pdev);
2293
2294 msi:
2295         /* Try MSI */
2296         ar_pci->num_msi_intrs = 1;
2297         ret = pci_enable_msi(ar_pci->pdev);
2298         if (ret == 0)
2299                 return 0;
2300
        /* Try legacy irq
         *
         * A potential race occurs here: the CORE_BASE write depends on the
         * target correctly decoding the AXI address, but the host has no
         * way of knowing when the target has written its BAR to CORE_CTRL.
         * The write may be lost if the target has not yet written the BAR.
         * For now, work around the race by repeating the write during the
         * synchronization check in ath10k_pci_wait_for_target_init(). */
2309         ar_pci->num_msi_intrs = 0;
2310
2311         ret = ath10k_pci_wake(ar);
2312         if (ret) {
2313                 ath10k_warn("failed to wake target: %d\n", ret);
2314                 return ret;
2315         }
2316
2317         ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
2318                            PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
2319         ath10k_pci_sleep(ar);
2320
2321         return 0;
2322 }
2323
2324 static int ath10k_pci_deinit_irq_legacy(struct ath10k *ar)
2325 {
2326         int ret;
2327
2328         ret = ath10k_pci_wake(ar);
2329         if (ret) {
2330                 ath10k_warn("failed to wake target: %d\n", ret);
2331                 return ret;
2332         }
2333
2334         ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
2335                            0);
2336         ath10k_pci_sleep(ar);
2337
2338         return 0;
2339 }
2340
2341 static int ath10k_pci_deinit_irq(struct ath10k *ar)
2342 {
2343         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2344
2345         switch (ar_pci->num_msi_intrs) {
2346         case 0:
2347                 return ath10k_pci_deinit_irq_legacy(ar);
2348         case 1:
2349                 /* fall-through */
2350         case MSI_NUM_REQUEST:
2351                 pci_disable_msi(ar_pci->pdev);
2352                 return 0;
2353         }
2354
2355         ath10k_warn("unknown irq configuration upon deinit\n");
2356         return -EINVAL;
2357 }
2358
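/*
 * Poll the firmware indicator register for FW_IND_INITIALIZED, checking
 * every 10 ms for up to roughly 3 seconds.
 */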
2359 static int ath10k_pci_wait_for_target_init(struct ath10k *ar)
2360 {
2361         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2362         int wait_limit = 300; /* 3 sec */
2363         int ret;
2364
2365         ret = ath10k_pci_wake(ar);
2366         if (ret) {
2367                 ath10k_err("failed to wake up target: %d\n", ret);
2368                 return ret;
2369         }
2370
2371         while (wait_limit-- &&
2372                !(ioread32(ar_pci->mem + FW_INDICATOR_ADDRESS) &
2373                  FW_IND_INITIALIZED)) {
2374                 if (ar_pci->num_msi_intrs == 0)
2375                         /* Fix potential race by repeating CORE_BASE writes */
2376                         iowrite32(PCIE_INTR_FIRMWARE_MASK |
2377                                   PCIE_INTR_CE_MASK_ALL,
2378                                   ar_pci->mem + (SOC_CORE_BASE_ADDRESS |
2379                                                  PCIE_INTR_ENABLE_ADDRESS));
2380                 mdelay(10);
2381         }
2382
        if (wait_limit < 0) {
                ath10k_err("target stalled\n");
                ret = -EIO;
        }

        ath10k_pci_sleep(ar);
        return ret;
2392 }
2393
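/*
 * Cold-reset the target: assert the SoC global reset bit, wait for the
 * RTC to report the reset, then deassert the bit and wait for the SoC to
 * come out of reset again.
 */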
2394 static int ath10k_pci_device_reset(struct ath10k *ar)
2395 {
2396         int i, ret;
2397         u32 val;
2398
2399         ret = ath10k_do_pci_wake(ar);
2400         if (ret) {
2401                 ath10k_err("failed to wake up target: %d\n",
2402                            ret);
2403                 return ret;
2404         }
2405
2406         /* Put Target, including PCIe, into RESET. */
2407         val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS);
2408         val |= 1;
2409         ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
2410
2411         for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
2412                 if (ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
2413                                           RTC_STATE_COLD_RESET_MASK)
2414                         break;
2415                 msleep(1);
2416         }
2417
2418         /* Pull Target, including PCIe, out of RESET. */
2419         val &= ~1;
2420         ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
2421
2422         for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
2423                 if (!(ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
2424                                             RTC_STATE_COLD_RESET_MASK))
2425                         break;
2426                 msleep(1);
2427         }
2428
2429         ath10k_do_pci_sleep(ar);
2430         return 0;
2431 }
2432
2433 static void ath10k_pci_dump_features(struct ath10k_pci *ar_pci)
2434 {
2435         int i;
2436
2437         for (i = 0; i < ATH10K_PCI_FEATURE_COUNT; i++) {
2438                 if (!test_bit(i, ar_pci->features))
2439                         continue;
2440
2441                 switch (i) {
2442                 case ATH10K_PCI_FEATURE_MSI_X:
2443                         ath10k_dbg(ATH10K_DBG_BOOT, "device supports MSI-X\n");
2444                         break;
2445                 case ATH10K_PCI_FEATURE_SOC_POWER_SAVE:
2446                         ath10k_dbg(ATH10K_DBG_BOOT, "QCA98XX SoC power save enabled\n");
2447                         break;
2448                 }
2449         }
2450 }
2451
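/*
 * Probe: detect chip features, create the core instance, enable and map
 * the PCI device, constrain DMA to 32 bits, read the chip id and register
 * with the ath10k core.
 */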
2452 static int ath10k_pci_probe(struct pci_dev *pdev,
2453                             const struct pci_device_id *pci_dev)
2454 {
2455         void __iomem *mem;
2456         int ret = 0;
2457         struct ath10k *ar;
2458         struct ath10k_pci *ar_pci;
2459         u32 lcr_val, chip_id;
2460
2461         ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
2462
2463         ar_pci = kzalloc(sizeof(*ar_pci), GFP_KERNEL);
2464         if (ar_pci == NULL)
2465                 return -ENOMEM;
2466
2467         ar_pci->pdev = pdev;
2468         ar_pci->dev = &pdev->dev;
2469
2470         switch (pci_dev->device) {
2471         case QCA988X_2_0_DEVICE_ID:
2472                 set_bit(ATH10K_PCI_FEATURE_MSI_X, ar_pci->features);
2473                 break;
2474         default:
2475                 ret = -ENODEV;
                ath10k_err("Unknown device ID: %d\n", pci_dev->device);
2477                 goto err_ar_pci;
2478         }
2479
2480         if (ath10k_target_ps)
2481                 set_bit(ATH10K_PCI_FEATURE_SOC_POWER_SAVE, ar_pci->features);
2482
2483         ath10k_pci_dump_features(ar_pci);
2484
2485         ar = ath10k_core_create(ar_pci, ar_pci->dev, &ath10k_pci_hif_ops);
2486         if (!ar) {
2487                 ath10k_err("failed to create driver core\n");
2488                 ret = -EINVAL;
2489                 goto err_ar_pci;
2490         }
2491
2492         ar_pci->ar = ar;
2493         ar_pci->fw_indicator_address = FW_INDICATOR_ADDRESS;
2494         atomic_set(&ar_pci->keep_awake_count, 0);
2495
2496         pci_set_drvdata(pdev, ar);
2497
        /*
         * The Target may have been reset or power cycled without the Host's
         * knowledge, in which case its Config Space may no longer reflect
         * the PCI address space that was assigned earlier by the PCI
         * infrastructure. Refresh it now.
         */
2504         ret = pci_assign_resource(pdev, BAR_NUM);
2505         if (ret) {
2506                 ath10k_err("failed to assign PCI space: %d\n", ret);
2507                 goto err_ar;
2508         }
2509
2510         ret = pci_enable_device(pdev);
2511         if (ret) {
2512                 ath10k_err("failed to enable PCI device: %d\n", ret);
2513                 goto err_ar;
2514         }
2515
2516         /* Request MMIO resources */
2517         ret = pci_request_region(pdev, BAR_NUM, "ath");
2518         if (ret) {
2519                 ath10k_err("failed to request MMIO region: %d\n", ret);
2520                 goto err_device;
2521         }
2522
2523         /*
2524          * Target structures have a limit of 32 bit DMA pointers.
2525          * DMA pointers can be wider than 32 bits by default on some systems.
2526          */
2527         ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2528         if (ret) {
2529                 ath10k_err("failed to set DMA mask to 32-bit: %d\n", ret);
2530                 goto err_region;
2531         }
2532
2533         ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
2534         if (ret) {
2535                 ath10k_err("failed to set consistent DMA mask to 32-bit\n");
2536                 goto err_region;
2537         }
2538
2539         /* Set bus master bit in PCI_COMMAND to enable DMA */
2540         pci_set_master(pdev);
2541
        /*
         * Temporary fix: disable ASPM.
         * This will be removed once the OTP is programmed.
         */
2546         pci_read_config_dword(pdev, 0x80, &lcr_val);
2547         pci_write_config_dword(pdev, 0x80, (lcr_val & 0xffffff00));
2548
2549         /* Arrange for access to Target SoC registers. */
2550         mem = pci_iomap(pdev, BAR_NUM, 0);
2551         if (!mem) {
2552                 ath10k_err("failed to perform IOMAP for BAR%d\n", BAR_NUM);
2553                 ret = -EIO;
2554                 goto err_master;
2555         }
2556
2557         ar_pci->mem = mem;
2558
2559         spin_lock_init(&ar_pci->ce_lock);
2560
2561         ret = ath10k_do_pci_wake(ar);
2562         if (ret) {
                ath10k_err("failed to wake up target to read chip id: %d\n",
                           ret);
2564                 goto err_iomap;
2565         }
2566
2567         chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
2568
2569         ath10k_do_pci_sleep(ar);
2570
2571         ath10k_dbg(ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem);
2572
2573         ret = ath10k_core_register(ar, chip_id);
2574         if (ret) {
2575                 ath10k_err("failed to register driver core: %d\n", ret);
2576                 goto err_iomap;
2577         }
2578
2579         return 0;
2580
2581 err_iomap:
2582         pci_iounmap(pdev, mem);
2583 err_master:
2584         pci_clear_master(pdev);
2585 err_region:
2586         pci_release_region(pdev, BAR_NUM);
2587 err_device:
2588         pci_disable_device(pdev);
2589 err_ar:
2590         ath10k_core_destroy(ar);
2591 err_ar_pci:
2592         /* call HIF PCI free here */
2593         kfree(ar_pci);
2594
2595         return ret;
2596 }
2597
2598 static void ath10k_pci_remove(struct pci_dev *pdev)
2599 {
2600         struct ath10k *ar = pci_get_drvdata(pdev);
2601         struct ath10k_pci *ar_pci;
2602
2603         ath10k_dbg(ATH10K_DBG_PCI, "%s\n", __func__);
2604
2605         if (!ar)
2606                 return;
2607
2608         ar_pci = ath10k_pci_priv(ar);
2609
2610         if (!ar_pci)
2611                 return;
2612
2613         tasklet_kill(&ar_pci->msi_fw_err);
2614
2615         ath10k_core_unregister(ar);
2616
2617         pci_iounmap(pdev, ar_pci->mem);
2618         pci_release_region(pdev, BAR_NUM);
2619         pci_clear_master(pdev);
2620         pci_disable_device(pdev);
2621
2622         ath10k_core_destroy(ar);
2623         kfree(ar_pci);
2624 }
2625
2626 MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);
2627
2628 static struct pci_driver ath10k_pci_driver = {
2629         .name = "ath10k_pci",
2630         .id_table = ath10k_pci_id_table,
2631         .probe = ath10k_pci_probe,
2632         .remove = ath10k_pci_remove,
2633 };
2634
2635 static int __init ath10k_pci_init(void)
2636 {
2637         int ret;
2638
2639         ret = pci_register_driver(&ath10k_pci_driver);
2640         if (ret)
2641                 ath10k_err("failed to register PCI driver: %d\n", ret);
2642
2643         return ret;
2644 }
2645 module_init(ath10k_pci_init);
2646
2647 static void __exit ath10k_pci_exit(void)
2648 {
2649         pci_unregister_driver(&ath10k_pci_driver);
2650 }
2651
2652 module_exit(ath10k_pci_exit);
2653
2654 MODULE_AUTHOR("Qualcomm Atheros");
2655 MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices");
2656 MODULE_LICENSE("Dual BSD/GPL");
2657 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_FILE);
2658 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_OTP_FILE);
2659 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);