/* drivers/net/benet/be_cmds.c */
1 /*
2  * Copyright (C) 2005 - 2011 Emulex
3  * All rights reserved.
4  *
5  * This program is free software; you can redistribute it and/or
6  * modify it under the terms of the GNU General Public License version 2
7  * as published by the Free Software Foundation.  The full GNU General
8  * Public License is included in this distribution in the file called COPYING.
9  *
10  * Contact Information:
11  * linux-drivers@emulex.com
12  *
13  * Emulex
14  * 3333 Susan Street
15  * Costa Mesa, CA 92626
16  */
17
18 #include "be.h"
19 #include "be_cmds.h"
20
/* Must be a power of 2 or else MODULO will BUG_ON */
/* NOTE(review): appears to control how often the temperature is queried
 * (the power-of-2 requirement suggests a modulo check against a running
 * counter elsewhere) — confirm against the worker routine that reads it. */
static int be_get_temp_freq = 32;
23
24 static void be_mcc_notify(struct be_adapter *adapter)
25 {
26         struct be_queue_info *mccq = &adapter->mcc_obj.q;
27         u32 val = 0;
28
29         if (adapter->eeh_err) {
30                 dev_info(&adapter->pdev->dev,
31                         "Error in Card Detected! Cannot issue commands\n");
32                 return;
33         }
34
35         val |= mccq->id & DB_MCCQ_RING_ID_MASK;
36         val |= 1 << DB_MCCQ_NUM_POSTED_SHIFT;
37
38         wmb();
39         iowrite32(val, adapter->db + DB_MCCQ_OFFSET);
40 }
41
42 /* To check if valid bit is set, check the entire word as we don't know
43  * the endianness of the data (old entry is host endian while a new entry is
44  * little endian) */
45 static inline bool be_mcc_compl_is_new(struct be_mcc_compl *compl)
46 {
47         if (compl->flags != 0) {
48                 compl->flags = le32_to_cpu(compl->flags);
49                 BUG_ON((compl->flags & CQE_FLAGS_VALID_MASK) == 0);
50                 return true;
51         } else {
52                 return false;
53         }
54 }
55
/* Need to reset the entire word that houses the valid bit */
static inline void be_mcc_compl_use(struct be_mcc_compl *compl)
{
	/* Marks the CQE consumed; be_mcc_compl_is_new() will now return false */
	compl->flags = 0;
}
61
/* Process one MCC command completion: record flash status, swap statistics
 * response buffers to host endian (Lancer / GEN3 v1 / v0 layouts), and warn
 * on unexpected errors.  Returns the completion status from the CQE.
 */
static int be_mcc_compl_process(struct be_adapter *adapter,
	struct be_mcc_compl *compl)
{
	u16 compl_status, extd_status;

	/* Just swap the status to host endian; mcc tag is opaquely copied
	 * from mcc_wrb */
	be_dws_le_to_cpu(compl, 4);

	compl_status = (compl->status >> CQE_STATUS_COMPL_SHIFT) &
				CQE_STATUS_COMPL_MASK;

	/* Flash writes complete asynchronously; wake the waiter */
	if ((compl->tag0 == OPCODE_COMMON_WRITE_FLASHROM) &&
		(compl->tag1 == CMD_SUBSYSTEM_COMMON)) {
		adapter->flash_status = compl_status;
		complete(&adapter->flash_compl);
	}

	if (compl_status == MCC_STATUS_SUCCESS) {
		if (((compl->tag0 == OPCODE_ETH_GET_STATISTICS) ||
			 (compl->tag0 == OPCODE_ETH_GET_PPORT_STATS)) &&
			(compl->tag1 == CMD_SUBSYSTEM_ETH)) {
			/* Pick the stats response layout by chip generation */
			if (adapter->generation == BE_GEN3) {
				if (lancer_chip(adapter)) {
					struct lancer_cmd_resp_pport_stats
						*resp = adapter->stats_cmd.va;
					be_dws_le_to_cpu(&resp->pport_stats,
						sizeof(resp->pport_stats));
				} else {
					struct be_cmd_resp_get_stats_v1 *resp =
							adapter->stats_cmd.va;

				be_dws_le_to_cpu(&resp->hw_stats,
							sizeof(resp->hw_stats));
				}
			} else {
				struct be_cmd_resp_get_stats_v0 *resp =
							adapter->stats_cmd.va;

				be_dws_le_to_cpu(&resp->hw_stats,
							sizeof(resp->hw_stats));
			}
			be_parse_stats(adapter);
			netdev_stats_update(adapter);
			adapter->stats_cmd_sent = false;
		}
	} else if ((compl_status != MCC_STATUS_NOT_SUPPORTED) &&
		   (compl->tag0 != OPCODE_COMMON_NTWK_MAC_QUERY)) {
		/* NOT_SUPPORTED and MAC_QUERY errors are expected/benign;
		 * anything else is worth a warning */
		extd_status = (compl->status >> CQE_STATUS_EXTD_SHIFT) &
				CQE_STATUS_EXTD_MASK;
		dev_warn(&adapter->pdev->dev,
		"Error in cmd completion - opcode %d, compl %d, extd %d\n",
			compl->tag0, compl_status, extd_status);
	}
	return compl_status;
}
118
119 /* Link state evt is a string of bytes; no need for endian swapping */
120 static void be_async_link_state_process(struct be_adapter *adapter,
121                 struct be_async_event_link_state *evt)
122 {
123         be_link_status_update(adapter,
124                 evt->port_link_status == ASYNC_EVENT_LINK_UP);
125 }
126
127 /* Grp5 CoS Priority evt */
128 static void be_async_grp5_cos_priority_process(struct be_adapter *adapter,
129                 struct be_async_event_grp5_cos_priority *evt)
130 {
131         if (evt->valid) {
132                 adapter->vlan_prio_bmap = evt->available_priority_bmap;
133                 adapter->recommended_prio &= ~VLAN_PRIO_MASK;
134                 adapter->recommended_prio =
135                         evt->reco_default_priority << VLAN_PRIO_SHIFT;
136         }
137 }
138
139 /* Grp5 QOS Speed evt */
140 static void be_async_grp5_qos_speed_process(struct be_adapter *adapter,
141                 struct be_async_event_grp5_qos_link_speed *evt)
142 {
143         if (evt->physical_port == adapter->port_num) {
144                 /* qos_link_speed is in units of 10 Mbps */
145                 adapter->link_speed = evt->qos_link_speed * 10;
146         }
147 }
148
149 /*Grp5 PVID evt*/
150 static void be_async_grp5_pvid_state_process(struct be_adapter *adapter,
151                 struct be_async_event_grp5_pvid_state *evt)
152 {
153         if (evt->enabled)
154                 adapter->pvid = le16_to_cpu(evt->tag);
155         else
156                 adapter->pvid = 0;
157 }
158
159 static void be_async_grp5_evt_process(struct be_adapter *adapter,
160                 u32 trailer, struct be_mcc_compl *evt)
161 {
162         u8 event_type = 0;
163
164         event_type = (trailer >> ASYNC_TRAILER_EVENT_TYPE_SHIFT) &
165                 ASYNC_TRAILER_EVENT_TYPE_MASK;
166
167         switch (event_type) {
168         case ASYNC_EVENT_COS_PRIORITY:
169                 be_async_grp5_cos_priority_process(adapter,
170                 (struct be_async_event_grp5_cos_priority *)evt);
171         break;
172         case ASYNC_EVENT_QOS_SPEED:
173                 be_async_grp5_qos_speed_process(adapter,
174                 (struct be_async_event_grp5_qos_link_speed *)evt);
175         break;
176         case ASYNC_EVENT_PVID_STATE:
177                 be_async_grp5_pvid_state_process(adapter,
178                 (struct be_async_event_grp5_pvid_state *)evt);
179         break;
180         default:
181                 dev_warn(&adapter->pdev->dev, "Unknown grp5 event!\n");
182                 break;
183         }
184 }
185
186 static inline bool is_link_state_evt(u32 trailer)
187 {
188         return ((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
189                 ASYNC_TRAILER_EVENT_CODE_MASK) ==
190                                 ASYNC_EVENT_CODE_LINK_STATE;
191 }
192
193 static inline bool is_grp5_evt(u32 trailer)
194 {
195         return (((trailer >> ASYNC_TRAILER_EVENT_CODE_SHIFT) &
196                 ASYNC_TRAILER_EVENT_CODE_MASK) ==
197                                 ASYNC_EVENT_CODE_GRP_5);
198 }
199
200 static struct be_mcc_compl *be_mcc_compl_get(struct be_adapter *adapter)
201 {
202         struct be_queue_info *mcc_cq = &adapter->mcc_obj.cq;
203         struct be_mcc_compl *compl = queue_tail_node(mcc_cq);
204
205         if (be_mcc_compl_is_new(compl)) {
206                 queue_tail_inc(mcc_cq);
207                 return compl;
208         }
209         return NULL;
210 }
211
212 void be_async_mcc_enable(struct be_adapter *adapter)
213 {
214         spin_lock_bh(&adapter->mcc_cq_lock);
215
216         be_cq_notify(adapter, adapter->mcc_obj.cq.id, true, 0);
217         adapter->mcc_obj.rearm_cq = true;
218
219         spin_unlock_bh(&adapter->mcc_cq_lock);
220 }
221
/* Stop re-arming the MCC CQ (counterpart of be_async_mcc_enable) */
void be_async_mcc_disable(struct be_adapter *adapter)
{
	adapter->mcc_obj.rearm_cq = false;
}
226
/* Drain the MCC completion queue under mcc_cq_lock: async CQEs are
 * dispatched to the event handlers, command CQEs are processed and the
 * wrb-queue used count is decremented.  Returns the number of CQEs
 * consumed; the last command completion status is stored in *status.
 */
int be_process_mcc(struct be_adapter *adapter, int *status)
{
	struct be_mcc_compl *compl;
	int num = 0;
	struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;

	spin_lock_bh(&adapter->mcc_cq_lock);
	while ((compl = be_mcc_compl_get(adapter))) {
		if (compl->flags & CQE_FLAGS_ASYNC_MASK) {
			/* Interpret flags as an async trailer */
			if (is_link_state_evt(compl->flags))
				be_async_link_state_process(adapter,
				(struct be_async_event_link_state *) compl);
			else if (is_grp5_evt(compl->flags))
				be_async_grp5_evt_process(adapter,
				compl->flags, compl);
		} else if (compl->flags & CQE_FLAGS_COMPLETED_MASK) {
				*status = be_mcc_compl_process(adapter, compl);
				atomic_dec(&mcc_obj->q.used);
		}
		/* hand the CQE back: clear its valid word */
		be_mcc_compl_use(compl);
		num++;
	}

	spin_unlock_bh(&adapter->mcc_cq_lock);
	return num;
}
254
255 /* Wait till no more pending mcc requests are present */
256 static int be_mcc_wait_compl(struct be_adapter *adapter)
257 {
258 #define mcc_timeout             120000 /* 12s timeout */
259         int i, num, status = 0;
260         struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
261
262         if (adapter->eeh_err)
263                 return -EIO;
264
265         for (i = 0; i < mcc_timeout; i++) {
266                 num = be_process_mcc(adapter, &status);
267                 if (num)
268                         be_cq_notify(adapter, mcc_obj->cq.id,
269                                 mcc_obj->rearm_cq, num);
270
271                 if (atomic_read(&mcc_obj->q.used) == 0)
272                         break;
273                 udelay(100);
274         }
275         if (i == mcc_timeout) {
276                 dev_err(&adapter->pdev->dev, "mccq poll timed out\n");
277                 return -1;
278         }
279         return status;
280 }
281
/* Notify MCC requests and wait for completion */
static int be_mcc_notify_wait(struct be_adapter *adapter)
{
	/* ring the doorbell, then poll the CQ until the queue drains */
	be_mcc_notify(adapter);
	return be_mcc_wait_compl(adapter);
}
288
289 static int be_mbox_db_ready_wait(struct be_adapter *adapter, void __iomem *db)
290 {
291         int msecs = 0;
292         u32 ready;
293
294         if (adapter->eeh_err) {
295                 dev_err(&adapter->pdev->dev,
296                         "Error detected in card.Cannot issue commands\n");
297                 return -EIO;
298         }
299
300         do {
301                 ready = ioread32(db);
302                 if (ready == 0xffffffff) {
303                         dev_err(&adapter->pdev->dev,
304                                 "pci slot disconnected\n");
305                         return -1;
306                 }
307
308                 ready &= MPU_MAILBOX_DB_RDY_MASK;
309                 if (ready)
310                         break;
311
312                 if (msecs > 4000) {
313                         dev_err(&adapter->pdev->dev, "mbox poll timed out\n");
314                         if (!lancer_chip(adapter))
315                                 be_detect_dump_ue(adapter);
316                         return -1;
317                 }
318
319                 msleep(1);
320                 msecs++;
321         } while (true);
322
323         return 0;
324 }
325
/*
 * Insert the mailbox address into the doorbell in two steps
 * Polls on the mbox doorbell till a command completion (or a timeout) occurs
 */
static int be_mbox_notify_wait(struct be_adapter *adapter)
{
	int status;
	u32 val = 0;
	void __iomem *db = adapter->db + MPU_MAILBOX_DB_OFFSET;
	struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
	struct be_mcc_mailbox *mbox = mbox_mem->va;
	struct be_mcc_compl *compl = &mbox->compl;

	/* wait for ready to be set */
	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	/* step 1: post the high half of the mbox dma address */
	val |= MPU_MAILBOX_DB_HI_MASK;
	/* at bits 2 - 31 place mbox dma addr msb bits 34 - 63 */
	val |= (upper_32_bits(mbox_mem->dma) >> 2) << 2;
	iowrite32(val, db);

	/* wait for ready to be set */
	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	/* step 2: post the low half; this kicks off the command */
	val = 0;
	/* at bits 2 - 31 place mbox dma addr lsb bits 4 - 33 */
	val |= (u32)(mbox_mem->dma >> 4) << 2;
	iowrite32(val, db);

	/* wait for the fw to finish processing the command */
	status = be_mbox_db_ready_wait(adapter, db);
	if (status != 0)
		return status;

	/* A cq entry has been made now */
	if (be_mcc_compl_is_new(compl)) {
		status = be_mcc_compl_process(adapter, &mbox->compl);
		be_mcc_compl_use(compl);
		if (status)
			return status;
	} else {
		dev_err(&adapter->pdev->dev, "invalid mailbox completion\n");
		return -1;
	}
	return 0;
}
375
376 static int be_POST_stage_get(struct be_adapter *adapter, u16 *stage)
377 {
378         u32 sem;
379
380         if (lancer_chip(adapter))
381                 sem  = ioread32(adapter->db + MPU_EP_SEMAPHORE_IF_TYPE2_OFFSET);
382         else
383                 sem  = ioread32(adapter->csr + MPU_EP_SEMAPHORE_OFFSET);
384
385         *stage = sem & EP_SEMAPHORE_POST_STAGE_MASK;
386         if ((sem >> EP_SEMAPHORE_POST_ERR_SHIFT) & EP_SEMAPHORE_POST_ERR_MASK)
387                 return -1;
388         else
389                 return 0;
390 }
391
392 int be_cmd_POST(struct be_adapter *adapter)
393 {
394         u16 stage;
395         int status, timeout = 0;
396         struct device *dev = &adapter->pdev->dev;
397
398         do {
399                 status = be_POST_stage_get(adapter, &stage);
400                 if (status) {
401                         dev_err(dev, "POST error; stage=0x%x\n", stage);
402                         return -1;
403                 } else if (stage != POST_STAGE_ARMFW_RDY) {
404                         if (msleep_interruptible(2000)) {
405                                 dev_err(dev, "Waiting for POST aborted\n");
406                                 return -EINTR;
407                         }
408                         timeout += 2;
409                 } else {
410                         return 0;
411                 }
412         } while (timeout < 40);
413
414         dev_err(dev, "POST timeout; stage=0x%x\n", stage);
415         return -1;
416 }
417
/* Payload area of a WRB carrying an embedded (inline) command */
static inline void *embedded_payload(struct be_mcc_wrb *wrb)
{
	return wrb->payload.embedded_payload;
}
422
/* Scatter-gather list of a WRB carrying a non-embedded command */
static inline struct be_sge *nonembedded_sgl(struct be_mcc_wrb *wrb)
{
	return &wrb->payload.sgl[0];
}
427
428 /* Don't touch the hdr after it's prepared */
429 static void be_wrb_hdr_prepare(struct be_mcc_wrb *wrb, int payload_len,
430                                 bool embedded, u8 sge_cnt, u32 opcode)
431 {
432         if (embedded)
433                 wrb->embedded |= MCC_WRB_EMBEDDED_MASK;
434         else
435                 wrb->embedded |= (sge_cnt & MCC_WRB_SGE_CNT_MASK) <<
436                                 MCC_WRB_SGE_CNT_SHIFT;
437         wrb->payload_length = payload_len;
438         wrb->tag0 = opcode;
439         be_dws_cpu_to_le(wrb, 8);
440 }
441
442 /* Don't touch the hdr after it's prepared */
443 static void be_cmd_hdr_prepare(struct be_cmd_req_hdr *req_hdr,
444                                 u8 subsystem, u8 opcode, int cmd_len)
445 {
446         req_hdr->opcode = opcode;
447         req_hdr->subsystem = subsystem;
448         req_hdr->request_length = cpu_to_le32(cmd_len - sizeof(*req_hdr));
449         req_hdr->version = 0;
450 }
451
452 static void be_cmd_page_addrs_prepare(struct phys_addr *pages, u32 max_pages,
453                         struct be_dma_mem *mem)
454 {
455         int i, buf_pages = min(PAGES_4K_SPANNED(mem->va, mem->size), max_pages);
456         u64 dma = (u64)mem->dma;
457
458         for (i = 0; i < buf_pages; i++) {
459                 pages[i].lo = cpu_to_le32(dma & 0xFFFFFFFF);
460                 pages[i].hi = cpu_to_le32(upper_32_bits(dma));
461                 dma += PAGE_SIZE_4K;
462         }
463 }
464
465 /* Converts interrupt delay in microseconds to multiplier value */
466 static u32 eq_delay_to_mult(u32 usec_delay)
467 {
468 #define MAX_INTR_RATE                   651042
469         const u32 round = 10;
470         u32 multiplier;
471
472         if (usec_delay == 0)
473                 multiplier = 0;
474         else {
475                 u32 interrupt_rate = 1000000 / usec_delay;
476                 /* Max delay, corresponding to the lowest interrupt rate */
477                 if (interrupt_rate == 0)
478                         multiplier = 1023;
479                 else {
480                         multiplier = (MAX_INTR_RATE - interrupt_rate) * round;
481                         multiplier /= interrupt_rate;
482                         /* Round the multiplier to the closest value.*/
483                         multiplier = (multiplier + round/2) / round;
484                         multiplier = min(multiplier, (u32)1023);
485                 }
486         }
487         return multiplier;
488 }
489
490 static inline struct be_mcc_wrb *wrb_from_mbox(struct be_adapter *adapter)
491 {
492         struct be_dma_mem *mbox_mem = &adapter->mbox_mem;
493         struct be_mcc_wrb *wrb
494                 = &((struct be_mcc_mailbox *)(mbox_mem->va))->wrb;
495         memset(wrb, 0, sizeof(*wrb));
496         return wrb;
497 }
498
499 static struct be_mcc_wrb *wrb_from_mccq(struct be_adapter *adapter)
500 {
501         struct be_queue_info *mccq = &adapter->mcc_obj.q;
502         struct be_mcc_wrb *wrb;
503
504         if (atomic_read(&mccq->used) >= mccq->len) {
505                 dev_err(&adapter->pdev->dev, "Out of MCCQ wrbs\n");
506                 return NULL;
507         }
508
509         wrb = queue_head_node(mccq);
510         queue_head_inc(mccq);
511         atomic_inc(&mccq->used);
512         memset(wrb, 0, sizeof(*wrb));
513         return wrb;
514 }
515
516 /* Tell fw we're about to start firing cmds by writing a
517  * special pattern across the wrb hdr; uses mbox
518  */
519 int be_cmd_fw_init(struct be_adapter *adapter)
520 {
521         u8 *wrb;
522         int status;
523
524         if (mutex_lock_interruptible(&adapter->mbox_lock))
525                 return -1;
526
527         wrb = (u8 *)wrb_from_mbox(adapter);
528         *wrb++ = 0xFF;
529         *wrb++ = 0x12;
530         *wrb++ = 0x34;
531         *wrb++ = 0xFF;
532         *wrb++ = 0xFF;
533         *wrb++ = 0x56;
534         *wrb++ = 0x78;
535         *wrb = 0xFF;
536
537         status = be_mbox_notify_wait(adapter);
538
539         mutex_unlock(&adapter->mbox_lock);
540         return status;
541 }
542
543 /* Tell fw we're done with firing cmds by writing a
544  * special pattern across the wrb hdr; uses mbox
545  */
546 int be_cmd_fw_clean(struct be_adapter *adapter)
547 {
548         u8 *wrb;
549         int status;
550
551         if (adapter->eeh_err)
552                 return -EIO;
553
554         if (mutex_lock_interruptible(&adapter->mbox_lock))
555                 return -1;
556
557         wrb = (u8 *)wrb_from_mbox(adapter);
558         *wrb++ = 0xFF;
559         *wrb++ = 0xAA;
560         *wrb++ = 0xBB;
561         *wrb++ = 0xFF;
562         *wrb++ = 0xFF;
563         *wrb++ = 0xCC;
564         *wrb++ = 0xDD;
565         *wrb = 0xFF;
566
567         status = be_mbox_notify_wait(adapter);
568
569         mutex_unlock(&adapter->mbox_lock);
570         return status;
571 }
/* Create an event queue via the mailbox.  On success stores the hw-assigned
 * id in eq->id and marks the queue created.  Returns the fw status. */
int be_cmd_eq_create(struct be_adapter *adapter,
		struct be_queue_info *eq, int eq_delay)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_eq_create *req;
	struct be_dma_mem *q_mem = &eq->dma_mem;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, OPCODE_COMMON_EQ_CREATE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_EQ_CREATE, sizeof(*req));

	req->num_pages =  cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));

	AMAP_SET_BITS(struct amap_eq_context, valid, req->context, 1);
	/* 4byte eqe*/
	AMAP_SET_BITS(struct amap_eq_context, size, req->context, 0);
	AMAP_SET_BITS(struct amap_eq_context, count, req->context,
			__ilog2_u32(eq->len/256));
	AMAP_SET_BITS(struct amap_eq_context, delaymult, req->context,
			eq_delay_to_mult(eq_delay));
	/* context must be little endian before it reaches the hw */
	be_dws_cpu_to_le(req->context, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_eq_create *resp = embedded_payload(wrb);
		eq->id = le16_to_cpu(resp->eq_id);
		eq->created = true;
	}

	mutex_unlock(&adapter->mbox_lock);
	return status;
}
614
/* Uses mbox */
/* Query a MAC address of the given type.  With permanent=true the factory
 * MAC is returned; otherwise the current MAC of interface if_handle.
 * The result is copied into mac_addr (ETH_ALEN bytes) on success. */
int be_cmd_mac_addr_query(struct be_adapter *adapter, u8 *mac_addr,
			u8 type, bool permanent, u32 if_handle)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mac_query *req;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_NTWK_MAC_QUERY);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_MAC_QUERY, sizeof(*req));

	req->type = type;
	if (permanent) {
		req->permanent = 1;
	} else {
		req->if_id = cpu_to_le16((u16) if_handle);
		req->permanent = 0;
	}

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_mac_query *resp = embedded_payload(wrb);
		memcpy(mac_addr, resp->mac.addr, ETH_ALEN);
	}

	mutex_unlock(&adapter->mbox_lock);
	return status;
}
652
/* Uses synchronous MCCQ */
/* Add a MAC address to interface if_id in the given domain.  On success the
 * fw-assigned pmac id is returned through *pmac_id. */
int be_cmd_pmac_add(struct be_adapter *adapter, u8 *mac_addr,
		u32 if_id, u32 *pmac_id, u32 domain)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_pmac_add *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_NTWK_PMAC_ADD);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_PMAC_ADD, sizeof(*req));

	req->hdr.domain = domain;
	req->if_id = cpu_to_le32(if_id);
	memcpy(req->mac_address, mac_addr, ETH_ALEN);

	status = be_mcc_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_pmac_add *resp = embedded_payload(wrb);
		*pmac_id = le32_to_cpu(resp->pmac_id);
	}

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
690
/* Uses synchronous MCCQ */
/* Delete pmac_id from interface if_id in domain dom */
int be_cmd_pmac_del(struct be_adapter *adapter, u32 if_id, u32 pmac_id, u32 dom)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_pmac_del *req;
	int status;

	spin_lock_bh(&adapter->mcc_lock);

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err;
	}
	req = embedded_payload(wrb);

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_NTWK_PMAC_DEL);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_NTWK_PMAC_DEL, sizeof(*req));

	req->hdr.domain = dom;
	req->if_id = cpu_to_le32(if_id);
	req->pmac_id = cpu_to_le32(pmac_id);

	status = be_mcc_notify_wait(adapter);

err:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
723
/* Uses Mbox */
/* Create a completion queue bound to event queue eq.  Lancer chips use the
 * v2 request/context layout; BE chips use the legacy one.  On success
 * stores the hw-assigned id in cq->id and marks the queue created. */
int be_cmd_cq_create(struct be_adapter *adapter,
		struct be_queue_info *cq, struct be_queue_info *eq,
		bool sol_evts, bool no_delay, int coalesce_wm)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_cq_create *req;
	struct be_dma_mem *q_mem = &cq->dma_mem;
	void *ctxt;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_CQ_CREATE);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_CQ_CREATE, sizeof(*req));

	req->num_pages =  cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
	if (lancer_chip(adapter)) {
		req->hdr.version = 2;
		req->page_size = 1; /* 1 for 4K */
		AMAP_SET_BITS(struct amap_cq_context_lancer, nodelay, ctxt,
								no_delay);
		AMAP_SET_BITS(struct amap_cq_context_lancer, count, ctxt,
						__ilog2_u32(cq->len/256));
		AMAP_SET_BITS(struct amap_cq_context_lancer, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_lancer, eventable,
								ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_lancer, eqid,
								ctxt, eq->id);
		AMAP_SET_BITS(struct amap_cq_context_lancer, armed, ctxt, 1);
	} else {
		AMAP_SET_BITS(struct amap_cq_context_be, coalescwm, ctxt,
								coalesce_wm);
		AMAP_SET_BITS(struct amap_cq_context_be, nodelay,
								ctxt, no_delay);
		AMAP_SET_BITS(struct amap_cq_context_be, count, ctxt,
						__ilog2_u32(cq->len/256));
		AMAP_SET_BITS(struct amap_cq_context_be, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_be, solevent,
								ctxt, sol_evts);
		AMAP_SET_BITS(struct amap_cq_context_be, eventable, ctxt, 1);
		AMAP_SET_BITS(struct amap_cq_context_be, eqid, ctxt, eq->id);
		AMAP_SET_BITS(struct amap_cq_context_be, armed, ctxt, 1);
	}

	/* context must be little endian before it reaches the hw */
	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_cq_create *resp = embedded_payload(wrb);
		cq->id = le16_to_cpu(resp->cq_id);
		cq->created = true;
	}

	mutex_unlock(&adapter->mbox_lock);

	return status;
}
792
793 static u32 be_encoded_q_len(int q_len)
794 {
795         u32 len_encoded = fls(q_len); /* log2(len) + 1 */
796         if (len_encoded == 16)
797                 len_encoded = 0;
798         return len_encoded;
799 }
800
/* Create the MCC queue (extended variant) bound to completion queue cq,
 * via the mailbox.  Lancer chips use the v1 request/context layout.  On
 * success stores the hw-assigned id in mccq->id and marks it created. */
int be_cmd_mccq_create(struct be_adapter *adapter,
			struct be_queue_info *mccq,
			struct be_queue_info *cq)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_req_mcc_create *req;
	struct be_dma_mem *q_mem = &mccq->dma_mem;
	void *ctxt;
	int status;

	if (mutex_lock_interruptible(&adapter->mbox_lock))
		return -1;

	wrb = wrb_from_mbox(adapter);
	req = embedded_payload(wrb);
	ctxt = &req->context;

	be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
			OPCODE_COMMON_MCC_CREATE_EXT);

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
			OPCODE_COMMON_MCC_CREATE_EXT, sizeof(*req));

	req->num_pages = cpu_to_le16(PAGES_4K_SPANNED(q_mem->va, q_mem->size));
	if (lancer_chip(adapter)) {
		req->hdr.version = 1;
		req->cq_id = cpu_to_le16(cq->id);

		AMAP_SET_BITS(struct amap_mcc_context_lancer, ring_size, ctxt,
						be_encoded_q_len(mccq->len));
		AMAP_SET_BITS(struct amap_mcc_context_lancer, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_id,
								ctxt, cq->id);
		AMAP_SET_BITS(struct amap_mcc_context_lancer, async_cq_valid,
								 ctxt, 1);

	} else {
		AMAP_SET_BITS(struct amap_mcc_context_be, valid, ctxt, 1);
		AMAP_SET_BITS(struct amap_mcc_context_be, ring_size, ctxt,
						be_encoded_q_len(mccq->len));
		AMAP_SET_BITS(struct amap_mcc_context_be, cq_id, ctxt, cq->id);
	}

	/* Subscribe to Link State and Group 5 Events(bits 1 and 5 set) */
	req->async_event_bitmap[0] = cpu_to_le32(0x00000022);
	/* context must be little endian before it reaches the hw */
	be_dws_cpu_to_le(ctxt, sizeof(req->context));

	be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

	status = be_mbox_notify_wait(adapter);
	if (!status) {
		struct be_cmd_resp_mcc_create *resp = embedded_payload(wrb);
		mccq->id = le16_to_cpu(resp->id);
		mccq->created = true;
	}
	mutex_unlock(&adapter->mbox_lock);

	return status;
}
860
/* Create an ETH TX queue whose completions go to CQ @cq.
 * Uses the mbox (not the MCC queue), so it may sleep
 * (mutex_lock_interruptible) and must not be called from atomic context.
 * On success txq->id and txq->created are filled in.
 * Returns 0 on success, -1 if the mbox lock was interrupted, or the
 * status from be_mbox_notify_wait().
 */
int be_cmd_txq_create(struct be_adapter *adapter,
                        struct be_queue_info *txq,
                        struct be_queue_info *cq)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_eth_tx_create *req;
        struct be_dma_mem *q_mem = &txq->dma_mem;
        void *ctxt;
        int status;

        if (mutex_lock_interruptible(&adapter->mbox_lock))
                return -1;

        wrb = wrb_from_mbox(adapter);
        req = embedded_payload(wrb);
        ctxt = &req->context;

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
                        OPCODE_ETH_TX_CREATE);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_TX_CREATE,
                sizeof(*req));

        /* Lancer uses the v1 request, which additionally carries the
         * interface handle in the queue context */
        if (lancer_chip(adapter)) {
                req->hdr.version = 1;
                AMAP_SET_BITS(struct amap_tx_context, if_id, ctxt,
                                        adapter->if_handle);
        }

        req->num_pages = PAGES_4K_SPANNED(q_mem->va, q_mem->size);
        req->ulp_num = BE_ULP1_NUM;
        req->type = BE_ETH_TX_RING_TYPE_STANDARD;

        AMAP_SET_BITS(struct amap_tx_context, tx_ring_size, ctxt,
                be_encoded_q_len(txq->len));
        AMAP_SET_BITS(struct amap_tx_context, ctx_valid, ctxt, 1);
        AMAP_SET_BITS(struct amap_tx_context, cq_id_send, ctxt, cq->id);

        /* context is built in host order; convert before handing to HW */
        be_dws_cpu_to_le(ctxt, sizeof(req->context));

        be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);

        status = be_mbox_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_eth_tx_create *resp = embedded_payload(wrb);
                txq->id = le16_to_cpu(resp->cid);
                txq->created = true;
        }

        mutex_unlock(&adapter->mbox_lock);

        return status;
}
914
/* Create an ETH RX queue on interface @if_id, completing into CQ @cq_id.
 * @frag_size: RX fragment size; only log2 of it is sent to firmware
 *             (fls(frag_size) - 1), so it is assumed to be a power of
 *             two -- TODO confirm callers guarantee this.
 * @rss: RSS enable flags for this ring; on success *rss_id receives the
 *       RSS ring id reported by firmware.
 * Uses mbox (may sleep).  On success rxq->id/rxq->created are set.
 */
int be_cmd_rxq_create(struct be_adapter *adapter,
                struct be_queue_info *rxq, u16 cq_id, u16 frag_size,
                u16 max_frame_size, u32 if_id, u32 rss, u8 *rss_id)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_eth_rx_create *req;
        struct be_dma_mem *q_mem = &rxq->dma_mem;
        int status;

        if (mutex_lock_interruptible(&adapter->mbox_lock))
                return -1;

        wrb = wrb_from_mbox(adapter);
        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
                        OPCODE_ETH_RX_CREATE);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH, OPCODE_ETH_RX_CREATE,
                sizeof(*req));

        req->cq_id = cpu_to_le16(cq_id);
        req->frag_size = fls(frag_size) - 1;    /* log2(frag_size) */
        req->num_pages = 2;
        be_cmd_page_addrs_prepare(req->pages, ARRAY_SIZE(req->pages), q_mem);
        req->interface_id = cpu_to_le32(if_id);
        req->max_frame_size = cpu_to_le16(max_frame_size);
        req->rss_queue = cpu_to_le32(rss);

        status = be_mbox_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_eth_rx_create *resp = embedded_payload(wrb);
                rxq->id = le16_to_cpu(resp->id);
                rxq->created = true;
                *rss_id = resp->rss_id;
        }

        mutex_unlock(&adapter->mbox_lock);

        return status;
}
957
/* Generic destroyer function for all types of queues
 * (EQ/CQ/TXQ/RXQ/MCCQ).  @queue_type selects the subsystem/opcode
 * pair; an unknown type is a driver bug and triggers BUG().
 * Uses Mbox (may sleep).  Bails out with -EIO if an EEH/PCI error
 * has already been detected on this adapter.
 */
int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
                int queue_type)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_q_destroy *req;
        u8 subsys = 0, opcode = 0;
        int status;

        if (adapter->eeh_err)
                return -EIO;

        if (mutex_lock_interruptible(&adapter->mbox_lock))
                return -1;

        wrb = wrb_from_mbox(adapter);
        req = embedded_payload(wrb);

        switch (queue_type) {
        case QTYPE_EQ:
                subsys = CMD_SUBSYSTEM_COMMON;
                opcode = OPCODE_COMMON_EQ_DESTROY;
                break;
        case QTYPE_CQ:
                subsys = CMD_SUBSYSTEM_COMMON;
                opcode = OPCODE_COMMON_CQ_DESTROY;
                break;
        case QTYPE_TXQ:
                subsys = CMD_SUBSYSTEM_ETH;
                opcode = OPCODE_ETH_TX_DESTROY;
                break;
        case QTYPE_RXQ:
                subsys = CMD_SUBSYSTEM_ETH;
                opcode = OPCODE_ETH_RX_DESTROY;
                break;
        case QTYPE_MCCQ:
                subsys = CMD_SUBSYSTEM_COMMON;
                opcode = OPCODE_COMMON_MCC_DESTROY;
                break;
        default:
                BUG();
        }

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0, opcode);

        be_cmd_hdr_prepare(&req->hdr, subsys, opcode, sizeof(*req));
        req->id = cpu_to_le16(q->id);

        status = be_mbox_notify_wait(adapter);

        mutex_unlock(&adapter->mbox_lock);

        return status;
}
1014
/* Create an rx filtering policy configuration on an i/f.
 * @cap_flags/@en_flags: interface capability and enable flag bitmaps.
 * @pmac_invalid: when false, @mac is programmed as the primary MAC and
 *                *pmac_id receives its handle on success.
 * *if_handle always receives the new interface id on success.
 * Uses mbox (may sleep).
 */
int be_cmd_if_create(struct be_adapter *adapter, u32 cap_flags, u32 en_flags,
                u8 *mac, bool pmac_invalid, u32 *if_handle, u32 *pmac_id,
                u32 domain)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_if_create *req;
        int status;

        if (mutex_lock_interruptible(&adapter->mbox_lock))
                return -1;

        wrb = wrb_from_mbox(adapter);
        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
                        OPCODE_COMMON_NTWK_INTERFACE_CREATE);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_NTWK_INTERFACE_CREATE, sizeof(*req));

        req->hdr.domain = domain;
        req->capability_flags = cpu_to_le32(cap_flags);
        req->enable_flags = cpu_to_le32(en_flags);
        req->pmac_invalid = pmac_invalid;
        if (!pmac_invalid)
                memcpy(req->mac_addr, mac, ETH_ALEN);

        status = be_mbox_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_if_create *resp = embedded_payload(wrb);
                *if_handle = le32_to_cpu(resp->interface_id);
                if (!pmac_invalid)
                        *pmac_id = le32_to_cpu(resp->pmac_id);
        }

        mutex_unlock(&adapter->mbox_lock);
        return status;
}
1056
1057 /* Uses mbox */
1058 int be_cmd_if_destroy(struct be_adapter *adapter, u32 interface_id, u32 domain)
1059 {
1060         struct be_mcc_wrb *wrb;
1061         struct be_cmd_req_if_destroy *req;
1062         int status;
1063
1064         if (adapter->eeh_err)
1065                 return -EIO;
1066
1067         if (mutex_lock_interruptible(&adapter->mbox_lock))
1068                 return -1;
1069
1070         wrb = wrb_from_mbox(adapter);
1071         req = embedded_payload(wrb);
1072
1073         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1074                         OPCODE_COMMON_NTWK_INTERFACE_DESTROY);
1075
1076         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1077                 OPCODE_COMMON_NTWK_INTERFACE_DESTROY, sizeof(*req));
1078
1079         req->hdr.domain = domain;
1080         req->interface_id = cpu_to_le32(interface_id);
1081
1082         status = be_mbox_notify_wait(adapter);
1083
1084         mutex_unlock(&adapter->mbox_lock);
1085
1086         return status;
1087 }
1088
/* Get stats is a non embedded command: the request is not embedded inside
 * WRB but is a separate dma memory block (@nonemb_cmd).
 * Uses asynchronous MCC: the request is posted and the function returns
 * without waiting; completion is handled elsewhere (stats_cmd_sent is
 * set so the completion path knows a request is in flight).
 * Piggybacks a die-temperature query every be_get_temp_freq calls.
 */
int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_hdr *hdr;
        struct be_sge *sge;
        int status = 0;

        if (MODULO(adapter->work_counter, be_get_temp_freq) == 0)
                be_cmd_get_die_temperature(adapter);

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        hdr = nonemb_cmd->va;
        sge = nonembedded_sgl(wrb);

        be_wrb_hdr_prepare(wrb, nonemb_cmd->size, false, 1,
                        OPCODE_ETH_GET_STATISTICS);

        be_cmd_hdr_prepare(hdr, CMD_SUBSYSTEM_ETH,
                OPCODE_ETH_GET_STATISTICS, nonemb_cmd->size);

        /* GEN3 firmware uses the v1 stats layout */
        if (adapter->generation == BE_GEN3)
                hdr->version = 1;

        /* NOTE(review): tag1 presumably lets the MCC completion handler
         * identify the subsystem of this async command -- confirm */
        wrb->tag1 = CMD_SUBSYSTEM_ETH;
        sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
        sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
        sge->len = cpu_to_le32(nonemb_cmd->size);

        be_mcc_notify(adapter);
        adapter->stats_cmd_sent = true;

err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}
1134
/* Lancer Stats: physical-port statistics query for Lancer chips.
 * Non-embedded command (request lives in @nonemb_cmd DMA memory).
 * Asynchronous MCC: posts the request and returns without waiting;
 * sets stats_cmd_sent for the completion path.
 */
int lancer_cmd_get_pport_stats(struct be_adapter *adapter,
                                struct be_dma_mem *nonemb_cmd)
{

        struct be_mcc_wrb *wrb;
        struct lancer_cmd_req_pport_stats *req;
        struct be_sge *sge;
        int status = 0;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        req = nonemb_cmd->va;
        sge = nonembedded_sgl(wrb);

        be_wrb_hdr_prepare(wrb, nonemb_cmd->size, false, 1,
                        OPCODE_ETH_GET_PPORT_STATS);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
                        OPCODE_ETH_GET_PPORT_STATS, nonemb_cmd->size);


        /* query (not reset) the stats of this adapter's physical port */
        req->cmd_params.params.pport_num = cpu_to_le16(adapter->port_num);
        req->cmd_params.params.reset_stats = 0;

        wrb->tag1 = CMD_SUBSYSTEM_ETH;
        sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
        sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
        sge->len = cpu_to_le32(nonemb_cmd->size);

        be_mcc_notify(adapter);
        adapter->stats_cmd_sent = true;

err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}
1177
/* Query link state.  Uses synchronous mcc (callable from BH context).
 * *link_up is preset to false and only set true when firmware reports a
 * non-zero mac_speed; *mac_speed and *link_speed are only written when
 * the link is up.  @dom is currently unused by this implementation.
 */
int be_cmd_link_status_query(struct be_adapter *adapter,
                        bool *link_up, u8 *mac_speed, u16 *link_speed, u32 dom)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_link_status *req;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        req = embedded_payload(wrb);

        *link_up = false;

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
                        OPCODE_COMMON_NTWK_LINK_STATUS_QUERY);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_NTWK_LINK_STATUS_QUERY, sizeof(*req));

        status = be_mcc_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
                if (resp->mac_speed != PHY_LINK_SPEED_ZERO) {
                        *link_up = true;
                        *link_speed = le16_to_cpu(resp->link_speed);
                        *mac_speed = resp->mac_speed;
                }
        }

err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}
1217
/* Read the controller's on-die temperature into drv_stats.
 * Uses synchronous mcc.  On the first failure, be_get_temp_freq is
 * zeroed so be_cmd_get_stats() stops re-issuing this query.
 */
int be_cmd_get_die_temperature(struct be_adapter *adapter)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_get_cntl_addnl_attribs *req;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
                        OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_GET_CNTL_ADDITIONAL_ATTRIBUTES, sizeof(*req));

        status = be_mcc_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_get_cntl_addnl_attribs *resp =
                                                embedded_payload(wrb);
                adapter->drv_stats.be_on_die_temperature =
                                                resp->on_die_temperature;
        }
        /* If IOCTL fails once, do not bother issuing it again */
        else
                be_get_temp_freq = 0;

err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}
1255
/* Query the size of the FAT (flash area table) log.
 * Uses synchronous mcc.  On success *log_size is set to the firmware
 * log size minus the leading u32 header; it is left untouched when
 * @log_size is NULL or the firmware reports a zero size.
 */
int be_cmd_get_reg_len(struct be_adapter *adapter, u32 *log_size)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_get_fat *req;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
                        OPCODE_COMMON_MANAGE_FAT);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_MANAGE_FAT, sizeof(*req));
        req->fat_operation = cpu_to_le32(QUERY_FAT);
        status = be_mcc_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_get_fat *resp = embedded_payload(wrb);
                if (log_size && resp->log_size)
                        *log_size = le32_to_cpu(resp->log_size) -
                                        sizeof(u32);
        }
err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}
1289
1290 void be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
1291 {
1292         struct be_dma_mem get_fat_cmd;
1293         struct be_mcc_wrb *wrb;
1294         struct be_cmd_req_get_fat *req;
1295         struct be_sge *sge;
1296         u32 offset = 0, total_size, buf_size,
1297                                 log_offset = sizeof(u32), payload_len;
1298         int status;
1299
1300         if (buf_len == 0)
1301                 return;
1302
1303         total_size = buf_len;
1304
1305         get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024;
1306         get_fat_cmd.va = pci_alloc_consistent(adapter->pdev,
1307                         get_fat_cmd.size,
1308                         &get_fat_cmd.dma);
1309         if (!get_fat_cmd.va) {
1310                 status = -ENOMEM;
1311                 dev_err(&adapter->pdev->dev,
1312                 "Memory allocation failure while retrieving FAT data\n");
1313                 return;
1314         }
1315
1316         spin_lock_bh(&adapter->mcc_lock);
1317
1318         while (total_size) {
1319                 buf_size = min(total_size, (u32)60*1024);
1320                 total_size -= buf_size;
1321
1322                 wrb = wrb_from_mccq(adapter);
1323                 if (!wrb) {
1324                         status = -EBUSY;
1325                         goto err;
1326                 }
1327                 req = get_fat_cmd.va;
1328                 sge = nonembedded_sgl(wrb);
1329
1330                 payload_len = sizeof(struct be_cmd_req_get_fat) + buf_size;
1331                 be_wrb_hdr_prepare(wrb, payload_len, false, 1,
1332                                 OPCODE_COMMON_MANAGE_FAT);
1333
1334                 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1335                                 OPCODE_COMMON_MANAGE_FAT, payload_len);
1336
1337                 sge->pa_hi = cpu_to_le32(upper_32_bits(get_fat_cmd.dma));
1338                 sge->pa_lo = cpu_to_le32(get_fat_cmd.dma & 0xFFFFFFFF);
1339                 sge->len = cpu_to_le32(get_fat_cmd.size);
1340
1341                 req->fat_operation = cpu_to_le32(RETRIEVE_FAT);
1342                 req->read_log_offset = cpu_to_le32(log_offset);
1343                 req->read_log_length = cpu_to_le32(buf_size);
1344                 req->data_buffer_size = cpu_to_le32(buf_size);
1345
1346                 status = be_mcc_notify_wait(adapter);
1347                 if (!status) {
1348                         struct be_cmd_resp_get_fat *resp = get_fat_cmd.va;
1349                         memcpy(buf + offset,
1350                                 resp->data_buffer,
1351                                 resp->read_log_length);
1352                 } else {
1353                         dev_err(&adapter->pdev->dev, "FAT Table Retrieve error\n");
1354                         goto err;
1355                 }
1356                 offset += buf_size;
1357                 log_offset += buf_size;
1358         }
1359 err:
1360         pci_free_consistent(adapter->pdev, get_fat_cmd.size,
1361                         get_fat_cmd.va,
1362                         get_fat_cmd.dma);
1363         spin_unlock_bh(&adapter->mcc_lock);
1364 }
1365
1366 /* Uses Mbox */
1367 int be_cmd_get_fw_ver(struct be_adapter *adapter, char *fw_ver)
1368 {
1369         struct be_mcc_wrb *wrb;
1370         struct be_cmd_req_get_fw_version *req;
1371         int status;
1372
1373         if (mutex_lock_interruptible(&adapter->mbox_lock))
1374                 return -1;
1375
1376         wrb = wrb_from_mbox(adapter);
1377         req = embedded_payload(wrb);
1378
1379         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1380                         OPCODE_COMMON_GET_FW_VERSION);
1381
1382         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1383                 OPCODE_COMMON_GET_FW_VERSION, sizeof(*req));
1384
1385         status = be_mbox_notify_wait(adapter);
1386         if (!status) {
1387                 struct be_cmd_resp_get_fw_version *resp = embedded_payload(wrb);
1388                 strncpy(fw_ver, resp->firmware_version_string, FW_VER_LEN);
1389         }
1390
1391         mutex_unlock(&adapter->mbox_lock);
1392         return status;
1393 }
1394
1395 /* set the EQ delay interval of an EQ to specified value
1396  * Uses async mcc
1397  */
1398 int be_cmd_modify_eqd(struct be_adapter *adapter, u32 eq_id, u32 eqd)
1399 {
1400         struct be_mcc_wrb *wrb;
1401         struct be_cmd_req_modify_eq_delay *req;
1402         int status = 0;
1403
1404         spin_lock_bh(&adapter->mcc_lock);
1405
1406         wrb = wrb_from_mccq(adapter);
1407         if (!wrb) {
1408                 status = -EBUSY;
1409                 goto err;
1410         }
1411         req = embedded_payload(wrb);
1412
1413         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1414                         OPCODE_COMMON_MODIFY_EQ_DELAY);
1415
1416         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1417                 OPCODE_COMMON_MODIFY_EQ_DELAY, sizeof(*req));
1418
1419         req->num_eq = cpu_to_le32(1);
1420         req->delay[0].eq_id = cpu_to_le32(eq_id);
1421         req->delay[0].phase = 0;
1422         req->delay[0].delay_multiplier = cpu_to_le32(eqd);
1423
1424         be_mcc_notify(adapter);
1425
1426 err:
1427         spin_unlock_bh(&adapter->mcc_lock);
1428         return status;
1429 }
1430
/* Configure the VLAN filter table of interface @if_id.
 * @vtag_array/@num: VLAN tags to allow (ignored when @promiscuous).
 * Uses synchronous mcc.
 * NOTE(review): interface_id, num_vlan and the copied vtag entries are
 * not byte-swapped here, unlike most other commands in this file --
 * confirm the firmware expects these fields in native/raw order.
 */
int be_cmd_vlan_config(struct be_adapter *adapter, u32 if_id, u16 *vtag_array,
                        u32 num, bool untagged, bool promiscuous)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_vlan_config *req;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
                        OPCODE_COMMON_NTWK_VLAN_CONFIG);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_NTWK_VLAN_CONFIG, sizeof(*req));

        req->interface_id = if_id;
        req->promiscuous = promiscuous;
        req->untagged = untagged;
        req->num_vlan = num;
        if (!promiscuous) {
                memcpy(req->normal_vlan, vtag_array,
                        req->num_vlan * sizeof(vtag_array[0]));
        }

        status = be_mcc_notify_wait(adapter);

err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}
1469
/* Enable/disable promiscuous mode on the adapter's interface.
 * Uses MCC for this command as it may be called in BH context;
 * synchronous mcc.  The RX_FILTER request is non-embedded, so a
 * temporary coherent DMA buffer is allocated for it and freed on exit
 * (allocation happens before taking mcc_lock since it may not be done
 * under the BH spinlock).
 */
int be_cmd_promiscuous_config(struct be_adapter *adapter, bool en)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_rx_filter *req;
        struct be_dma_mem promiscous_cmd;
        struct be_sge *sge;
        int status;

        memset(&promiscous_cmd, 0, sizeof(struct be_dma_mem));
        promiscous_cmd.size = sizeof(struct be_cmd_req_rx_filter);
        promiscous_cmd.va = pci_alloc_consistent(adapter->pdev,
                                promiscous_cmd.size, &promiscous_cmd.dma);
        if (!promiscous_cmd.va) {
                dev_err(&adapter->pdev->dev,
                                "Memory allocation failure\n");
                return -ENOMEM;
        }

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }

        req = promiscous_cmd.va;
        sge = nonembedded_sgl(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
                                        OPCODE_COMMON_NTWK_RX_FILTER);
        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                        OPCODE_COMMON_NTWK_RX_FILTER, sizeof(*req));

        req->if_id = cpu_to_le32(adapter->if_handle);
        /* mask selects the PROMISCUOUS bit; if_flags sets/clears it */
        req->if_flags_mask = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS);
        if (en)
                req->if_flags = cpu_to_le32(BE_IF_FLAGS_PROMISCUOUS);

        sge->pa_hi = cpu_to_le32(upper_32_bits(promiscous_cmd.dma));
        sge->pa_lo = cpu_to_le32(promiscous_cmd.dma & 0xFFFFFFFF);
        sge->len = cpu_to_le32(promiscous_cmd.size);

        status = be_mcc_notify_wait(adapter);

err:
        spin_unlock_bh(&adapter->mcc_lock);
        pci_free_consistent(adapter->pdev, promiscous_cmd.size,
                        promiscous_cmd.va, promiscous_cmd.dma);
        return status;
}
1524
/*
 * Program the multicast MAC list of interface @if_id from @netdev.
 * Uses MCC for this command as it may be called in BH context.
 * (netdev == NULL) => multicast promiscuous.
 * The request is non-embedded; @mem is a caller-provided DMA buffer.
 * NOTE(review): netdev_mc_count() addresses are copied into req->mac
 * with no bound check -- assumes @mem/req->mac is sized for the
 * maximum multicast list; confirm against the caller.
 */
int be_cmd_multicast_set(struct be_adapter *adapter, u32 if_id,
                struct net_device *netdev, struct be_dma_mem *mem)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_mcast_mac_config *req = mem->va;
        struct be_sge *sge;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        sge = nonembedded_sgl(wrb);
        memset(req, 0, sizeof(*req));

        be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
                        OPCODE_COMMON_NTWK_MULTICAST_SET);
        sge->pa_hi = cpu_to_le32(upper_32_bits(mem->dma));
        sge->pa_lo = cpu_to_le32(mem->dma & 0xFFFFFFFF);
        sge->len = cpu_to_le32(mem->size);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_NTWK_MULTICAST_SET, sizeof(*req));

        req->interface_id = if_id;
        if (netdev) {
                int i;
                struct netdev_hw_addr *ha;

                req->num_mac = cpu_to_le16(netdev_mc_count(netdev));

                i = 0;
                netdev_for_each_mc_addr(ha, netdev)
                        memcpy(req->mac[i++].byte, ha->addr, ETH_ALEN);
        } else {
                req->promiscuous = 1;
        }

        status = be_mcc_notify_wait(adapter);

err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}
1576
1577 /* Uses synchrounous mcc */
1578 int be_cmd_set_flow_control(struct be_adapter *adapter, u32 tx_fc, u32 rx_fc)
1579 {
1580         struct be_mcc_wrb *wrb;
1581         struct be_cmd_req_set_flow_control *req;
1582         int status;
1583
1584         spin_lock_bh(&adapter->mcc_lock);
1585
1586         wrb = wrb_from_mccq(adapter);
1587         if (!wrb) {
1588                 status = -EBUSY;
1589                 goto err;
1590         }
1591         req = embedded_payload(wrb);
1592
1593         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1594                         OPCODE_COMMON_SET_FLOW_CONTROL);
1595
1596         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1597                 OPCODE_COMMON_SET_FLOW_CONTROL, sizeof(*req));
1598
1599         req->tx_flow_control = cpu_to_le16((u16)tx_fc);
1600         req->rx_flow_control = cpu_to_le16((u16)rx_fc);
1601
1602         status = be_mcc_notify_wait(adapter);
1603
1604 err:
1605         spin_unlock_bh(&adapter->mcc_lock);
1606         return status;
1607 }
1608
/* Query the current TX/RX pause (flow control) settings into
 * *tx_fc/*rx_fc.  Uses sync mcc; outputs are only written on success.
 */
int be_cmd_get_flow_control(struct be_adapter *adapter, u32 *tx_fc, u32 *rx_fc)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_get_flow_control *req;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
                        OPCODE_COMMON_GET_FLOW_CONTROL);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_GET_FLOW_CONTROL, sizeof(*req));

        status = be_mcc_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_get_flow_control *resp =
                                                embedded_payload(wrb);
                *tx_fc = le16_to_cpu(resp->tx_flow_control);
                *rx_fc = le16_to_cpu(resp->rx_flow_control);
        }

err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}
1643
/* Query firmware configuration: physical port number, function mode
 * and function capabilities.  Outputs are only written on success.
 * Uses mbox (may sleep).
 */
int be_cmd_query_fw_cfg(struct be_adapter *adapter, u32 *port_num,
                u32 *mode, u32 *caps)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_query_fw_cfg *req;
        int status;

        if (mutex_lock_interruptible(&adapter->mbox_lock))
                return -1;

        wrb = wrb_from_mbox(adapter);
        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
                        OPCODE_COMMON_QUERY_FIRMWARE_CONFIG);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_QUERY_FIRMWARE_CONFIG, sizeof(*req));

        status = be_mbox_notify_wait(adapter);
        if (!status) {
                struct be_cmd_resp_query_fw_cfg *resp = embedded_payload(wrb);
                *port_num = le32_to_cpu(resp->phys_port);
                *mode = le32_to_cpu(resp->function_mode);
                *caps = le32_to_cpu(resp->function_caps);
        }

        mutex_unlock(&adapter->mbox_lock);
        return status;
}
1675
1676 /* Uses mbox */
1677 int be_cmd_reset_function(struct be_adapter *adapter)
1678 {
1679         struct be_mcc_wrb *wrb;
1680         struct be_cmd_req_hdr *req;
1681         int status;
1682
1683         if (mutex_lock_interruptible(&adapter->mbox_lock))
1684                 return -1;
1685
1686         wrb = wrb_from_mbox(adapter);
1687         req = embedded_payload(wrb);
1688
1689         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1690                         OPCODE_COMMON_FUNCTION_RESET);
1691
1692         be_cmd_hdr_prepare(req, CMD_SUBSYSTEM_COMMON,
1693                 OPCODE_COMMON_FUNCTION_RESET, sizeof(*req));
1694
1695         status = be_mbox_notify_wait(adapter);
1696
1697         mutex_unlock(&adapter->mbox_lock);
1698         return status;
1699 }
1700
1701 int be_cmd_rss_config(struct be_adapter *adapter, u8 *rsstable, u16 table_size)
1702 {
1703         struct be_mcc_wrb *wrb;
1704         struct be_cmd_req_rss_config *req;
1705         u32 myhash[10];
1706         int status;
1707
1708         if (mutex_lock_interruptible(&adapter->mbox_lock))
1709                 return -1;
1710
1711         wrb = wrb_from_mbox(adapter);
1712         req = embedded_payload(wrb);
1713
1714         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1715                 OPCODE_ETH_RSS_CONFIG);
1716
1717         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1718                 OPCODE_ETH_RSS_CONFIG, sizeof(*req));
1719
1720         req->if_id = cpu_to_le32(adapter->if_handle);
1721         req->enable_rss = cpu_to_le16(RSS_ENABLE_TCP_IPV4 | RSS_ENABLE_IPV4);
1722         req->cpu_table_size_log2 = cpu_to_le16(fls(table_size) - 1);
1723         memcpy(req->cpu_table, rsstable, table_size);
1724         memcpy(req->hash, myhash, sizeof(myhash));
1725         be_dws_cpu_to_le(req->hash, sizeof(req->hash));
1726
1727         status = be_mbox_notify_wait(adapter);
1728
1729         mutex_unlock(&adapter->mbox_lock);
1730         return status;
1731 }
1732
/* Set the beacon LED state of physical port @port_num.
 * @bcn/@sts: beacon and status durations passed through to firmware.
 * Uses sync mcc.
 */
int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
                        u8 bcn, u8 sts, u8 state)
{
        struct be_mcc_wrb *wrb;
        struct be_cmd_req_enable_disable_beacon *req;
        int status;

        spin_lock_bh(&adapter->mcc_lock);

        wrb = wrb_from_mccq(adapter);
        if (!wrb) {
                status = -EBUSY;
                goto err;
        }
        req = embedded_payload(wrb);

        be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
                        OPCODE_COMMON_ENABLE_DISABLE_BEACON);

        be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
                OPCODE_COMMON_ENABLE_DISABLE_BEACON, sizeof(*req));

        req->port_num = port_num;
        req->beacon_state = state;
        req->beacon_duration = bcn;
        req->status_duration = sts;

        status = be_mcc_notify_wait(adapter);

err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
}
1767
1768 /* Uses sync mcc */
1769 int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
1770 {
1771         struct be_mcc_wrb *wrb;
1772         struct be_cmd_req_get_beacon_state *req;
1773         int status;
1774
1775         spin_lock_bh(&adapter->mcc_lock);
1776
1777         wrb = wrb_from_mccq(adapter);
1778         if (!wrb) {
1779                 status = -EBUSY;
1780                 goto err;
1781         }
1782         req = embedded_payload(wrb);
1783
1784         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1785                         OPCODE_COMMON_GET_BEACON_STATE);
1786
1787         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1788                 OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req));
1789
1790         req->port_num = port_num;
1791
1792         status = be_mcc_notify_wait(adapter);
1793         if (!status) {
1794                 struct be_cmd_resp_get_beacon_state *resp =
1795                                                 embedded_payload(wrb);
1796                 *state = resp->beacon_state;
1797         }
1798
1799 err:
1800         spin_unlock_bh(&adapter->mcc_lock);
1801         return status;
1802 }
1803
/* Issues an async WRITE_FLASHROM cmd via the MCC queue using the caller's
 * non-embedded DMA buffer @cmd, then waits (with the mcc_lock released)
 * for the completion handler to signal adapter->flash_compl.
 * Returns -EBUSY if no WRB is free, -1 on a 12s timeout, otherwise the
 * firmware status captured in adapter->flash_status.
 */
int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
			u32 flash_type, u32 flash_opcode, u32 buf_size)
{
	struct be_mcc_wrb *wrb;
	struct be_cmd_write_flashrom *req;
	struct be_sge *sge;
	int status;

	spin_lock_bh(&adapter->mcc_lock);
	/* cleared here; the async completion handler fills it in later */
	adapter->flash_status = 0;

	wrb = wrb_from_mccq(adapter);
	if (!wrb) {
		status = -EBUSY;
		goto err_unlock;
	}
	/* request lives in the caller-provided DMA buffer, not the WRB */
	req = cmd->va;
	sge = nonembedded_sgl(wrb);

	be_wrb_hdr_prepare(wrb, cmd->size, false, 1,
			OPCODE_COMMON_WRITE_FLASHROM);
	/* tag lets the completion handler recognize this flash cmd */
	wrb->tag1 = CMD_SUBSYSTEM_COMMON;

	be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
		OPCODE_COMMON_WRITE_FLASHROM, cmd->size);
	sge->pa_hi = cpu_to_le32(upper_32_bits(cmd->dma));
	sge->pa_lo = cpu_to_le32(cmd->dma & 0xFFFFFFFF);
	sge->len = cpu_to_le32(cmd->size);

	req->params.op_type = cpu_to_le32(flash_type);
	req->params.op_code = cpu_to_le32(flash_opcode);
	req->params.data_buf_size = cpu_to_le32(buf_size);

	/* fire-and-wait: notify, drop the lock, then block on the
	 * completion instead of polling the MCC queue */
	be_mcc_notify(adapter);
	spin_unlock_bh(&adapter->mcc_lock);

	if (!wait_for_completion_timeout(&adapter->flash_compl,
			msecs_to_jiffies(12000)))
		status = -1;
	else
		status = adapter->flash_status;

	return status;

err_unlock:
	spin_unlock_bh(&adapter->mcc_lock);
	return status;
}
1852
1853 int be_cmd_get_flash_crc(struct be_adapter *adapter, u8 *flashed_crc,
1854                          int offset)
1855 {
1856         struct be_mcc_wrb *wrb;
1857         struct be_cmd_write_flashrom *req;
1858         int status;
1859
1860         spin_lock_bh(&adapter->mcc_lock);
1861
1862         wrb = wrb_from_mccq(adapter);
1863         if (!wrb) {
1864                 status = -EBUSY;
1865                 goto err;
1866         }
1867         req = embedded_payload(wrb);
1868
1869         be_wrb_hdr_prepare(wrb, sizeof(*req)+4, true, 0,
1870                         OPCODE_COMMON_READ_FLASHROM);
1871
1872         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1873                 OPCODE_COMMON_READ_FLASHROM, sizeof(*req)+4);
1874
1875         req->params.op_type = cpu_to_le32(IMG_TYPE_REDBOOT);
1876         req->params.op_code = cpu_to_le32(FLASHROM_OPER_REPORT);
1877         req->params.offset = cpu_to_le32(offset);
1878         req->params.data_buf_size = cpu_to_le32(0x4);
1879
1880         status = be_mcc_notify_wait(adapter);
1881         if (!status)
1882                 memcpy(flashed_crc, req->params.data_buf, 4);
1883
1884 err:
1885         spin_unlock_bh(&adapter->mcc_lock);
1886         return status;
1887 }
1888
1889 int be_cmd_enable_magic_wol(struct be_adapter *adapter, u8 *mac,
1890                                 struct be_dma_mem *nonemb_cmd)
1891 {
1892         struct be_mcc_wrb *wrb;
1893         struct be_cmd_req_acpi_wol_magic_config *req;
1894         struct be_sge *sge;
1895         int status;
1896
1897         spin_lock_bh(&adapter->mcc_lock);
1898
1899         wrb = wrb_from_mccq(adapter);
1900         if (!wrb) {
1901                 status = -EBUSY;
1902                 goto err;
1903         }
1904         req = nonemb_cmd->va;
1905         sge = nonembedded_sgl(wrb);
1906
1907         be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
1908                         OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG);
1909
1910         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_ETH,
1911                 OPCODE_ETH_ACPI_WOL_MAGIC_CONFIG, sizeof(*req));
1912         memcpy(req->magic_mac, mac, ETH_ALEN);
1913
1914         sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
1915         sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
1916         sge->len = cpu_to_le32(nonemb_cmd->size);
1917
1918         status = be_mcc_notify_wait(adapter);
1919
1920 err:
1921         spin_unlock_bh(&adapter->mcc_lock);
1922         return status;
1923 }
1924
1925 int be_cmd_set_loopback(struct be_adapter *adapter, u8 port_num,
1926                         u8 loopback_type, u8 enable)
1927 {
1928         struct be_mcc_wrb *wrb;
1929         struct be_cmd_req_set_lmode *req;
1930         int status;
1931
1932         spin_lock_bh(&adapter->mcc_lock);
1933
1934         wrb = wrb_from_mccq(adapter);
1935         if (!wrb) {
1936                 status = -EBUSY;
1937                 goto err;
1938         }
1939
1940         req = embedded_payload(wrb);
1941
1942         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1943                                 OPCODE_LOWLEVEL_SET_LOOPBACK_MODE);
1944
1945         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
1946                         OPCODE_LOWLEVEL_SET_LOOPBACK_MODE,
1947                         sizeof(*req));
1948
1949         req->src_port = port_num;
1950         req->dest_port = port_num;
1951         req->loopback_type = loopback_type;
1952         req->loopback_state = enable;
1953
1954         status = be_mcc_notify_wait(adapter);
1955 err:
1956         spin_unlock_bh(&adapter->mcc_lock);
1957         return status;
1958 }
1959
1960 int be_cmd_loopback_test(struct be_adapter *adapter, u32 port_num,
1961                 u32 loopback_type, u32 pkt_size, u32 num_pkts, u64 pattern)
1962 {
1963         struct be_mcc_wrb *wrb;
1964         struct be_cmd_req_loopback_test *req;
1965         int status;
1966
1967         spin_lock_bh(&adapter->mcc_lock);
1968
1969         wrb = wrb_from_mccq(adapter);
1970         if (!wrb) {
1971                 status = -EBUSY;
1972                 goto err;
1973         }
1974
1975         req = embedded_payload(wrb);
1976
1977         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
1978                                 OPCODE_LOWLEVEL_LOOPBACK_TEST);
1979
1980         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
1981                         OPCODE_LOWLEVEL_LOOPBACK_TEST, sizeof(*req));
1982         req->hdr.timeout = cpu_to_le32(4);
1983
1984         req->pattern = cpu_to_le64(pattern);
1985         req->src_port = cpu_to_le32(port_num);
1986         req->dest_port = cpu_to_le32(port_num);
1987         req->pkt_size = cpu_to_le32(pkt_size);
1988         req->num_pkts = cpu_to_le32(num_pkts);
1989         req->loopback_type = cpu_to_le32(loopback_type);
1990
1991         status = be_mcc_notify_wait(adapter);
1992         if (!status) {
1993                 struct be_cmd_resp_loopback_test *resp = embedded_payload(wrb);
1994                 status = le32_to_cpu(resp->status);
1995         }
1996
1997 err:
1998         spin_unlock_bh(&adapter->mcc_lock);
1999         return status;
2000 }
2001
2002 int be_cmd_ddr_dma_test(struct be_adapter *adapter, u64 pattern,
2003                                 u32 byte_cnt, struct be_dma_mem *cmd)
2004 {
2005         struct be_mcc_wrb *wrb;
2006         struct be_cmd_req_ddrdma_test *req;
2007         struct be_sge *sge;
2008         int status;
2009         int i, j = 0;
2010
2011         spin_lock_bh(&adapter->mcc_lock);
2012
2013         wrb = wrb_from_mccq(adapter);
2014         if (!wrb) {
2015                 status = -EBUSY;
2016                 goto err;
2017         }
2018         req = cmd->va;
2019         sge = nonembedded_sgl(wrb);
2020         be_wrb_hdr_prepare(wrb, cmd->size, false, 1,
2021                                 OPCODE_LOWLEVEL_HOST_DDR_DMA);
2022         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_LOWLEVEL,
2023                         OPCODE_LOWLEVEL_HOST_DDR_DMA, cmd->size);
2024
2025         sge->pa_hi = cpu_to_le32(upper_32_bits(cmd->dma));
2026         sge->pa_lo = cpu_to_le32(cmd->dma & 0xFFFFFFFF);
2027         sge->len = cpu_to_le32(cmd->size);
2028
2029         req->pattern = cpu_to_le64(pattern);
2030         req->byte_count = cpu_to_le32(byte_cnt);
2031         for (i = 0; i < byte_cnt; i++) {
2032                 req->snd_buff[i] = (u8)(pattern >> (j*8));
2033                 j++;
2034                 if (j > 7)
2035                         j = 0;
2036         }
2037
2038         status = be_mcc_notify_wait(adapter);
2039
2040         if (!status) {
2041                 struct be_cmd_resp_ddrdma_test *resp;
2042                 resp = cmd->va;
2043                 if ((memcmp(resp->rcv_buff, req->snd_buff, byte_cnt) != 0) ||
2044                                 resp->snd_err) {
2045                         status = -1;
2046                 }
2047         }
2048
2049 err:
2050         spin_unlock_bh(&adapter->mcc_lock);
2051         return status;
2052 }
2053
2054 int be_cmd_get_seeprom_data(struct be_adapter *adapter,
2055                                 struct be_dma_mem *nonemb_cmd)
2056 {
2057         struct be_mcc_wrb *wrb;
2058         struct be_cmd_req_seeprom_read *req;
2059         struct be_sge *sge;
2060         int status;
2061
2062         spin_lock_bh(&adapter->mcc_lock);
2063
2064         wrb = wrb_from_mccq(adapter);
2065         if (!wrb) {
2066                 status = -EBUSY;
2067                 goto err;
2068         }
2069         req = nonemb_cmd->va;
2070         sge = nonembedded_sgl(wrb);
2071
2072         be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
2073                         OPCODE_COMMON_SEEPROM_READ);
2074
2075         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2076                         OPCODE_COMMON_SEEPROM_READ, sizeof(*req));
2077
2078         sge->pa_hi = cpu_to_le32(upper_32_bits(nonemb_cmd->dma));
2079         sge->pa_lo = cpu_to_le32(nonemb_cmd->dma & 0xFFFFFFFF);
2080         sge->len = cpu_to_le32(nonemb_cmd->size);
2081
2082         status = be_mcc_notify_wait(adapter);
2083
2084 err:
2085         spin_unlock_bh(&adapter->mcc_lock);
2086         return status;
2087 }
2088
2089 int be_cmd_get_phy_info(struct be_adapter *adapter, struct be_dma_mem *cmd)
2090 {
2091         struct be_mcc_wrb *wrb;
2092         struct be_cmd_req_get_phy_info *req;
2093         struct be_sge *sge;
2094         int status;
2095
2096         spin_lock_bh(&adapter->mcc_lock);
2097
2098         wrb = wrb_from_mccq(adapter);
2099         if (!wrb) {
2100                 status = -EBUSY;
2101                 goto err;
2102         }
2103
2104         req = cmd->va;
2105         sge = nonembedded_sgl(wrb);
2106
2107         be_wrb_hdr_prepare(wrb, sizeof(*req), false, 1,
2108                                 OPCODE_COMMON_GET_PHY_DETAILS);
2109
2110         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2111                         OPCODE_COMMON_GET_PHY_DETAILS,
2112                         sizeof(*req));
2113
2114         sge->pa_hi = cpu_to_le32(upper_32_bits(cmd->dma));
2115         sge->pa_lo = cpu_to_le32(cmd->dma & 0xFFFFFFFF);
2116         sge->len = cpu_to_le32(cmd->size);
2117
2118         status = be_mcc_notify_wait(adapter);
2119 err:
2120         spin_unlock_bh(&adapter->mcc_lock);
2121         return status;
2122 }
2123
2124 int be_cmd_set_qos(struct be_adapter *adapter, u32 bps, u32 domain)
2125 {
2126         struct be_mcc_wrb *wrb;
2127         struct be_cmd_req_set_qos *req;
2128         int status;
2129
2130         spin_lock_bh(&adapter->mcc_lock);
2131
2132         wrb = wrb_from_mccq(adapter);
2133         if (!wrb) {
2134                 status = -EBUSY;
2135                 goto err;
2136         }
2137
2138         req = embedded_payload(wrb);
2139
2140         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
2141                                 OPCODE_COMMON_SET_QOS);
2142
2143         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2144                         OPCODE_COMMON_SET_QOS, sizeof(*req));
2145
2146         req->hdr.domain = domain;
2147         req->valid_bits = cpu_to_le32(BE_QOS_BITS_NIC);
2148         req->max_bps_nic = cpu_to_le32(bps);
2149
2150         status = be_mcc_notify_wait(adapter);
2151
2152 err:
2153         spin_unlock_bh(&adapter->mcc_lock);
2154         return status;
2155 }
2156
2157 int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
2158 {
2159         struct be_mcc_wrb *wrb;
2160         struct be_cmd_req_cntl_attribs *req;
2161         struct be_cmd_resp_cntl_attribs *resp;
2162         struct be_sge *sge;
2163         int status;
2164         int payload_len = max(sizeof(*req), sizeof(*resp));
2165         struct mgmt_controller_attrib *attribs;
2166         struct be_dma_mem attribs_cmd;
2167
2168         memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
2169         attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
2170         attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size,
2171                                                 &attribs_cmd.dma);
2172         if (!attribs_cmd.va) {
2173                 dev_err(&adapter->pdev->dev,
2174                                 "Memory allocation failure\n");
2175                 return -ENOMEM;
2176         }
2177
2178         if (mutex_lock_interruptible(&adapter->mbox_lock))
2179                 return -1;
2180
2181         wrb = wrb_from_mbox(adapter);
2182         if (!wrb) {
2183                 status = -EBUSY;
2184                 goto err;
2185         }
2186         req = attribs_cmd.va;
2187         sge = nonembedded_sgl(wrb);
2188
2189         be_wrb_hdr_prepare(wrb, payload_len, false, 1,
2190                         OPCODE_COMMON_GET_CNTL_ATTRIBUTES);
2191         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2192                          OPCODE_COMMON_GET_CNTL_ATTRIBUTES, payload_len);
2193         sge->pa_hi = cpu_to_le32(upper_32_bits(attribs_cmd.dma));
2194         sge->pa_lo = cpu_to_le32(attribs_cmd.dma & 0xFFFFFFFF);
2195         sge->len = cpu_to_le32(attribs_cmd.size);
2196
2197         status = be_mbox_notify_wait(adapter);
2198         if (!status) {
2199                 attribs = (struct mgmt_controller_attrib *)( attribs_cmd.va +
2200                                         sizeof(struct be_cmd_resp_hdr));
2201                 adapter->hba_port_num = attribs->hba_attribs.phy_port;
2202         }
2203
2204 err:
2205         mutex_unlock(&adapter->mbox_lock);
2206         pci_free_consistent(adapter->pdev, attribs_cmd.size, attribs_cmd.va,
2207                                         attribs_cmd.dma);
2208         return status;
2209 }
2210
2211 /* Uses mbox */
2212 int be_cmd_check_native_mode(struct be_adapter *adapter)
2213 {
2214         struct be_mcc_wrb *wrb;
2215         struct be_cmd_req_set_func_cap *req;
2216         int status;
2217
2218         if (mutex_lock_interruptible(&adapter->mbox_lock))
2219                 return -1;
2220
2221         wrb = wrb_from_mbox(adapter);
2222         if (!wrb) {
2223                 status = -EBUSY;
2224                 goto err;
2225         }
2226
2227         req = embedded_payload(wrb);
2228
2229         be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0,
2230                 OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP);
2231
2232         be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
2233                 OPCODE_COMMON_SET_DRIVER_FUNCTION_CAP, sizeof(*req));
2234
2235         req->valid_cap_flags = cpu_to_le32(CAPABILITY_SW_TIMESTAMPS |
2236                                 CAPABILITY_BE3_NATIVE_ERX_API);
2237         req->cap_flags = cpu_to_le32(CAPABILITY_BE3_NATIVE_ERX_API);
2238
2239         status = be_mbox_notify_wait(adapter);
2240         if (!status) {
2241                 struct be_cmd_resp_set_func_cap *resp = embedded_payload(wrb);
2242                 adapter->be3_native = le32_to_cpu(resp->cap_flags) &
2243                                         CAPABILITY_BE3_NATIVE_ERX_API;
2244         }
2245 err:
2246         mutex_unlock(&adapter->mbox_lock);
2247         return status;
2248 }