drivers/net/ethernet/qlogic/qlcnic/qlcnic_sriov_common.c
1 /*
2  * QLogic qlcnic NIC Driver
3  * Copyright (c) 2009-2013 QLogic Corporation
4  *
5  * See LICENSE.qlcnic for copyright and licensing details.
6  */
7
8 #include "qlcnic_sriov.h"
9 #include "qlcnic.h"
10 #include "qlcnic_83xx_hw.h"
11 #include <linux/types.h>
12
13 #define QLC_BC_COMMAND  0
14 #define QLC_BC_RESPONSE 1
15
16 #define QLC_MBOX_RESP_TIMEOUT           (10 * HZ)
17 #define QLC_MBOX_CH_FREE_TIMEOUT        (10 * HZ)
18
19 #define QLC_BC_MSG              0
20 #define QLC_BC_CFREE            1
21 #define QLC_BC_FLR              2
22 #define QLC_BC_HDR_SZ           16
23 #define QLC_BC_PAYLOAD_SZ       (1024 - QLC_BC_HDR_SZ)
24
25 #define QLC_DEFAULT_RCV_DESCRIPTORS_SRIOV_VF            2048
26 #define QLC_DEFAULT_JUMBO_RCV_DESCRIPTORS_SRIOV_VF      512
27
28 #define QLC_83XX_VF_RESET_FAIL_THRESH   8
29 #define QLC_BC_CMD_MAX_RETRY_CNT        5
30
31 static void qlcnic_sriov_vf_free_mac_list(struct qlcnic_adapter *);
32 static int qlcnic_sriov_alloc_bc_mbx_args(struct qlcnic_cmd_args *, u32);
33 static void qlcnic_sriov_vf_poll_dev_state(struct work_struct *);
34 static void qlcnic_sriov_vf_cancel_fw_work(struct qlcnic_adapter *);
35 static void qlcnic_sriov_cleanup_transaction(struct qlcnic_bc_trans *);
36 static int qlcnic_sriov_issue_cmd(struct qlcnic_adapter *,
37                                   struct qlcnic_cmd_args *);
38 static int qlcnic_sriov_channel_cfg_cmd(struct qlcnic_adapter *, u8);
39 static void qlcnic_sriov_process_bc_cmd(struct work_struct *);
40 static int qlcnic_sriov_vf_shutdown(struct pci_dev *);
41 static int qlcnic_sriov_vf_resume(struct qlcnic_adapter *);
42
43 static struct qlcnic_hardware_ops qlcnic_sriov_vf_hw_ops = {
44         .read_crb                       = qlcnic_83xx_read_crb,
45         .write_crb                      = qlcnic_83xx_write_crb,
46         .read_reg                       = qlcnic_83xx_rd_reg_indirect,
47         .write_reg                      = qlcnic_83xx_wrt_reg_indirect,
48         .get_mac_address                = qlcnic_83xx_get_mac_address,
49         .setup_intr                     = qlcnic_83xx_setup_intr,
50         .alloc_mbx_args                 = qlcnic_83xx_alloc_mbx_args,
51         .mbx_cmd                        = qlcnic_sriov_issue_cmd,
52         .get_func_no                    = qlcnic_83xx_get_func_no,
53         .api_lock                       = qlcnic_83xx_cam_lock,
54         .api_unlock                     = qlcnic_83xx_cam_unlock,
55         .process_lb_rcv_ring_diag       = qlcnic_83xx_process_rcv_ring_diag,
56         .create_rx_ctx                  = qlcnic_83xx_create_rx_ctx,
57         .create_tx_ctx                  = qlcnic_83xx_create_tx_ctx,
58         .del_rx_ctx                     = qlcnic_83xx_del_rx_ctx,
59         .del_tx_ctx                     = qlcnic_83xx_del_tx_ctx,
60         .setup_link_event               = qlcnic_83xx_setup_link_event,
61         .get_nic_info                   = qlcnic_83xx_get_nic_info,
62         .get_pci_info                   = qlcnic_83xx_get_pci_info,
63         .set_nic_info                   = qlcnic_83xx_set_nic_info,
64         .change_macvlan                 = qlcnic_83xx_sre_macaddr_change,
65         .napi_enable                    = qlcnic_83xx_napi_enable,
66         .napi_disable                   = qlcnic_83xx_napi_disable,
67         .config_intr_coal               = qlcnic_83xx_config_intr_coal,
68         .config_rss                     = qlcnic_83xx_config_rss,
69         .config_hw_lro                  = qlcnic_83xx_config_hw_lro,
70         .config_promisc_mode            = qlcnic_83xx_nic_set_promisc,
71         .change_l2_filter               = qlcnic_83xx_change_l2_filter,
72         .get_board_info                 = qlcnic_83xx_get_port_info,
73         .free_mac_list                  = qlcnic_sriov_vf_free_mac_list,
74 };
75
76 static struct qlcnic_nic_template qlcnic_sriov_vf_ops = {
77         .config_bridged_mode    = qlcnic_config_bridged_mode,
78         .config_led             = qlcnic_config_led,
79         .cancel_idc_work        = qlcnic_sriov_vf_cancel_fw_work,
80         .napi_add               = qlcnic_83xx_napi_add,
81         .napi_del               = qlcnic_83xx_napi_del,
82         .shutdown               = qlcnic_sriov_vf_shutdown,
83         .resume                 = qlcnic_sriov_vf_resume,
84         .config_ipaddr          = qlcnic_83xx_config_ipaddr,
85         .clear_legacy_intr      = qlcnic_83xx_clear_legacy_intr,
86 };
87
88 static const struct qlcnic_mailbox_metadata qlcnic_sriov_bc_mbx_tbl[] = {
89         {QLCNIC_BC_CMD_CHANNEL_INIT, 2, 2},
90         {QLCNIC_BC_CMD_CHANNEL_TERM, 2, 2},
91         {QLCNIC_BC_CMD_GET_ACL, 3, 14},
92         {QLCNIC_BC_CMD_CFG_GUEST_VLAN, 2, 2},
93 };
94
95 static inline bool qlcnic_sriov_bc_msg_check(u32 val)
96 {
97         return !!(val & (1 << QLC_BC_MSG));
98 }
99
100 static inline bool qlcnic_sriov_channel_free_check(u32 val)
101 {
102         return !!(val & (1 << QLC_BC_CFREE));
103 }
104
105 static inline bool qlcnic_sriov_flr_check(u32 val)
106 {
107         return !!(val & (1 << QLC_BC_FLR));
108 }
109
110 static inline u8 qlcnic_sriov_target_func_id(u32 val)
111 {
112         return (val >> 4) & 0xff;
113 }
114
115 static int qlcnic_sriov_virtid_fn(struct qlcnic_adapter *adapter, int vf_id)
116 {
117         struct pci_dev *dev = adapter->pdev;
118         int pos;
119         u16 stride, offset;
120
121         if (qlcnic_sriov_vf_check(adapter))
122                 return 0;
123
124         pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV);
125         pci_read_config_word(dev, pos + PCI_SRIOV_VF_OFFSET, &offset);
126         pci_read_config_word(dev, pos + PCI_SRIOV_VF_STRIDE, &stride);
127
128         return (dev->devfn + offset + stride * vf_id) & 0xff;
129 }
130
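/* Allocate SR-IOV state for num_vfs VFs: the per-VF info array, the
 * back-channel transaction ("bc-trans") and async workqueues, and, when
 * running on the PF, a vport with a random MAC address for each VF.
 */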
131 int qlcnic_sriov_init(struct qlcnic_adapter *adapter, int num_vfs)
132 {
133         struct qlcnic_sriov *sriov;
134         struct qlcnic_back_channel *bc;
135         struct workqueue_struct *wq;
136         struct qlcnic_vport *vp;
137         struct qlcnic_vf_info *vf;
138         int err, i;
139
140         if (!qlcnic_sriov_enable_check(adapter))
141                 return -EIO;
142
143         sriov  = kzalloc(sizeof(struct qlcnic_sriov), GFP_KERNEL);
144         if (!sriov)
145                 return -ENOMEM;
146
147         adapter->ahw->sriov = sriov;
148         sriov->num_vfs = num_vfs;
149         bc = &sriov->bc;
150         sriov->vf_info = kcalloc(num_vfs, sizeof(struct qlcnic_vf_info),
151                                  GFP_KERNEL);
152         if (!sriov->vf_info) {
153                 err = -ENOMEM;
154                 goto qlcnic_free_sriov;
155         }
156
157         wq = create_singlethread_workqueue("bc-trans");
158         if (wq == NULL) {
159                 err = -ENOMEM;
160                 dev_err(&adapter->pdev->dev,
161                         "Cannot create bc-trans workqueue\n");
162                 goto qlcnic_free_vf_info;
163         }
164
165         bc->bc_trans_wq = wq;
166
167         wq = create_singlethread_workqueue("async");
168         if (wq == NULL) {
169                 err = -ENOMEM;
170                 dev_err(&adapter->pdev->dev, "Cannot create async workqueue\n");
171                 goto qlcnic_destroy_trans_wq;
172         }
173
174         bc->bc_async_wq =  wq;
175         INIT_LIST_HEAD(&bc->async_list);
176
177         for (i = 0; i < num_vfs; i++) {
178                 vf = &sriov->vf_info[i];
179                 vf->adapter = adapter;
180                 vf->pci_func = qlcnic_sriov_virtid_fn(adapter, i);
181                 mutex_init(&vf->send_cmd_lock);
182                 mutex_init(&vf->vlan_list_lock);
183                 INIT_LIST_HEAD(&vf->rcv_act.wait_list);
184                 INIT_LIST_HEAD(&vf->rcv_pend.wait_list);
185                 spin_lock_init(&vf->rcv_act.lock);
186                 spin_lock_init(&vf->rcv_pend.lock);
187                 init_completion(&vf->ch_free_cmpl);
188
189                 INIT_WORK(&vf->trans_work, qlcnic_sriov_process_bc_cmd);
190
191                 if (qlcnic_sriov_pf_check(adapter)) {
192                         vp = kzalloc(sizeof(struct qlcnic_vport), GFP_KERNEL);
193                         if (!vp) {
194                                 err = -ENOMEM;
195                                 goto qlcnic_destroy_async_wq;
196                         }
197                         sriov->vf_info[i].vp = vp;
198                         vp->max_tx_bw = MAX_BW;
199                         vp->spoofchk = true;
200                         random_ether_addr(vp->mac);
201                         dev_info(&adapter->pdev->dev,
202                                  "MAC Address %pM is configured for VF %d\n",
203                                  vp->mac, i);
204                 }
205         }
206
207         return 0;
208
209 qlcnic_destroy_async_wq:
210         while (i--)
211                 kfree(sriov->vf_info[i].vp);
212         destroy_workqueue(bc->bc_async_wq);
213 qlcnic_destroy_trans_wq:
214         destroy_workqueue(bc->bc_trans_wq);
215 qlcnic_free_vf_info:
216         kfree(sriov->vf_info);
217
218 qlcnic_free_sriov:
219         kfree(adapter->ahw->sriov);
220         return err;
221 }
222
223 void qlcnic_sriov_cleanup_list(struct qlcnic_trans_list *t_list)
224 {
225         struct qlcnic_bc_trans *trans;
226         struct qlcnic_cmd_args cmd;
227         unsigned long flags;
228
229         spin_lock_irqsave(&t_list->lock, flags);
230
231         while (!list_empty(&t_list->wait_list)) {
232                 trans = list_first_entry(&t_list->wait_list,
233                                          struct qlcnic_bc_trans, list);
234                 list_del(&trans->list);
235                 t_list->count--;
236                 cmd.req.arg = (u32 *)trans->req_pay;
237                 cmd.rsp.arg = (u32 *)trans->rsp_pay;
238                 qlcnic_free_mbx_args(&cmd);
239                 qlcnic_sriov_cleanup_transaction(trans);
240         }
241
242         spin_unlock_irqrestore(&t_list->lock, flags);
243 }
244
245 void __qlcnic_sriov_cleanup(struct qlcnic_adapter *adapter)
246 {
247         struct qlcnic_sriov *sriov = adapter->ahw->sriov;
248         struct qlcnic_back_channel *bc = &sriov->bc;
249         struct qlcnic_vf_info *vf;
250         int i;
251
252         if (!qlcnic_sriov_enable_check(adapter))
253                 return;
254
255         qlcnic_sriov_cleanup_async_list(bc);
256         destroy_workqueue(bc->bc_async_wq);
257
258         for (i = 0; i < sriov->num_vfs; i++) {
259                 vf = &sriov->vf_info[i];
260                 qlcnic_sriov_cleanup_list(&vf->rcv_pend);
261                 cancel_work_sync(&vf->trans_work);
262                 qlcnic_sriov_cleanup_list(&vf->rcv_act);
263         }
264
265         destroy_workqueue(bc->bc_trans_wq);
266
267         for (i = 0; i < sriov->num_vfs; i++)
268                 kfree(sriov->vf_info[i].vp);
269
270         kfree(sriov->vf_info);
271         kfree(adapter->ahw->sriov);
272 }
273
274 static void qlcnic_sriov_vf_cleanup(struct qlcnic_adapter *adapter)
275 {
276         qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_TERM);
277         qlcnic_sriov_cfg_bc_intr(adapter, 0);
278         __qlcnic_sriov_cleanup(adapter);
279 }
280
281 void qlcnic_sriov_cleanup(struct qlcnic_adapter *adapter)
282 {
283         if (!test_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state))
284                 return;
285
286         qlcnic_sriov_free_vlans(adapter);
287
288         if (qlcnic_sriov_pf_check(adapter))
289                 qlcnic_sriov_pf_cleanup(adapter);
290
291         if (qlcnic_sriov_vf_check(adapter))
292                 qlcnic_sriov_vf_cleanup(adapter);
293 }
294
295 static int qlcnic_sriov_post_bc_msg(struct qlcnic_adapter *adapter, u32 *hdr,
296                                     u32 *pay, u8 pci_func, u8 size)
297 {
298         struct qlcnic_hardware_context *ahw = adapter->ahw;
299         struct qlcnic_mailbox *mbx = ahw->mailbox;
300         struct qlcnic_cmd_args cmd;
301         unsigned long timeout;
302         int err;
303
304         memset(&cmd, 0, sizeof(struct qlcnic_cmd_args));
305         cmd.hdr = hdr;
306         cmd.pay = pay;
307         cmd.pay_size = size;
308         cmd.func_num = pci_func;
309         cmd.op_type = QLC_83XX_MBX_POST_BC_OP;
310         cmd.cmd_op = ((struct qlcnic_bc_hdr *)hdr)->cmd_op;
311
312         err = mbx->ops->enqueue_cmd(adapter, &cmd, &timeout);
313         if (err) {
314                 dev_err(&adapter->pdev->dev,
315                         "%s: Mailbox not available, cmd_op=0x%x, cmd_type=0x%x, pci_func=0x%x, op_mode=0x%x\n",
316                         __func__, cmd.cmd_op, cmd.type, ahw->pci_func,
317                         ahw->op_mode);
318                 return err;
319         }
320
321         if (!wait_for_completion_timeout(&cmd.completion, timeout)) {
322                 dev_err(&adapter->pdev->dev,
323                         "%s: Mailbox command timed out, cmd_op=0x%x, cmd_type=0x%x, pci_func=0x%x, op_mode=0x%x\n",
324                         __func__, cmd.cmd_op, cmd.type, ahw->pci_func,
325                         ahw->op_mode);
326                 flush_workqueue(mbx->work_q);
327         }
328
329         return cmd.rsp_opcode;
330 }
331
332 static void qlcnic_sriov_vf_cfg_buff_desc(struct qlcnic_adapter *adapter)
333 {
334         adapter->num_rxd = QLC_DEFAULT_RCV_DESCRIPTORS_SRIOV_VF;
335         adapter->max_rxd = MAX_RCV_DESCRIPTORS_10G;
336         adapter->num_jumbo_rxd = QLC_DEFAULT_JUMBO_RCV_DESCRIPTORS_SRIOV_VF;
337         adapter->max_jumbo_rxd = MAX_JUMBO_RCV_DESCRIPTORS_10G;
338         adapter->num_txd = MAX_CMD_DESCRIPTORS;
339         adapter->max_rds_rings = MAX_RDS_RINGS;
340 }
341
342 int qlcnic_sriov_get_vf_vport_info(struct qlcnic_adapter *adapter,
343                                    struct qlcnic_info *npar_info, u16 vport_id)
344 {
345         struct device *dev = &adapter->pdev->dev;
346         struct qlcnic_cmd_args cmd;
347         int err;
348         u32 status;
349
350         err = qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_GET_NIC_INFO);
351         if (err)
352                 return err;
353
354         cmd.req.arg[1] = vport_id << 16 | 0x1;
355         err = qlcnic_issue_cmd(adapter, &cmd);
356         if (err) {
357                 dev_err(&adapter->pdev->dev,
358                         "Failed to get vport info, err=%d\n", err);
359                 qlcnic_free_mbx_args(&cmd);
360                 return err;
361         }
362
363         status = cmd.rsp.arg[2] & 0xffff;
364         if (status & BIT_0)
365                 npar_info->min_tx_bw = MSW(cmd.rsp.arg[2]);
366         if (status & BIT_1)
367                 npar_info->max_tx_bw = LSW(cmd.rsp.arg[3]);
368         if (status & BIT_2)
369                 npar_info->max_tx_ques = MSW(cmd.rsp.arg[3]);
370         if (status & BIT_3)
371                 npar_info->max_tx_mac_filters = LSW(cmd.rsp.arg[4]);
372         if (status & BIT_4)
373                 npar_info->max_rx_mcast_mac_filters = MSW(cmd.rsp.arg[4]);
374         if (status & BIT_5)
375                 npar_info->max_rx_ucast_mac_filters = LSW(cmd.rsp.arg[5]);
376         if (status & BIT_6)
377                 npar_info->max_rx_ip_addr = MSW(cmd.rsp.arg[5]);
378         if (status & BIT_7)
379                 npar_info->max_rx_lro_flow = LSW(cmd.rsp.arg[6]);
380         if (status & BIT_8)
381                 npar_info->max_rx_status_rings = MSW(cmd.rsp.arg[6]);
382         if (status & BIT_9)
383                 npar_info->max_rx_buf_rings = LSW(cmd.rsp.arg[7]);
384
385         npar_info->max_rx_ques = MSW(cmd.rsp.arg[7]);
386         npar_info->max_tx_vlan_keys = LSW(cmd.rsp.arg[8]);
387         npar_info->max_local_ipv6_addrs = MSW(cmd.rsp.arg[8]);
388         npar_info->max_remote_ipv6_addrs = LSW(cmd.rsp.arg[9]);
389
390         dev_info(dev, "\n\tmin_tx_bw: %d, max_tx_bw: %d max_tx_ques: %d,\n"
391                  "\tmax_tx_mac_filters: %d max_rx_mcast_mac_filters: %d,\n"
392                  "\tmax_rx_ucast_mac_filters: 0x%x, max_rx_ip_addr: %d,\n"
393                  "\tmax_rx_lro_flow: %d max_rx_status_rings: %d,\n"
394                  "\tmax_rx_buf_rings: %d, max_rx_ques: %d, max_tx_vlan_keys %d\n"
395                  "\tlocal_ipv6_addr: %d, remote_ipv6_addr: %d\n",
396                  npar_info->min_tx_bw, npar_info->max_tx_bw,
397                  npar_info->max_tx_ques, npar_info->max_tx_mac_filters,
398                  npar_info->max_rx_mcast_mac_filters,
399                  npar_info->max_rx_ucast_mac_filters, npar_info->max_rx_ip_addr,
400                  npar_info->max_rx_lro_flow, npar_info->max_rx_status_rings,
401                  npar_info->max_rx_buf_rings, npar_info->max_rx_ques,
402                  npar_info->max_tx_vlan_keys, npar_info->max_local_ipv6_addrs,
403                  npar_info->max_remote_ipv6_addrs);
404
405         qlcnic_free_mbx_args(&cmd);
406         return err;
407 }
408
409 static int qlcnic_sriov_set_pvid_mode(struct qlcnic_adapter *adapter,
410                                       struct qlcnic_cmd_args *cmd)
411 {
412         adapter->rx_pvid = MSW(cmd->rsp.arg[1]) & 0xffff;
413         adapter->flags &= ~QLCNIC_TAGGING_ENABLED;
414         return 0;
415 }
416
417 static int qlcnic_sriov_set_guest_vlan_mode(struct qlcnic_adapter *adapter,
418                                             struct qlcnic_cmd_args *cmd)
419 {
420         struct qlcnic_sriov *sriov = adapter->ahw->sriov;
421         int i, num_vlans;
422         u16 *vlans;
423
424         if (sriov->allowed_vlans)
425                 return 0;
426
427         sriov->any_vlan = cmd->rsp.arg[2] & 0xf;
428         sriov->num_allowed_vlans = cmd->rsp.arg[2] >> 16;
429         dev_info(&adapter->pdev->dev, "Number of allowed Guest VLANs = %d\n",
430                  sriov->num_allowed_vlans);
431
432         qlcnic_sriov_alloc_vlans(adapter);
433
434         if (!sriov->any_vlan)
435                 return 0;
436
437         num_vlans = sriov->num_allowed_vlans;
438         sriov->allowed_vlans = kcalloc(num_vlans, sizeof(u16), GFP_KERNEL);
439         if (!sriov->allowed_vlans)
440                 return -ENOMEM;
441
442         vlans = (u16 *)&cmd->rsp.arg[3];
443         for (i = 0; i < num_vlans; i++)
444                 sriov->allowed_vlans[i] = vlans[i];
445
446         return 0;
447 }
448
449 static int qlcnic_sriov_get_vf_acl(struct qlcnic_adapter *adapter,
450                                    struct qlcnic_info *info)
451 {
452         struct qlcnic_sriov *sriov = adapter->ahw->sriov;
453         struct qlcnic_cmd_args cmd;
454         int ret = 0;
455
456         ret = qlcnic_sriov_alloc_bc_mbx_args(&cmd, QLCNIC_BC_CMD_GET_ACL);
457         if (ret)
458                 return ret;
459
460         ret = qlcnic_issue_cmd(adapter, &cmd);
461         if (ret) {
462                 dev_err(&adapter->pdev->dev, "Failed to get ACL, err=%d\n",
463                         ret);
464         } else {
465                 sriov->vlan_mode = cmd.rsp.arg[1] & 0x3;
466                 switch (sriov->vlan_mode) {
467                 case QLC_GUEST_VLAN_MODE:
468                         ret = qlcnic_sriov_set_guest_vlan_mode(adapter, &cmd);
469                         break;
470                 case QLC_PVID_MODE:
471                         ret = qlcnic_sriov_set_pvid_mode(adapter, &cmd);
472                         break;
473                 }
474         }
475
476         qlcnic_free_mbx_args(&cmd);
477         return ret;
478 }
479
480 static int qlcnic_sriov_vf_init_driver(struct qlcnic_adapter *adapter)
481 {
482         struct qlcnic_hardware_context *ahw = adapter->ahw;
483         struct qlcnic_info nic_info;
484         int err;
485
486         err = qlcnic_sriov_get_vf_vport_info(adapter, &nic_info, 0);
487         if (err)
488                 return err;
489
490         ahw->max_mc_count = nic_info.max_rx_mcast_mac_filters;
491
492         err = qlcnic_get_nic_info(adapter, &nic_info, ahw->pci_func);
493         if (err)
494                 return -EIO;
495
496         err = qlcnic_sriov_get_vf_acl(adapter, &nic_info);
497         if (err)
498                 return err;
499
500         if (qlcnic_83xx_get_port_info(adapter))
501                 return -EIO;
502
503         qlcnic_sriov_vf_cfg_buff_desc(adapter);
504         adapter->flags |= QLCNIC_ADAPTER_INITIALIZED;
505         dev_info(&adapter->pdev->dev, "HAL Version: %d\n",
506                  adapter->ahw->fw_hal_version);
507
508         ahw->physical_port = (u8) nic_info.phys_port;
509         ahw->switch_mode = nic_info.switch_mode;
510         ahw->max_mtu = nic_info.max_mtu;
511         ahw->op_mode = nic_info.op_mode;
512         ahw->capabilities = nic_info.capabilities;
513         return 0;
514 }
515
516 static int qlcnic_sriov_setup_vf(struct qlcnic_adapter *adapter,
517                                  int pci_using_dac)
518 {
519         int err;
520
521         INIT_LIST_HEAD(&adapter->vf_mc_list);
522         if (!qlcnic_use_msi_x && !!qlcnic_use_msi)
523                 dev_warn(&adapter->pdev->dev,
524                          "Device does not support MSI interrupts\n");
525
526         /* compute and set default and max tx/sds rings */
527         qlcnic_set_tx_ring_count(adapter, QLCNIC_SINGLE_RING);
528         qlcnic_set_sds_ring_count(adapter, QLCNIC_SINGLE_RING);
529
530         err = qlcnic_setup_intr(adapter);
531         if (err) {
532                 dev_err(&adapter->pdev->dev, "Failed to setup interrupt\n");
533                 goto err_out_disable_msi;
534         }
535
536         err = qlcnic_83xx_setup_mbx_intr(adapter);
537         if (err)
538                 goto err_out_disable_msi;
539
540         err = qlcnic_sriov_init(adapter, 1);
541         if (err)
542                 goto err_out_disable_mbx_intr;
543
544         err = qlcnic_sriov_cfg_bc_intr(adapter, 1);
545         if (err)
546                 goto err_out_cleanup_sriov;
547
548         err = qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_INIT);
549         if (err)
550                 goto err_out_disable_bc_intr;
551
552         err = qlcnic_sriov_vf_init_driver(adapter);
553         if (err)
554                 goto err_out_send_channel_term;
555
556         err = qlcnic_setup_netdev(adapter, adapter->netdev, pci_using_dac);
557         if (err)
558                 goto err_out_send_channel_term;
559
560         pci_set_drvdata(adapter->pdev, adapter);
561         dev_info(&adapter->pdev->dev, "%s: XGbE port initialized\n",
562                  adapter->netdev->name);
563
564         qlcnic_schedule_work(adapter, qlcnic_sriov_vf_poll_dev_state,
565                              adapter->ahw->idc.delay);
566         return 0;
567
568 err_out_send_channel_term:
569         qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_TERM);
570
571 err_out_disable_bc_intr:
572         qlcnic_sriov_cfg_bc_intr(adapter, 0);
573
574 err_out_cleanup_sriov:
575         __qlcnic_sriov_cleanup(adapter);
576
577 err_out_disable_mbx_intr:
578         qlcnic_83xx_free_mbx_intr(adapter);
579
580 err_out_disable_msi:
581         qlcnic_teardown_intr(adapter);
582         return err;
583 }
584
585 static int qlcnic_sriov_check_dev_ready(struct qlcnic_adapter *adapter)
586 {
587         u32 state;
588
589         do {
590                 msleep(20);
591                 if (++adapter->fw_fail_cnt > QLC_BC_CMD_MAX_RETRY_CNT)
592                         return -EIO;
593                 state = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_STATE);
594         } while (state != QLC_83XX_IDC_DEV_READY);
595
596         return 0;
597 }
598
599 int qlcnic_sriov_vf_init(struct qlcnic_adapter *adapter, int pci_using_dac)
600 {
601         struct qlcnic_hardware_context *ahw = adapter->ahw;
602         int err;
603
604         set_bit(QLC_83XX_MODULE_LOADED, &ahw->idc.status);
605         ahw->idc.delay = QLC_83XX_IDC_FW_POLL_DELAY;
606         ahw->reset_context = 0;
607         adapter->fw_fail_cnt = 0;
608         ahw->msix_supported = 1;
609         adapter->need_fw_reset = 0;
610         adapter->flags |= QLCNIC_TX_INTR_SHARED;
611
612         err = qlcnic_sriov_check_dev_ready(adapter);
613         if (err)
614                 return err;
615
616         err = qlcnic_sriov_setup_vf(adapter, pci_using_dac);
617         if (err)
618                 return err;
619
620         if (qlcnic_read_mac_addr(adapter))
621                 dev_warn(&adapter->pdev->dev, "failed to read mac addr\n");
622
623         INIT_DELAYED_WORK(&adapter->idc_aen_work, qlcnic_83xx_idc_aen_work);
624
625         clear_bit(__QLCNIC_RESETTING, &adapter->state);
626         return 0;
627 }
628
629 void qlcnic_sriov_vf_set_ops(struct qlcnic_adapter *adapter)
630 {
631         struct qlcnic_hardware_context *ahw = adapter->ahw;
632
633         ahw->op_mode = QLCNIC_SRIOV_VF_FUNC;
634         dev_info(&adapter->pdev->dev,
635                  "HAL Version: %d Non Privileged SRIOV function\n",
636                  ahw->fw_hal_version);
637         adapter->nic_ops = &qlcnic_sriov_vf_ops;
638         set_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state);
639         return;
640 }
641
642 void qlcnic_sriov_vf_register_map(struct qlcnic_hardware_context *ahw)
643 {
644         ahw->hw_ops             = &qlcnic_sriov_vf_hw_ops;
645         ahw->reg_tbl            = (u32 *)qlcnic_83xx_reg_tbl;
646         ahw->ext_reg_tbl        = (u32 *)qlcnic_83xx_ext_reg_tbl;
647 }
648
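/* Return the number of payload bytes that fragment curr_frag of a
 * back-channel message carries: QLC_BC_PAYLOAD_SZ while a full fragment's
 * worth of data remains, otherwise the remaining bytes.
 */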
649 static u32 qlcnic_sriov_get_bc_paysize(u32 real_pay_size, u8 curr_frag)
650 {
651         u32 pay_size;
652
653         pay_size = real_pay_size / ((curr_frag + 1) * QLC_BC_PAYLOAD_SZ);
654
655         if (pay_size)
656                 pay_size = QLC_BC_PAYLOAD_SZ;
657         else
658                 pay_size = real_pay_size % QLC_BC_PAYLOAD_SZ;
659
660         return pay_size;
661 }
662
663 int qlcnic_sriov_func_to_index(struct qlcnic_adapter *adapter, u8 pci_func)
664 {
665         struct qlcnic_vf_info *vf_info = adapter->ahw->sriov->vf_info;
666         u8 i;
667
668         if (qlcnic_sriov_vf_check(adapter))
669                 return 0;
670
671         for (i = 0; i < adapter->ahw->sriov->num_vfs; i++) {
672                 if (vf_info[i].pci_func == pci_func)
673                         return i;
674         }
675
676         return -EINVAL;
677 }
678
679 static inline int qlcnic_sriov_alloc_bc_trans(struct qlcnic_bc_trans **trans)
680 {
681         *trans = kzalloc(sizeof(struct qlcnic_bc_trans), GFP_ATOMIC);
682         if (!*trans)
683                 return -ENOMEM;
684
685         init_completion(&(*trans)->resp_cmpl);
686         return 0;
687 }
688
689 static inline int qlcnic_sriov_alloc_bc_msg(struct qlcnic_bc_hdr **hdr,
690                                             u32 size)
691 {
692         *hdr = kcalloc(size, sizeof(struct qlcnic_bc_hdr), GFP_ATOMIC);
693         if (!*hdr)
694                 return -ENOMEM;
695
696         return 0;
697 }
698
699 static int qlcnic_sriov_alloc_bc_mbx_args(struct qlcnic_cmd_args *mbx, u32 type)
700 {
701         const struct qlcnic_mailbox_metadata *mbx_tbl;
702         int i, size;
703
704         mbx_tbl = qlcnic_sriov_bc_mbx_tbl;
705         size = ARRAY_SIZE(qlcnic_sriov_bc_mbx_tbl);
706
707         for (i = 0; i < size; i++) {
708                 if (type == mbx_tbl[i].cmd) {
709                         mbx->op_type = QLC_BC_CMD;
710                         mbx->req.num = mbx_tbl[i].in_args;
711                         mbx->rsp.num = mbx_tbl[i].out_args;
712                         mbx->req.arg = kcalloc(mbx->req.num, sizeof(u32),
713                                                GFP_ATOMIC);
714                         if (!mbx->req.arg)
715                                 return -ENOMEM;
716                         mbx->rsp.arg = kcalloc(mbx->rsp.num, sizeof(u32),
717                                                GFP_ATOMIC);
718                         if (!mbx->rsp.arg) {
719                                 kfree(mbx->req.arg);
720                                 mbx->req.arg = NULL;
721                                 return -ENOMEM;
722                         }
723                         memset(mbx->req.arg, 0, sizeof(u32) * mbx->req.num);
724                         memset(mbx->rsp.arg, 0, sizeof(u32) * mbx->rsp.num);
725                         mbx->req.arg[0] = (type | (mbx->req.num << 16) |
726                                            (3 << 29));
727                         mbx->rsp.arg[0] = (type & 0xffff) | mbx->rsp.num << 16;
728                         return 0;
729                 }
730         }
731         return -EINVAL;
732 }
733
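/* Build the per-fragment back-channel headers for a transaction.  For a
 * command, the request/response payloads are taken from cmd and a header is
 * allocated per QLC_BC_PAYLOAD_SZ fragment; for a response, the buffers of
 * the received request are reused as the command arguments.
 */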
734 static int qlcnic_sriov_prepare_bc_hdr(struct qlcnic_bc_trans *trans,
735                                        struct qlcnic_cmd_args *cmd,
736                                        u16 seq, u8 msg_type)
737 {
738         struct qlcnic_bc_hdr *hdr;
739         int i;
740         u32 num_regs, bc_pay_sz;
741         u16 remainder;
742         u8 cmd_op, num_frags, t_num_frags;
743
744         bc_pay_sz = QLC_BC_PAYLOAD_SZ;
745         if (msg_type == QLC_BC_COMMAND) {
746                 trans->req_pay = (struct qlcnic_bc_payload *)cmd->req.arg;
747                 trans->rsp_pay = (struct qlcnic_bc_payload *)cmd->rsp.arg;
748                 num_regs = cmd->req.num;
749                 trans->req_pay_size = (num_regs * 4);
750                 num_regs = cmd->rsp.num;
751                 trans->rsp_pay_size = (num_regs * 4);
752                 cmd_op = cmd->req.arg[0] & 0xff;
753                 remainder = (trans->req_pay_size) % (bc_pay_sz);
754                 num_frags = (trans->req_pay_size) / (bc_pay_sz);
755                 if (remainder)
756                         num_frags++;
757                 t_num_frags = num_frags;
758                 if (qlcnic_sriov_alloc_bc_msg(&trans->req_hdr, num_frags))
759                         return -ENOMEM;
760                 remainder = (trans->rsp_pay_size) % (bc_pay_sz);
761                 num_frags = (trans->rsp_pay_size) / (bc_pay_sz);
762                 if (remainder)
763                         num_frags++;
764                 if (qlcnic_sriov_alloc_bc_msg(&trans->rsp_hdr, num_frags))
765                         return -ENOMEM;
766                 num_frags  = t_num_frags;
767                 hdr = trans->req_hdr;
768         }  else {
769                 cmd->req.arg = (u32 *)trans->req_pay;
770                 cmd->rsp.arg = (u32 *)trans->rsp_pay;
771                 cmd_op = cmd->req.arg[0] & 0xff;
772                 remainder = (trans->rsp_pay_size) % (bc_pay_sz);
773                 num_frags = (trans->rsp_pay_size) / (bc_pay_sz);
774                 if (remainder)
775                         num_frags++;
776                 cmd->req.num = trans->req_pay_size / 4;
777                 cmd->rsp.num = trans->rsp_pay_size / 4;
778                 hdr = trans->rsp_hdr;
779                 cmd->op_type = trans->req_hdr->op_type;
780         }
781
782         trans->trans_id = seq;
783         trans->cmd_id = cmd_op;
784         for (i = 0; i < num_frags; i++) {
785                 hdr[i].version = 2;
786                 hdr[i].msg_type = msg_type;
787                 hdr[i].op_type = cmd->op_type;
788                 hdr[i].num_cmds = 1;
789                 hdr[i].num_frags = num_frags;
790                 hdr[i].frag_num = i + 1;
791                 hdr[i].cmd_op = cmd_op;
792                 hdr[i].seq_id = seq;
793         }
794         return 0;
795 }
796
797 static void qlcnic_sriov_cleanup_transaction(struct qlcnic_bc_trans *trans)
798 {
799         if (!trans)
800                 return;
801         kfree(trans->req_hdr);
802         kfree(trans->rsp_hdr);
803         kfree(trans);
804 }
805
806 static int qlcnic_sriov_clear_trans(struct qlcnic_vf_info *vf,
807                                     struct qlcnic_bc_trans *trans, u8 type)
808 {
809         struct qlcnic_trans_list *t_list;
810         unsigned long flags;
811         int ret = 0;
812
813         if (type == QLC_BC_RESPONSE) {
814                 t_list = &vf->rcv_act;
815                 spin_lock_irqsave(&t_list->lock, flags);
816                 t_list->count--;
817                 list_del(&trans->list);
818                 if (t_list->count > 0)
819                         ret = 1;
820                 spin_unlock_irqrestore(&t_list->lock, flags);
821         }
822         if (type == QLC_BC_COMMAND) {
823                 while (test_and_set_bit(QLC_BC_VF_SEND, &vf->state))
824                         msleep(100);
825                 vf->send_cmd = NULL;
826                 clear_bit(QLC_BC_VF_SEND, &vf->state);
827         }
828         return ret;
829 }
830
831 static void qlcnic_sriov_schedule_bc_cmd(struct qlcnic_sriov *sriov,
832                                          struct qlcnic_vf_info *vf,
833                                          work_func_t func)
834 {
835         if (test_bit(QLC_BC_VF_FLR, &vf->state) ||
836             vf->adapter->need_fw_reset)
837                 return;
838
839         queue_work(sriov->bc.bc_trans_wq, &vf->trans_work);
840 }
841
842 static inline void qlcnic_sriov_wait_for_resp(struct qlcnic_bc_trans *trans)
843 {
844         struct completion *cmpl = &trans->resp_cmpl;
845
846         if (wait_for_completion_timeout(cmpl, QLC_MBOX_RESP_TIMEOUT))
847                 trans->trans_state = QLC_END;
848         else
849                 trans->trans_state = QLC_ABORT;
850
851         return;
852 }
853
854 static void qlcnic_sriov_handle_multi_frags(struct qlcnic_bc_trans *trans,
855                                             u8 type)
856 {
857         if (type == QLC_BC_RESPONSE) {
858                 trans->curr_rsp_frag++;
859                 if (trans->curr_rsp_frag < trans->rsp_hdr->num_frags)
860                         trans->trans_state = QLC_INIT;
861                 else
862                         trans->trans_state = QLC_END;
863         } else {
864                 trans->curr_req_frag++;
865                 if (trans->curr_req_frag < trans->req_hdr->num_frags)
866                         trans->trans_state = QLC_INIT;
867                 else
868                         trans->trans_state = QLC_WAIT_FOR_RESP;
869         }
870 }
871
872 static void qlcnic_sriov_wait_for_channel_free(struct qlcnic_bc_trans *trans,
873                                                u8 type)
874 {
875         struct qlcnic_vf_info *vf = trans->vf;
876         struct completion *cmpl = &vf->ch_free_cmpl;
877
878         if (!wait_for_completion_timeout(cmpl, QLC_MBOX_CH_FREE_TIMEOUT)) {
879                 trans->trans_state = QLC_ABORT;
880                 return;
881         }
882
883         clear_bit(QLC_BC_VF_CHANNEL, &vf->state);
884         qlcnic_sriov_handle_multi_frags(trans, type);
885 }
886
887 static void qlcnic_sriov_pull_bc_msg(struct qlcnic_adapter *adapter,
888                                      u32 *hdr, u32 *pay, u32 size)
889 {
890         struct qlcnic_hardware_context *ahw = adapter->ahw;
891         u32 fw_mbx;
892         u8 i, max = 2, hdr_size, j;
893
894         hdr_size = (sizeof(struct qlcnic_bc_hdr) / sizeof(u32));
895         max = (size / sizeof(u32)) + hdr_size;
896
897         fw_mbx = readl(QLCNIC_MBX_FW(ahw, 0));
898         for (i = 2, j = 0; j < hdr_size; i++, j++)
899                 *(hdr++) = readl(QLCNIC_MBX_FW(ahw, i));
900         for (; j < max; i++, j++)
901                 *(pay++) = readl(QLCNIC_MBX_FW(ahw, i));
902 }
903
904 static int __qlcnic_sriov_issue_bc_post(struct qlcnic_vf_info *vf)
905 {
906         int ret = -EBUSY;
907         u32 timeout = 10000;
908
909         do {
910                 if (!test_and_set_bit(QLC_BC_VF_CHANNEL, &vf->state)) {
911                         ret = 0;
912                         break;
913                 }
914                 mdelay(1);
915         } while (--timeout);
916
917         return ret;
918 }
919
920 static int qlcnic_sriov_issue_bc_post(struct qlcnic_bc_trans *trans, u8 type)
921 {
922         struct qlcnic_vf_info *vf = trans->vf;
923         u32 pay_size, hdr_size;
924         u32 *hdr, *pay;
925         int ret;
926         u8 pci_func = trans->func_id;
927
928         if (__qlcnic_sriov_issue_bc_post(vf))
929                 return -EBUSY;
930
931         if (type == QLC_BC_COMMAND) {
932                 hdr = (u32 *)(trans->req_hdr + trans->curr_req_frag);
933                 pay = (u32 *)(trans->req_pay + trans->curr_req_frag);
934                 hdr_size = (sizeof(struct qlcnic_bc_hdr) / sizeof(u32));
935                 pay_size = qlcnic_sriov_get_bc_paysize(trans->req_pay_size,
936                                                        trans->curr_req_frag);
937                 pay_size = (pay_size / sizeof(u32));
938         } else {
939                 hdr = (u32 *)(trans->rsp_hdr + trans->curr_rsp_frag);
940                 pay = (u32 *)(trans->rsp_pay + trans->curr_rsp_frag);
941                 hdr_size = (sizeof(struct qlcnic_bc_hdr) / sizeof(u32));
942                 pay_size = qlcnic_sriov_get_bc_paysize(trans->rsp_pay_size,
943                                                        trans->curr_rsp_frag);
944                 pay_size = (pay_size / sizeof(u32));
945         }
946
947         ret = qlcnic_sriov_post_bc_msg(vf->adapter, hdr, pay,
948                                        pci_func, pay_size);
949         return ret;
950 }
951
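/* Run a back-channel transaction through its state machine: QLC_INIT posts
 * the current fragment, QLC_WAIT_FOR_CHANNEL_FREE and QLC_WAIT_FOR_RESP
 * wait on their completions, and QLC_END/QLC_ABORT end the loop with
 * success or -EIO.
 */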
952 static int __qlcnic_sriov_send_bc_msg(struct qlcnic_bc_trans *trans,
953                                       struct qlcnic_vf_info *vf, u8 type)
954 {
955         bool flag = true;
956         int err = -EIO;
957
958         while (flag) {
959                 if (test_bit(QLC_BC_VF_FLR, &vf->state) ||
960                     vf->adapter->need_fw_reset)
961                         trans->trans_state = QLC_ABORT;
962
963                 switch (trans->trans_state) {
964                 case QLC_INIT:
965                         trans->trans_state = QLC_WAIT_FOR_CHANNEL_FREE;
966                         if (qlcnic_sriov_issue_bc_post(trans, type))
967                                 trans->trans_state = QLC_ABORT;
968                         break;
969                 case QLC_WAIT_FOR_CHANNEL_FREE:
970                         qlcnic_sriov_wait_for_channel_free(trans, type);
971                         break;
972                 case QLC_WAIT_FOR_RESP:
973                         qlcnic_sriov_wait_for_resp(trans);
974                         break;
975                 case QLC_END:
976                         err = 0;
977                         flag = false;
978                         break;
979                 case QLC_ABORT:
980                         err = -EIO;
981                         flag = false;
982                         clear_bit(QLC_BC_VF_CHANNEL, &vf->state);
983                         break;
984                 default:
985                         err = -EIO;
986                         flag = false;
987                 }
988         }
989         return err;
990 }
991
992 static int qlcnic_sriov_send_bc_cmd(struct qlcnic_adapter *adapter,
993                                     struct qlcnic_bc_trans *trans, int pci_func)
994 {
995         struct qlcnic_vf_info *vf;
996         int err, index = qlcnic_sriov_func_to_index(adapter, pci_func);
997
998         if (index < 0)
999                 return -EIO;
1000
1001         vf = &adapter->ahw->sriov->vf_info[index];
1002         trans->vf = vf;
1003         trans->func_id = pci_func;
1004
1005         if (!test_bit(QLC_BC_VF_STATE, &vf->state)) {
1006                 if (qlcnic_sriov_pf_check(adapter))
1007                         return -EIO;
1008                 if (qlcnic_sriov_vf_check(adapter) &&
1009                     trans->cmd_id != QLCNIC_BC_CMD_CHANNEL_INIT)
1010                         return -EIO;
1011         }
1012
1013         mutex_lock(&vf->send_cmd_lock);
1014         vf->send_cmd = trans;
1015         err = __qlcnic_sriov_send_bc_msg(trans, vf, QLC_BC_COMMAND);
1016         qlcnic_sriov_clear_trans(vf, trans, QLC_BC_COMMAND);
1017         mutex_unlock(&vf->send_cmd_lock);
1018         return err;
1019 }
1020
1021 static void __qlcnic_sriov_process_bc_cmd(struct qlcnic_adapter *adapter,
1022                                           struct qlcnic_bc_trans *trans,
1023                                           struct qlcnic_cmd_args *cmd)
1024 {
1025 #ifdef CONFIG_QLCNIC_SRIOV
1026         if (qlcnic_sriov_pf_check(adapter)) {
1027                 qlcnic_sriov_pf_process_bc_cmd(adapter, trans, cmd);
1028                 return;
1029         }
1030 #endif
1031         cmd->rsp.arg[0] |= (0x9 << 25);
1032         return;
1033 }
1034
1035 static void qlcnic_sriov_process_bc_cmd(struct work_struct *work)
1036 {
1037         struct qlcnic_vf_info *vf = container_of(work, struct qlcnic_vf_info,
1038                                                  trans_work);
1039         struct qlcnic_bc_trans *trans = NULL;
1040         struct qlcnic_adapter *adapter  = vf->adapter;
1041         struct qlcnic_cmd_args cmd;
1042         u8 req;
1043
1044         if (adapter->need_fw_reset)
1045                 return;
1046
1047         if (test_bit(QLC_BC_VF_FLR, &vf->state))
1048                 return;
1049
1050         memset(&cmd, 0, sizeof(struct qlcnic_cmd_args));
1051         trans = list_first_entry(&vf->rcv_act.wait_list,
1052                                  struct qlcnic_bc_trans, list);
1053         adapter = vf->adapter;
1054
1055         if (qlcnic_sriov_prepare_bc_hdr(trans, &cmd, trans->req_hdr->seq_id,
1056                                         QLC_BC_RESPONSE))
1057                 goto cleanup_trans;
1058
1059         __qlcnic_sriov_process_bc_cmd(adapter, trans, &cmd);
1060         trans->trans_state = QLC_INIT;
1061         __qlcnic_sriov_send_bc_msg(trans, vf, QLC_BC_RESPONSE);
1062
1063 cleanup_trans:
1064         qlcnic_free_mbx_args(&cmd);
1065         req = qlcnic_sriov_clear_trans(vf, trans, QLC_BC_RESPONSE);
1066         qlcnic_sriov_cleanup_transaction(trans);
1067         if (req)
1068                 qlcnic_sriov_schedule_bc_cmd(adapter->ahw->sriov, vf,
1069                                              qlcnic_sriov_process_bc_cmd);
1070 }
1071
1072 static void qlcnic_sriov_handle_bc_resp(struct qlcnic_bc_hdr *hdr,
1073                                         struct qlcnic_vf_info *vf)
1074 {
1075         struct qlcnic_bc_trans *trans;
1076         u32 pay_size;
1077
1078         if (test_and_set_bit(QLC_BC_VF_SEND, &vf->state))
1079                 return;
1080
1081         trans = vf->send_cmd;
1082
1083         if (trans == NULL)
1084                 goto clear_send;
1085
1086         if (trans->trans_id != hdr->seq_id)
1087                 goto clear_send;
1088
1089         pay_size = qlcnic_sriov_get_bc_paysize(trans->rsp_pay_size,
1090                                                trans->curr_rsp_frag);
1091         qlcnic_sriov_pull_bc_msg(vf->adapter,
1092                                  (u32 *)(trans->rsp_hdr + trans->curr_rsp_frag),
1093                                  (u32 *)(trans->rsp_pay + trans->curr_rsp_frag),
1094                                  pay_size);
1095         if (++trans->curr_rsp_frag < trans->rsp_hdr->num_frags)
1096                 goto clear_send;
1097
1098         complete(&trans->resp_cmpl);
1099
1100 clear_send:
1101         clear_bit(QLC_BC_VF_SEND, &vf->state);
1102 }
1103
1104 int __qlcnic_sriov_add_act_list(struct qlcnic_sriov *sriov,
1105                                 struct qlcnic_vf_info *vf,
1106                                 struct qlcnic_bc_trans *trans)
1107 {
1108         struct qlcnic_trans_list *t_list = &vf->rcv_act;
1109
1110         t_list->count++;
1111         list_add_tail(&trans->list, &t_list->wait_list);
1112         if (t_list->count == 1)
1113                 qlcnic_sriov_schedule_bc_cmd(sriov, vf,
1114                                              qlcnic_sriov_process_bc_cmd);
1115         return 0;
1116 }
1117
1118 static int qlcnic_sriov_add_act_list(struct qlcnic_sriov *sriov,
1119                                      struct qlcnic_vf_info *vf,
1120                                      struct qlcnic_bc_trans *trans)
1121 {
1122         struct qlcnic_trans_list *t_list = &vf->rcv_act;
1123
1124         spin_lock(&t_list->lock);
1125
1126         __qlcnic_sriov_add_act_list(sriov, vf, trans);
1127
1128         spin_unlock(&t_list->lock);
1129         return 0;
1130 }
1131
1132 static void qlcnic_sriov_handle_pending_trans(struct qlcnic_sriov *sriov,
1133                                               struct qlcnic_vf_info *vf,
1134                                               struct qlcnic_bc_hdr *hdr)
1135 {
1136         struct qlcnic_bc_trans *trans = NULL;
1137         struct list_head *node;
1138         u32 pay_size, curr_frag;
1139         u8 found = 0, active = 0;
1140
1141         spin_lock(&vf->rcv_pend.lock);
1142         if (vf->rcv_pend.count > 0) {
1143                 list_for_each(node, &vf->rcv_pend.wait_list) {
1144                         trans = list_entry(node, struct qlcnic_bc_trans, list);
1145                         if (trans->trans_id == hdr->seq_id) {
1146                                 found = 1;
1147                                 break;
1148                         }
1149                 }
1150         }
1151
1152         if (found) {
1153                 curr_frag = trans->curr_req_frag;
1154                 pay_size = qlcnic_sriov_get_bc_paysize(trans->req_pay_size,
1155                                                        curr_frag);
1156                 qlcnic_sriov_pull_bc_msg(vf->adapter,
1157                                          (u32 *)(trans->req_hdr + curr_frag),
1158                                          (u32 *)(trans->req_pay + curr_frag),
1159                                          pay_size);
1160                 trans->curr_req_frag++;
1161                 if (trans->curr_req_frag >= hdr->num_frags) {
1162                         vf->rcv_pend.count--;
1163                         list_del(&trans->list);
1164                         active = 1;
1165                 }
1166         }
1167         spin_unlock(&vf->rcv_pend.lock);
1168
1169         if (active)
1170                 if (qlcnic_sriov_add_act_list(sriov, vf, trans))
1171                         qlcnic_sriov_cleanup_transaction(trans);
1172
1173         return;
1174 }
1175
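/* Handle an incoming back-channel command fragment.  The first fragment
 * starts a new transaction; later fragments are matched against rcv_pend.
 * Once every fragment has arrived the transaction moves to the active list
 * for processing.
 */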
1176 static void qlcnic_sriov_handle_bc_cmd(struct qlcnic_sriov *sriov,
1177                                        struct qlcnic_bc_hdr *hdr,
1178                                        struct qlcnic_vf_info *vf)
1179 {
1180         struct qlcnic_bc_trans *trans;
1181         struct qlcnic_adapter *adapter = vf->adapter;
1182         struct qlcnic_cmd_args cmd;
1183         u32 pay_size;
1184         int err;
1185         u8 cmd_op;
1186
1187         if (adapter->need_fw_reset)
1188                 return;
1189
1190         if (!test_bit(QLC_BC_VF_STATE, &vf->state) &&
1191             hdr->op_type != QLC_BC_CMD &&
1192             hdr->cmd_op != QLCNIC_BC_CMD_CHANNEL_INIT)
1193                 return;
1194
1195         if (hdr->frag_num > 1) {
1196                 qlcnic_sriov_handle_pending_trans(sriov, vf, hdr);
1197                 return;
1198         }
1199
1200         memset(&cmd, 0, sizeof(struct qlcnic_cmd_args));
1201         cmd_op = hdr->cmd_op;
1202         if (qlcnic_sriov_alloc_bc_trans(&trans))
1203                 return;
1204
1205         if (hdr->op_type == QLC_BC_CMD)
1206                 err = qlcnic_sriov_alloc_bc_mbx_args(&cmd, cmd_op);
1207         else
1208                 err = qlcnic_alloc_mbx_args(&cmd, adapter, cmd_op);
1209
1210         if (err) {
1211                 qlcnic_sriov_cleanup_transaction(trans);
1212                 return;
1213         }
1214
1215         cmd.op_type = hdr->op_type;
1216         if (qlcnic_sriov_prepare_bc_hdr(trans, &cmd, hdr->seq_id,
1217                                         QLC_BC_COMMAND)) {
1218                 qlcnic_free_mbx_args(&cmd);
1219                 qlcnic_sriov_cleanup_transaction(trans);
1220                 return;
1221         }
1222
1223         pay_size = qlcnic_sriov_get_bc_paysize(trans->req_pay_size,
1224                                          trans->curr_req_frag);
1225         qlcnic_sriov_pull_bc_msg(vf->adapter,
1226                                  (u32 *)(trans->req_hdr + trans->curr_req_frag),
1227                                  (u32 *)(trans->req_pay + trans->curr_req_frag),
1228                                  pay_size);
1229         trans->func_id = vf->pci_func;
1230         trans->vf = vf;
1231         trans->trans_id = hdr->seq_id;
1232         trans->curr_req_frag++;
1233
1234         if (qlcnic_sriov_soft_flr_check(adapter, trans, vf))
1235                 return;
1236
1237         if (trans->curr_req_frag == trans->req_hdr->num_frags) {
1238                 if (qlcnic_sriov_add_act_list(sriov, vf, trans)) {
1239                         qlcnic_free_mbx_args(&cmd);
1240                         qlcnic_sriov_cleanup_transaction(trans);
1241                 }
1242         } else {
1243                 spin_lock(&vf->rcv_pend.lock);
1244                 list_add_tail(&trans->list, &vf->rcv_pend.wait_list);
1245                 vf->rcv_pend.count++;
1246                 spin_unlock(&vf->rcv_pend.lock);
1247         }
1248 }
1249
1250 static void qlcnic_sriov_handle_msg_event(struct qlcnic_sriov *sriov,
1251                                           struct qlcnic_vf_info *vf)
1252 {
1253         struct qlcnic_bc_hdr hdr;
1254         u32 *ptr = (u32 *)&hdr;
1255         u8 msg_type, i;
1256
1257         for (i = 2; i < 6; i++)
1258                 ptr[i - 2] = readl(QLCNIC_MBX_FW(vf->adapter->ahw, i));
1259         msg_type = hdr.msg_type;
1260
1261         switch (msg_type) {
1262         case QLC_BC_COMMAND:
1263                 qlcnic_sriov_handle_bc_cmd(sriov, &hdr, vf);
1264                 break;
1265         case QLC_BC_RESPONSE:
1266                 qlcnic_sriov_handle_bc_resp(&hdr, vf);
1267                 break;
1268         }
1269 }
1270
1271 static void qlcnic_sriov_handle_flr_event(struct qlcnic_sriov *sriov,
1272                                           struct qlcnic_vf_info *vf)
1273 {
1274         struct qlcnic_adapter *adapter = vf->adapter;
1275
1276         if (qlcnic_sriov_pf_check(adapter))
1277                 qlcnic_sriov_pf_handle_flr(sriov, vf);
1278         else
1279                 dev_err(&adapter->pdev->dev,
1280                         "Invalid event to VF. VF should not get FLR event\n");
1281 }
1282
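/* Dispatch a back-channel event from firmware to the target function:
 * channel-free completions, FLR notifications (PF only) and bc messages.
 */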
1283 void qlcnic_sriov_handle_bc_event(struct qlcnic_adapter *adapter, u32 event)
1284 {
1285         struct qlcnic_vf_info *vf;
1286         struct qlcnic_sriov *sriov;
1287         int index;
1288         u8 pci_func;
1289
1290         sriov = adapter->ahw->sriov;
1291         pci_func = qlcnic_sriov_target_func_id(event);
1292         index = qlcnic_sriov_func_to_index(adapter, pci_func);
1293
1294         if (index < 0)
1295                 return;
1296
1297         vf = &sriov->vf_info[index];
1298         vf->pci_func = pci_func;
1299
1300         if (qlcnic_sriov_channel_free_check(event))
1301                 complete(&vf->ch_free_cmpl);
1302
1303         if (qlcnic_sriov_flr_check(event)) {
1304                 qlcnic_sriov_handle_flr_event(sriov, vf);
1305                 return;
1306         }
1307
1308         if (qlcnic_sriov_bc_msg_check(event))
1309                 qlcnic_sriov_handle_msg_event(sriov, vf);
1310 }
1311
1312 int qlcnic_sriov_cfg_bc_intr(struct qlcnic_adapter *adapter, u8 enable)
1313 {
1314         struct qlcnic_cmd_args cmd;
1315         int err;
1316
1317         if (!test_bit(__QLCNIC_SRIOV_ENABLE, &adapter->state))
1318                 return 0;
1319
1320         if (qlcnic_alloc_mbx_args(&cmd, adapter, QLCNIC_CMD_BC_EVENT_SETUP))
1321                 return -ENOMEM;
1322
1323         if (enable)
1324                 cmd.req.arg[1] = (1 << 4) | (1 << 5) | (1 << 6) | (1 << 7);
1325
1326         err = qlcnic_83xx_issue_cmd(adapter, &cmd);
1327
1328         if (err != QLCNIC_RCODE_SUCCESS) {
1329                 dev_err(&adapter->pdev->dev,
1330                         "Failed to %s bc events, err=%d\n",
1331                         (enable ? "enable" : "disable"), err);
1332         }
1333
1334         qlcnic_free_mbx_args(&cmd);
1335         return err;
1336 }
1337
1338 static int qlcnic_sriov_retry_bc_cmd(struct qlcnic_adapter *adapter,
1339                                      struct qlcnic_bc_trans *trans)
1340 {
1341         u8 max = QLC_BC_CMD_MAX_RETRY_CNT;
1342         u32 state;
1343
1344         state = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_STATE);
1345         if (state == QLC_83XX_IDC_DEV_READY) {
1346                 msleep(20);
1347                 clear_bit(QLC_BC_VF_CHANNEL, &trans->vf->state);
1348                 trans->trans_state = QLC_INIT;
1349                 if (++adapter->fw_fail_cnt > max)
1350                         return -EIO;
1351                 else
1352                         return 0;
1353         }
1354
1355         return -EIO;
1356 }
1357
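/* VF mailbox path: wrap the command in a back-channel transaction and send
 * it to the PF.  CHANNEL_INIT is retried after an adapter reset; a timeout
 * marks the context and firmware for reset and the mailbox as not ready.
 */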
1358 static int qlcnic_sriov_issue_cmd(struct qlcnic_adapter *adapter,
1359                                   struct qlcnic_cmd_args *cmd)
1360 {
1361         struct qlcnic_hardware_context *ahw = adapter->ahw;
1362         struct qlcnic_mailbox *mbx = ahw->mailbox;
1363         struct device *dev = &adapter->pdev->dev;
1364         struct qlcnic_bc_trans *trans;
1365         int err;
1366         u32 rsp_data, opcode, mbx_err_code, rsp;
1367         u16 seq = ++adapter->ahw->sriov->bc.trans_counter;
1368         u8 func = ahw->pci_func;
1369
1370         rsp = qlcnic_sriov_alloc_bc_trans(&trans);
1371         if (rsp)
1372                 return rsp;
1373
1374         rsp = qlcnic_sriov_prepare_bc_hdr(trans, cmd, seq, QLC_BC_COMMAND);
1375         if (rsp)
1376                 goto cleanup_transaction;
1377
1378 retry:
1379         if (!test_bit(QLC_83XX_MBX_READY, &mbx->status)) {
1380                 rsp = -EIO;
1381                 QLCDB(adapter, DRV, "MBX not Ready!(cmd 0x%x) for VF 0x%x\n",
1382                       QLCNIC_MBX_RSP(cmd->req.arg[0]), func);
1383                 goto err_out;
1384         }
1385
1386         err = qlcnic_sriov_send_bc_cmd(adapter, trans, func);
1387         if (err) {
1388                 dev_err(dev, "MBX command 0x%x timed out for VF %d\n",
1389                         (cmd->req.arg[0] & 0xffff), func);
1390                 rsp = QLCNIC_RCODE_TIMEOUT;
1391
1392                 /* After an adapter reset the PF driver may take some time to
1393                  * respond to the VF's request; retry up to the maximum count.
1394                  */
1395                 if ((trans->req_hdr->cmd_op == QLCNIC_BC_CMD_CHANNEL_INIT) &&
1396                     !qlcnic_sriov_retry_bc_cmd(adapter, trans))
1397                         goto retry;
1398
1399                 goto err_out;
1400         }
1401
1402         rsp_data = cmd->rsp.arg[0];
1403         mbx_err_code = QLCNIC_MBX_STATUS(rsp_data);
1404         opcode = QLCNIC_MBX_RSP(cmd->req.arg[0]);
1405
1406         if ((mbx_err_code == QLCNIC_MBX_RSP_OK) ||
1407             (mbx_err_code == QLCNIC_MBX_PORT_RSP_OK)) {
1408                 rsp = QLCNIC_RCODE_SUCCESS;
1409         } else {
1410                 rsp = mbx_err_code;
1411                 if (!rsp)
1412                         rsp = 1;
1413                 dev_err(dev,
1414                         "MBX command 0x%x failed with err:0x%x for VF %d\n",
1415                         opcode, mbx_err_code, func);
1416         }
1417
1418 err_out:
1419         if (rsp == QLCNIC_RCODE_TIMEOUT) {
1420                 ahw->reset_context = 1;
1421                 adapter->need_fw_reset = 1;
1422                 clear_bit(QLC_83XX_MBX_READY, &mbx->status);
1423         }
1424
1425 cleanup_transaction:
1426         qlcnic_sriov_cleanup_transaction(trans);
1427         return rsp;
1428 }
1429
1430 static int qlcnic_sriov_channel_cfg_cmd(struct qlcnic_adapter *adapter, u8 cmd_op)
1431 {
1432         struct qlcnic_cmd_args cmd;
1433         struct qlcnic_vf_info *vf = &adapter->ahw->sriov->vf_info[0];
1434         int ret;
1435
1436         if (qlcnic_sriov_alloc_bc_mbx_args(&cmd, cmd_op))
1437                 return -ENOMEM;
1438
1439         ret = qlcnic_issue_cmd(adapter, &cmd);
1440         if (ret) {
1441                 dev_err(&adapter->pdev->dev,
1442                         "Failed bc channel %s, err=%d\n",
1443                         cmd_op ? "term" : "init", ret);
1444                 goto out;
1445         }
1446
1447         cmd_op = (cmd.rsp.arg[0] & 0xff);
1448         if (cmd.rsp.arg[0] >> 25 == 2) {
1449                 ret = 2;
                     goto out;
             }
1450         if (cmd_op == QLCNIC_BC_CMD_CHANNEL_INIT)
1451                 set_bit(QLC_BC_VF_STATE, &vf->state);
1452         else
1453                 clear_bit(QLC_BC_VF_STATE, &vf->state);
1454
1455 out:
1456         qlcnic_free_mbx_args(&cmd);
1457         return ret;
1458 }
1459
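/* Move the pending entries from adapter->vf_mc_list onto a private list
 * (under the netdev address lock) and program each MAC into the firmware
 * filter table.  If the VF has VLANs configured, the broadcast and list
 * MACs are added once per VLAN; otherwise (and additionally on 84xx
 * adapters) they are added with VLAN ID 0.
 */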
1460 static void qlcnic_vf_add_mc_list(struct net_device *netdev)
1461 {
1462         struct qlcnic_adapter *adapter = netdev_priv(netdev);
1463         struct qlcnic_sriov *sriov = adapter->ahw->sriov;
1464         struct qlcnic_mac_vlan_list *cur;
1465         struct list_head *head, tmp_list;
1466         struct qlcnic_vf_info *vf;
1467         u16 vlan_id;
1468         int i;
1469
1470         static const u8 bcast_addr[ETH_ALEN] = {
1471                 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
1472         };
1473
1474         vf = &adapter->ahw->sriov->vf_info[0];
1475         INIT_LIST_HEAD(&tmp_list);
1476         head = &adapter->vf_mc_list;
1477         netif_addr_lock_bh(netdev);
1478
1479         while (!list_empty(head)) {
1480                 cur = list_entry(head->next, struct qlcnic_mac_vlan_list, list);
1481                 list_move(&cur->list, &tmp_list);
1482         }
1483
1484         netif_addr_unlock_bh(netdev);
1485
1486         while (!list_empty(&tmp_list)) {
1487                 cur = list_entry((&tmp_list)->next,
1488                                  struct qlcnic_mac_vlan_list, list);
1489                 if (!qlcnic_sriov_check_any_vlan(vf)) {
1490                         qlcnic_nic_add_mac(adapter, bcast_addr, 0);
1491                         qlcnic_nic_add_mac(adapter, cur->mac_addr, 0);
1492                 } else {
1493                         mutex_lock(&vf->vlan_list_lock);
1494                         for (i = 0; i < sriov->num_allowed_vlans; i++) {
1495                                 vlan_id = vf->sriov_vlans[i];
1496                                 if (vlan_id) {
1497                                         qlcnic_nic_add_mac(adapter, bcast_addr,
1498                                                            vlan_id);
1499                                         qlcnic_nic_add_mac(adapter,
1500                                                            cur->mac_addr,
1501                                                            vlan_id);
1502                                 }
1503                         }
1504                         mutex_unlock(&vf->vlan_list_lock);
1505                         if (qlcnic_84xx_check(adapter)) {
1506                                 qlcnic_nic_add_mac(adapter, bcast_addr, 0);
1507                                 qlcnic_nic_add_mac(adapter, cur->mac_addr, 0);
1508                         }
1509                 }
1510                 list_del(&cur->list);
1511                 kfree(cur);
1512         }
1513 }
1514
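/* Flush and free every entry on the back channel's async work list,
 * cancelling any work that is still queued or running.
 */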
1515 void qlcnic_sriov_cleanup_async_list(struct qlcnic_back_channel *bc)
1516 {
1517         struct list_head *head = &bc->async_list;
1518         struct qlcnic_async_work_list *entry;
1519
1520         while (!list_empty(head)) {
1521                 entry = list_entry(head->next, struct qlcnic_async_work_list,
1522                                    list);
1523                 cancel_work_sync(&entry->work);
1524                 list_del(&entry->list);
1525                 kfree(entry);
1526         }
1527 }
1528
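/* Recompute the VF's receive filtering mode: accept-all in promiscuous
 * mode (unless the QLCNIC_PROMISC_DISABLED flag is set), accept-multi when
 * ALLMULTI is set or the multicast list exceeds the hardware limit, and
 * drop otherwise.  The pending multicast list is then programmed via
 * qlcnic_vf_add_mc_list().
 */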
1529 static void qlcnic_sriov_vf_set_multi(struct net_device *netdev)
1530 {
1531         struct qlcnic_adapter *adapter = netdev_priv(netdev);
1532         struct qlcnic_hardware_context *ahw = adapter->ahw;
1533         u32 mode = VPORT_MISS_MODE_DROP;
1534
1535         if (!test_bit(__QLCNIC_FW_ATTACHED, &adapter->state))
1536                 return;
1537
1538         if (netdev->flags & IFF_PROMISC) {
1539                 if (!(adapter->flags & QLCNIC_PROMISC_DISABLED))
1540                         mode = VPORT_MISS_MODE_ACCEPT_ALL;
1541         } else if ((netdev->flags & IFF_ALLMULTI) ||
1542                    (netdev_mc_count(netdev) > ahw->max_mc_count)) {
1543                 mode = VPORT_MISS_MODE_ACCEPT_MULTI;
1544         }
1545
1546         if (qlcnic_sriov_vf_check(adapter))
1547                 qlcnic_vf_add_mc_list(netdev);
1548
1549         qlcnic_nic_set_promisc(adapter, mode);
1550 }
1551
1552 static void qlcnic_sriov_handle_async_multi(struct work_struct *work)
1553 {
1554         struct qlcnic_async_work_list *entry;
1555         struct net_device *netdev;
1556
1557         entry = container_of(work, struct qlcnic_async_work_list, work);
1558         netdev = (struct net_device *)entry->ptr;
1559
1560         qlcnic_sriov_vf_set_multi(netdev);
1562 }
1563
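/* Return an async work list entry that is free for reuse (no work pending
 * on it).  If every entry is busy, allocate a new one with GFP_ATOMIC and
 * append it to the back channel's async list; returns NULL on allocation
 * failure.
 */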
1564 static struct qlcnic_async_work_list *
1565 qlcnic_sriov_get_free_node_async_work(struct qlcnic_back_channel *bc)
1566 {
1567         struct list_head *node;
1568         struct qlcnic_async_work_list *entry = NULL;
1569         u8 empty = 0;
1570
1571         list_for_each(node, &bc->async_list) {
1572                 entry = list_entry(node, struct qlcnic_async_work_list, list);
1573                 if (!work_pending(&entry->work)) {
1574                         empty = 1;
1575                         break;
1576                 }
1577         }
1578
1579         if (!empty) {
1580                 entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
1581                 if (!entry)
1582                         return NULL;
1584                 list_add_tail(&entry->list, &bc->async_list);
1585         }
1586
1587         return entry;
1588 }
1589
1590 static void qlcnic_sriov_schedule_bc_async_work(struct qlcnic_back_channel *bc,
1591                                                 work_func_t func, void *data)
1592 {
1593         struct qlcnic_async_work_list *entry = NULL;
1594
1595         entry = qlcnic_sriov_get_free_node_async_work(bc);
1596         if (!entry)
1597                 return;
1598
1599         entry->ptr = data;
1600         INIT_WORK(&entry->work, func);
1601         queue_work(bc->bc_async_wq, &entry->work);
1602 }
1603
1604 void qlcnic_sriov_vf_schedule_multi(struct net_device *netdev)
1605 {
1607         struct qlcnic_adapter *adapter = netdev_priv(netdev);
1608         struct qlcnic_back_channel *bc = &adapter->ahw->sriov->bc;
1609
1610         if (adapter->need_fw_reset)
1611                 return;
1612
1613         qlcnic_sriov_schedule_bc_async_work(bc, qlcnic_sriov_handle_async_multi,
1614                                             netdev);
1615 }
1616
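/* Bring the VF driver back up after a firmware reset: restart the mailbox
 * work, re-enable the mailbox interrupt, reconfigure the back-channel
 * interrupt, re-establish the channel with the PF and reinitialize the
 * driver.  On failure the steps already taken are unwound in reverse.
 */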
1617 static int qlcnic_sriov_vf_reinit_driver(struct qlcnic_adapter *adapter)
1618 {
1619         int err;
1620
1621         adapter->need_fw_reset = 0;
1622         qlcnic_83xx_reinit_mbx_work(adapter->ahw->mailbox);
1623         qlcnic_83xx_enable_mbx_interrupt(adapter);
1624
1625         err = qlcnic_sriov_cfg_bc_intr(adapter, 1);
1626         if (err)
1627                 return err;
1628
1629         err = qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_INIT);
1630         if (err)
1631                 goto err_out_cleanup_bc_intr;
1632
1633         err = qlcnic_sriov_vf_init_driver(adapter);
1634         if (err)
1635                 goto err_out_term_channel;
1636
1637         return 0;
1638
1639 err_out_term_channel:
1640         qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_TERM);
1641
1642 err_out_cleanup_bc_intr:
1643         qlcnic_sriov_cfg_bc_intr(adapter, 0);
1644         return err;
1645 }
1646
1647 static void qlcnic_sriov_vf_attach(struct qlcnic_adapter *adapter)
1648 {
1649         struct net_device *netdev = adapter->netdev;
1650
1651         if (netif_running(netdev)) {
1652                 if (!qlcnic_up(adapter, netdev))
1653                         qlcnic_restore_indev_addr(netdev, NETDEV_UP);
1654         }
1655
1656         netif_device_attach(netdev);
1657 }
1658
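/* Quiesce the VF: detach the net device, stop mailbox processing and its
 * interrupt, bring the interface down if it was running, and clear the
 * interrupt table entries so they can be reprogrammed on reattach.
 */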
1659 static void qlcnic_sriov_vf_detach(struct qlcnic_adapter *adapter)
1660 {
1661         struct qlcnic_hardware_context *ahw = adapter->ahw;
1662         struct qlcnic_intrpt_config *intr_tbl = ahw->intr_tbl;
1663         struct net_device *netdev = adapter->netdev;
1664         u8 i, max_ints = ahw->num_msix - 1;
1665
1666         netif_device_detach(netdev);
1667         qlcnic_83xx_detach_mailbox_work(adapter);
1668         qlcnic_83xx_disable_mbx_intr(adapter);
1669
1670         if (netif_running(netdev))
1671                 qlcnic_down(adapter, netdev);
1672
1673         for (i = 0; i < max_ints; i++) {
1674                 intr_tbl[i].id = i;
1675                 intr_tbl[i].enabled = 0;
1676                 intr_tbl[i].src = 0;
1677         }
1678         ahw->reset_context = 0;
1679 }
1680
1681 static int qlcnic_sriov_vf_handle_dev_ready(struct qlcnic_adapter *adapter)
1682 {
1683         struct qlcnic_hardware_context *ahw = adapter->ahw;
1684         struct device *dev = &adapter->pdev->dev;
1685         struct qlc_83xx_idc *idc = &ahw->idc;
1686         u8 func = ahw->pci_func;
1687         u32 state;
1688
1689         if ((idc->prev_state == QLC_83XX_IDC_DEV_NEED_RESET) ||
1690             (idc->prev_state == QLC_83XX_IDC_DEV_INIT)) {
1691                 if (!qlcnic_sriov_vf_reinit_driver(adapter)) {
1692                         qlcnic_sriov_vf_attach(adapter);
1693                         adapter->fw_fail_cnt = 0;
1694                         dev_info(dev,
1695                                  "%s: Reinitialization of VF 0x%x done after FW reset\n",
1696                                  __func__, func);
1697                 } else {
1698                         dev_err(dev,
1699                                 "%s: Reinitialization of VF 0x%x failed after FW reset\n",
1700                                 __func__, func);
1701                         state = QLCRDX(ahw, QLC_83XX_IDC_DEV_STATE);
1702                         dev_info(dev, "Current state 0x%x after FW reset\n",
1703                                  state);
1704                 }
1705         }
1706
1707         return 0;
1708 }
1709
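/* Handle a context reset on the VF.  The first couple of attempts only
 * flag a firmware reset and wait to see whether the firmware is actually
 * hung; once the count passes QLC_83XX_VF_RESET_FAIL_THRESH the VF is
 * detached for good.  Otherwise the VF is detached and reinitialized
 * through qlcnic_sriov_vf_reinit_driver().
 */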
1710 static int qlcnic_sriov_vf_handle_context_reset(struct qlcnic_adapter *adapter)
1711 {
1712         struct qlcnic_hardware_context *ahw = adapter->ahw;
1713         struct qlcnic_mailbox *mbx = ahw->mailbox;
1714         struct device *dev = &adapter->pdev->dev;
1715         struct qlc_83xx_idc *idc = &ahw->idc;
1716         u8 func = ahw->pci_func;
1717         u32 state;
1718
1719         adapter->reset_ctx_cnt++;
1720
1721         /* Skip the context reset and check if FW is hung */
1722         if (adapter->reset_ctx_cnt < 3) {
1723                 adapter->need_fw_reset = 1;
1724                 clear_bit(QLC_83XX_MBX_READY, &mbx->status);
1725                 dev_info(dev,
1726                          "Resetting context, wait here to check if FW is in failed state\n");
1727                 return 0;
1728         }
1729
1730         /* Check if the number of resets exceeds the threshold.
1731          * If it does, fail the VF.
1732          */
1733         if (adapter->reset_ctx_cnt > QLC_83XX_VF_RESET_FAIL_THRESH) {
1734                 clear_bit(QLC_83XX_MODULE_LOADED, &idc->status);
1735                 adapter->tx_timeo_cnt = 0;
1736                 adapter->fw_fail_cnt = 0;
1737                 adapter->reset_ctx_cnt = 0;
1738                 qlcnic_sriov_vf_detach(adapter);
1739                 dev_err(dev,
1740                         "Device context resets have exceeded the threshold, device interface will be shut down\n");
1741                 return -EIO;
1742         }
1743
1744         dev_info(dev, "Resetting context of VF 0x%x\n", func);
1745         dev_info(dev, "%s: Context reset count %d for VF 0x%x\n",
1746                  __func__, adapter->reset_ctx_cnt, func);
1747         set_bit(__QLCNIC_RESETTING, &adapter->state);
1748         adapter->need_fw_reset = 1;
1749         clear_bit(QLC_83XX_MBX_READY, &mbx->status);
1750         qlcnic_sriov_vf_detach(adapter);
1751         adapter->need_fw_reset = 0;
1752
1753         if (!qlcnic_sriov_vf_reinit_driver(adapter)) {
1754                 qlcnic_sriov_vf_attach(adapter);
1755                 adapter->tx_timeo_cnt = 0;
1756                 adapter->reset_ctx_cnt = 0;
1757                 adapter->fw_fail_cnt = 0;
1758                 dev_info(dev, "Done resetting context for VF 0x%x\n", func);
1759         } else {
1760                 dev_err(dev, "%s: Reinitialization of VF 0x%x failed\n",
1761                         __func__, func);
1762                 state = QLCRDX(ahw, QLC_83XX_IDC_DEV_STATE);
1763                 dev_info(dev, "%s: Current state 0x%x\n", __func__, state);
1764         }
1765
1766         return 0;
1767 }
1768
1769 static int qlcnic_sriov_vf_idc_ready_state(struct qlcnic_adapter *adapter)
1770 {
1771         struct qlcnic_hardware_context *ahw = adapter->ahw;
1772         int ret = 0;
1773
1774         if (ahw->idc.prev_state != QLC_83XX_IDC_DEV_READY)
1775                 ret = qlcnic_sriov_vf_handle_dev_ready(adapter);
1776         else if (ahw->reset_context)
1777                 ret = qlcnic_sriov_vf_handle_context_reset(adapter);
1778
1779         clear_bit(__QLCNIC_RESETTING, &adapter->state);
1780         return ret;
1781 }
1782
1783 static int qlcnic_sriov_vf_idc_failed_state(struct qlcnic_adapter *adapter)
1784 {
1785         struct qlc_83xx_idc *idc = &adapter->ahw->idc;
1786
1787         dev_err(&adapter->pdev->dev, "Device is in failed state\n");
1788         if (idc->prev_state == QLC_83XX_IDC_DEV_READY)
1789                 qlcnic_sriov_vf_detach(adapter);
1790
1791         clear_bit(QLC_83XX_MODULE_LOADED, &idc->status);
1792         clear_bit(__QLCNIC_RESETTING, &adapter->state);
1793         return -EIO;
1794 }
1795
1796 static int
1797 qlcnic_sriov_vf_idc_need_quiescent_state(struct qlcnic_adapter *adapter)
1798 {
1799         struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
1800         struct qlc_83xx_idc *idc = &adapter->ahw->idc;
1801
1802         dev_info(&adapter->pdev->dev, "Device is in quiescent state\n");
1803         if (idc->prev_state == QLC_83XX_IDC_DEV_READY) {
1804                 set_bit(__QLCNIC_RESETTING, &adapter->state);
1805                 adapter->tx_timeo_cnt = 0;
1806                 adapter->reset_ctx_cnt = 0;
1807                 clear_bit(QLC_83XX_MBX_READY, &mbx->status);
1808                 qlcnic_sriov_vf_detach(adapter);
1809         }
1810
1811         return 0;
1812 }
1813
1814 static int qlcnic_sriov_vf_idc_init_reset_state(struct qlcnic_adapter *adapter)
1815 {
1816         struct qlcnic_mailbox *mbx = adapter->ahw->mailbox;
1817         struct qlc_83xx_idc *idc = &adapter->ahw->idc;
1818         u8 func = adapter->ahw->pci_func;
1819
1820         if (idc->prev_state == QLC_83XX_IDC_DEV_READY) {
1821                 dev_err(&adapter->pdev->dev,
1822                         "Firmware hang detected by VF 0x%x\n", func);
1823                 set_bit(__QLCNIC_RESETTING, &adapter->state);
1824                 adapter->tx_timeo_cnt = 0;
1825                 adapter->reset_ctx_cnt = 0;
1826                 clear_bit(QLC_83XX_MBX_READY, &mbx->status);
1827                 qlcnic_sriov_vf_detach(adapter);
1828         }
1829         return 0;
1830 }
1831
1832 static int qlcnic_sriov_vf_idc_unknown_state(struct qlcnic_adapter *adapter)
1833 {
1834         dev_err(&adapter->pdev->dev, "%s: Device in unknown state\n", __func__);
1835         return 0;
1836 }
1837
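/* Delayed work that polls the 83xx IDC device state register and runs the
 * handler for the current state (ready, need-reset/init, need-quiescent,
 * failed or unknown).  Unless a handler reports a fatal error or the
 * module has been unloaded, the work reschedules itself after idc->delay.
 */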
1838 static void qlcnic_sriov_vf_poll_dev_state(struct work_struct *work)
1839 {
1840         struct qlcnic_adapter *adapter;
1841         struct qlc_83xx_idc *idc;
1842         int ret = 0;
1843
1844         adapter = container_of(work, struct qlcnic_adapter, fw_work.work);
1845         idc = &adapter->ahw->idc;
1846         idc->curr_state = QLCRDX(adapter->ahw, QLC_83XX_IDC_DEV_STATE);
1847
1848         switch (idc->curr_state) {
1849         case QLC_83XX_IDC_DEV_READY:
1850                 ret = qlcnic_sriov_vf_idc_ready_state(adapter);
1851                 break;
1852         case QLC_83XX_IDC_DEV_NEED_RESET:
1853         case QLC_83XX_IDC_DEV_INIT:
1854                 ret = qlcnic_sriov_vf_idc_init_reset_state(adapter);
1855                 break;
1856         case QLC_83XX_IDC_DEV_NEED_QUISCENT:
1857                 ret = qlcnic_sriov_vf_idc_need_quiescent_state(adapter);
1858                 break;
1859         case QLC_83XX_IDC_DEV_FAILED:
1860                 ret = qlcnic_sriov_vf_idc_failed_state(adapter);
1861                 break;
1862         case QLC_83XX_IDC_DEV_QUISCENT:
1863                 break;
1864         default:
1865                 ret = qlcnic_sriov_vf_idc_unknown_state(adapter);
1866         }
1867
1868         idc->prev_state = idc->curr_state;
1869         if (!ret && test_bit(QLC_83XX_MODULE_LOADED, &idc->status))
1870                 qlcnic_schedule_work(adapter, qlcnic_sriov_vf_poll_dev_state,
1871                                      idc->delay);
1872 }
1873
1874 static void qlcnic_sriov_vf_cancel_fw_work(struct qlcnic_adapter *adapter)
1875 {
1876         while (test_and_set_bit(__QLCNIC_RESETTING, &adapter->state))
1877                 msleep(20);
1878
1879         clear_bit(QLC_83XX_MODULE_LOADED, &adapter->ahw->idc.status);
1880         clear_bit(__QLCNIC_RESETTING, &adapter->state);
1881         cancel_delayed_work_sync(&adapter->fw_work);
1882 }
1883
1884 static int qlcnic_sriov_check_vlan_id(struct qlcnic_sriov *sriov,
1885                                       struct qlcnic_vf_info *vf, u16 vlan_id)
1886 {
1887         int i, err = -EINVAL;
1888
1889         if (!vf->sriov_vlans)
1890                 return err;
1891
1892         mutex_lock(&vf->vlan_list_lock);
1893
1894         for (i = 0; i < sriov->num_allowed_vlans; i++) {
1895                 if (vf->sriov_vlans[i] == vlan_id) {
1896                         err = 0;
1897                         break;
1898                 }
1899         }
1900
1901         mutex_unlock(&vf->vlan_list_lock);
1902         return err;
1903 }
1904
1905 static int qlcnic_sriov_validate_num_vlans(struct qlcnic_sriov *sriov,
1906                                            struct qlcnic_vf_info *vf)
1907 {
1908         int err = 0;
1909
1910         mutex_lock(&vf->vlan_list_lock);
1911
1912         if (vf->num_vlan >= sriov->num_allowed_vlans)
1913                 err = -EINVAL;
1914
1915         mutex_unlock(&vf->vlan_list_lock);
1916         return err;
1917 }
1918
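/* Validate a guest VLAN add/delete request.  Guest VLAN mode must be
 * enabled.  An add is rejected if an 83xx VF already has a VLAN, if the
 * VF's allowed-VLAN count would be exceeded, or if sriov->any_vlan is set
 * and the VID is not in the allowed_vlans[] list.  A delete is rejected
 * if the VID is not currently configured on the VF.
 */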
1919 static int qlcnic_sriov_validate_vlan_cfg(struct qlcnic_adapter *adapter,
1920                                           u16 vid, u8 enable)
1921 {
1922         struct qlcnic_sriov *sriov = adapter->ahw->sriov;
1923         struct qlcnic_vf_info *vf;
1924         bool vlan_exist;
1925         u8 allowed = 0;
1926         int i;
1927
1928         vf = &adapter->ahw->sriov->vf_info[0];
1929         vlan_exist = qlcnic_sriov_check_any_vlan(vf);
1930         if (sriov->vlan_mode != QLC_GUEST_VLAN_MODE)
1931                 return -EINVAL;
1932
1933         if (enable) {
1934                 if (qlcnic_83xx_vf_check(adapter) && vlan_exist)
1935                         return -EINVAL;
1936
1937                 if (qlcnic_sriov_validate_num_vlans(sriov, vf))
1938                         return -EINVAL;
1939
1940                 if (sriov->any_vlan) {
1941                         for (i = 0; i < sriov->num_allowed_vlans; i++) {
1942                                 if (sriov->allowed_vlans[i] == vid)
1943                                         allowed = 1;
1944                         }
1945
1946                         if (!allowed)
1947                                 return -EINVAL;
1948                 }
1949         } else {
1950                 if (!vlan_exist || qlcnic_sriov_check_vlan_id(sriov, vf, vid))
1951                         return -EINVAL;
1952         }
1953
1954         return 0;
1955 }
1956
1957 static void qlcnic_sriov_vlan_operation(struct qlcnic_vf_info *vf, u16 vlan_id,
1958                                         enum qlcnic_vlan_operations opcode)
1959 {
1960         struct qlcnic_adapter *adapter = vf->adapter;
1961         struct qlcnic_sriov *sriov;
1962
1963         sriov = adapter->ahw->sriov;
1964
1965         if (!vf->sriov_vlans)
1966                 return;
1967
1968         mutex_lock(&vf->vlan_list_lock);
1969
1970         switch (opcode) {
1971         case QLC_VLAN_ADD:
1972                 qlcnic_sriov_add_vlan_id(sriov, vf, vlan_id);
1973                 break;
1974         case QLC_VLAN_DELETE:
1975                 qlcnic_sriov_del_vlan_id(sriov, vf, vlan_id);
1976                 break;
1977         default:
1978                 netdev_err(adapter->netdev, "Invalid VLAN operation\n");
1979         }
1980
1981         mutex_unlock(&vf->vlan_list_lock);
1983 }
1984
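/* Add or remove a guest VLAN on the VF.  VID 0 is silently accepted.
 * After validation the request is sent to the PF with
 * QLCNIC_BC_CMD_CFG_GUEST_VLAN (arg[1] = enable bit | VID << 16); on
 * success the local VLAN list and MAC filters are refreshed.
 */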
1985 int qlcnic_sriov_cfg_vf_guest_vlan(struct qlcnic_adapter *adapter,
1986                                    u16 vid, u8 enable)
1987 {
1988         struct qlcnic_sriov *sriov = adapter->ahw->sriov;
1989         struct qlcnic_vf_info *vf;
1990         struct qlcnic_cmd_args cmd;
1991         int ret;
1992
1993         if (vid == 0)
1994                 return 0;
1995
1996         vf = &adapter->ahw->sriov->vf_info[0];
1997         ret = qlcnic_sriov_validate_vlan_cfg(adapter, vid, enable);
1998         if (ret)
1999                 return ret;
2000
2001         ret = qlcnic_sriov_alloc_bc_mbx_args(&cmd,
2002                                              QLCNIC_BC_CMD_CFG_GUEST_VLAN);
2003         if (ret)
2004                 return ret;
2005
2006         cmd.req.arg[1] = (enable & 1) | vid << 16;
2007
2008         qlcnic_sriov_cleanup_async_list(&sriov->bc);
2009         ret = qlcnic_issue_cmd(adapter, &cmd);
2010         if (ret) {
2011                 dev_err(&adapter->pdev->dev,
2012                         "Failed to configure guest VLAN, err=%d\n", ret);
2013         } else {
2014                 qlcnic_free_mac_list(adapter);
2015
2016                 if (enable)
2017                         qlcnic_sriov_vlan_operation(vf, vid, QLC_VLAN_ADD);
2018                 else
2019                         qlcnic_sriov_vlan_operation(vf, vid, QLC_VLAN_DELETE);
2020
2021                 qlcnic_set_multi(adapter->netdev);
2022         }
2023
2024         qlcnic_free_mbx_args(&cmd);
2025         return ret;
2026 }
2027
2028 static void qlcnic_sriov_vf_free_mac_list(struct qlcnic_adapter *adapter)
2029 {
2030         struct list_head *head = &adapter->mac_list;
2031         struct qlcnic_mac_vlan_list *cur;
2032
2033         while (!list_empty(head)) {
2034                 cur = list_entry(head->next, struct qlcnic_mac_vlan_list, list);
2035                 qlcnic_sre_macaddr_change(adapter, cur->mac_addr, cur->vlan_id,
2036                                           QLCNIC_MAC_DEL);
2037                 list_del(&cur->list);
2038                 kfree(cur);
2039         }
2040 }
2042
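/* VF shutdown path: detach the net device, stop the IDC poll work, bring
 * the interface down, terminate the back channel and its interrupt,
 * disable the mailbox interrupt and save PCI state.
 */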
2043 static int qlcnic_sriov_vf_shutdown(struct pci_dev *pdev)
2044 {
2045         struct qlcnic_adapter *adapter = pci_get_drvdata(pdev);
2046         struct net_device *netdev = adapter->netdev;
2047         int retval;
2048
2049         netif_device_detach(netdev);
2050         qlcnic_cancel_idc_work(adapter);
2051
2052         if (netif_running(netdev))
2053                 qlcnic_down(adapter, netdev);
2054
2055         qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_TERM);
2056         qlcnic_sriov_cfg_bc_intr(adapter, 0);
2057         qlcnic_83xx_disable_mbx_intr(adapter);
2058         cancel_delayed_work_sync(&adapter->idc_aen_work);
2059
2060         retval = pci_save_state(pdev);
2061         if (retval)
2062                 return retval;
2063
2064         return 0;
2065 }
2066
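/* VF resume path: re-enable the mailbox and back-channel interrupts,
 * re-establish the channel with the PF, bring the interface back up if it
 * was running, reattach the net device and restart the IDC poll work.
 */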
2067 static int qlcnic_sriov_vf_resume(struct qlcnic_adapter *adapter)
2068 {
2069         struct qlc_83xx_idc *idc = &adapter->ahw->idc;
2070         struct net_device *netdev = adapter->netdev;
2071         int err;
2072
2073         set_bit(QLC_83XX_MODULE_LOADED, &idc->status);
2074         qlcnic_83xx_enable_mbx_interrupt(adapter);
2075         err = qlcnic_sriov_cfg_bc_intr(adapter, 1);
2076         if (err)
2077                 return err;
2078
2079         err = qlcnic_sriov_channel_cfg_cmd(adapter, QLCNIC_BC_CMD_CHANNEL_INIT);
2080         if (!err) {
2081                 if (netif_running(netdev)) {
2082                         err = qlcnic_up(adapter, netdev);
2083                         if (!err)
2084                                 qlcnic_restore_indev_addr(netdev, NETDEV_UP);
2085                 }
2086         }
2087
2088         netif_device_attach(netdev);
2089         qlcnic_schedule_work(adapter, qlcnic_sriov_vf_poll_dev_state,
2090                              idc->delay);
2091         return err;
2092 }
2093
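/* Allocate, for every VF, the array used to track its configured VLAN IDs,
 * sized to sriov->num_allowed_vlans.
 */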
2094 void qlcnic_sriov_alloc_vlans(struct qlcnic_adapter *adapter)
2095 {
2096         struct qlcnic_sriov *sriov = adapter->ahw->sriov;
2097         struct qlcnic_vf_info *vf;
2098         int i;
2099
2100         for (i = 0; i < sriov->num_vfs; i++) {
2101                 vf = &sriov->vf_info[i];
2102                 vf->sriov_vlans = kcalloc(sriov->num_allowed_vlans,
2103                                           sizeof(*vf->sriov_vlans), GFP_KERNEL);
2104         }
2105 }
2106
2107 void qlcnic_sriov_free_vlans(struct qlcnic_adapter *adapter)
2108 {
2109         struct qlcnic_sriov *sriov = adapter->ahw->sriov;
2110         struct qlcnic_vf_info *vf;
2111         int i;
2112
2113         for (i = 0; i < sriov->num_vfs; i++) {
2114                 vf = &sriov->vf_info[i];
2115                 kfree(vf->sriov_vlans);
2116                 vf->sriov_vlans = NULL;
2117         }
2118 }
2119
2120 void qlcnic_sriov_add_vlan_id(struct qlcnic_sriov *sriov,
2121                               struct qlcnic_vf_info *vf, u16 vlan_id)
2122 {
2123         int i;
2124
2125         for (i = 0; i < sriov->num_allowed_vlans; i++) {
2126                 if (!vf->sriov_vlans[i]) {
2127                         vf->sriov_vlans[i] = vlan_id;
2128                         vf->num_vlan++;
2129                         return;
2130                 }
2131         }
2132 }
2133
2134 void qlcnic_sriov_del_vlan_id(struct qlcnic_sriov *sriov,
2135                               struct qlcnic_vf_info *vf, u16 vlan_id)
2136 {
2137         int i;
2138
2139         for (i = 0; i < sriov->num_allowed_vlans; i++) {
2140                 if (vf->sriov_vlans[i] == vlan_id) {
2141                         vf->sriov_vlans[i] = 0;
2142                         vf->num_vlan--;
2143                         return;
2144                 }
2145         }
2146 }
2147
2148 bool qlcnic_sriov_check_any_vlan(struct qlcnic_vf_info *vf)
2149 {
2150         bool err = false;
2151
2152         mutex_lock(&vf->vlan_list_lock);
2153
2154         if (vf->num_vlan)
2155                 err = true;
2156
2157         mutex_unlock(&vf->vlan_list_lock);
2158         return err;
2159 }