2 * Copyright (c) 2005-2010 Brocade Communications Systems, Inc.
6 * Linux driver for Brocade Fibre Channel Host Bus Adapter.
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License (GPL) Version 2 as
10 * published by the Free Software Foundation
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
18 #include <linux/uaccess.h>
23 BFA_TRC_FILE(LDRV, BSG);
/*
 * bfad_iocmd_ioc_enable - BSG handler: enable the IOC and block until the
 * enable completion fires.  NOTE(review): this listing is gappy — the early
 * return after the "IOC not disabled" branch appears elided.
 */
26 bfad_iocmd_ioc_enable(struct bfad_s *bfad, void *cmd)
28 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
32 spin_lock_irqsave(&bfad->bfad_lock, flags);
33 /* If IOC is not in disabled state - return */
34 if (!bfa_ioc_is_disabled(&bfad->bfa.ioc)) {
35 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
36 iocmd->status = BFA_STATUS_IOC_FAILURE;
40 init_completion(&bfad->enable_comp);
41 bfa_iocfc_enable(&bfad->bfa);
42 iocmd->status = BFA_STATUS_OK;
43 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
44 wait_for_completion(&bfad->enable_comp);  /* sleeps; lock dropped first */
/*
 * bfad_iocmd_ioc_disable - BSG handler: disable the IOC, guarding against
 * concurrent disables via bfad->disable_active, and wait for completion.
 */
50 bfad_iocmd_ioc_disable(struct bfad_s *bfad, void *cmd)
52 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
56 spin_lock_irqsave(&bfad->bfad_lock, flags);
57 if (bfad->disable_active) {
58 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
62 bfad->disable_active = BFA_TRUE;
63 init_completion(&bfad->disable_comp);
64 bfa_iocfc_disable(&bfad->bfa);
65 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
67 wait_for_completion(&bfad->disable_comp);
68 bfad->disable_active = BFA_FALSE;  /* NOTE(review): cleared outside the lock */
69 iocmd->status = BFA_STATUS_OK;
/*
 * bfad_iocmd_ioc_get_info - BSG handler: populate bfa_bsg_ioc_info_s with
 * port WWNs, MACs, serial number, host number and name/path strings.
 */
75 bfad_iocmd_ioc_get_info(struct bfad_s *bfad, void *cmd)
78 struct bfa_bsg_ioc_info_s *iocmd = (struct bfa_bsg_ioc_info_s *)cmd;
79 struct bfad_im_port_s *im_port;
80 struct bfa_port_attr_s pattr;
83 spin_lock_irqsave(&bfad->bfad_lock, flags);
84 bfa_fcport_get_attr(&bfad->bfa, &pattr);
85 iocmd->nwwn = pattr.nwwn;
86 iocmd->pwwn = pattr.pwwn;
87 iocmd->ioc_type = bfa_get_type(&bfad->bfa);
88 iocmd->mac = bfa_get_mac(&bfad->bfa);
89 iocmd->factory_mac = bfa_get_mfg_mac(&bfad->bfa);
90 bfa_get_adapter_serial_num(&bfad->bfa, iocmd->serialnum);
91 iocmd->factorynwwn = pattr.factorynwwn;
92 iocmd->factorypwwn = pattr.factorypwwn;
93 iocmd->bfad_num = bfad->inst_no;
94 im_port = bfad->pport.im_port;
95 iocmd->host = im_port->shost->host_no;
96 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
98 strcpy(iocmd->name, bfad->adapter_name);  /* NOTE(review): unbounded strcpy; relies on caller-sized buffers */
99 strcpy(iocmd->port_name, bfad->port_name);
100 strcpy(iocmd->hwpath, bfad->pci_name);
102 /* set adapter hw path */
103 strcpy(iocmd->adapter_hwpath, bfad->pci_name);
104 i = strlen(iocmd->adapter_hwpath) - 1;
/* walk back to the last '.' and truncate there — NOTE(review): loop body elided
 * in this listing; also assumes pci_name contains a '.', else scans past start */
105 while (iocmd->adapter_hwpath[i] != '.')
107 iocmd->adapter_hwpath[i] = '\0';
108 iocmd->status = BFA_STATUS_OK;
/*
 * bfad_iocmd_ioc_get_attr - BSG handler: fetch IOC attributes and overlay
 * driver/firmware/BIOS version strings plus the cached PCI attributes.
 */
113 bfad_iocmd_ioc_get_attr(struct bfad_s *bfad, void *cmd)
115 struct bfa_bsg_ioc_attr_s *iocmd = (struct bfa_bsg_ioc_attr_s *)cmd;
118 spin_lock_irqsave(&bfad->bfad_lock, flags);
119 bfa_ioc_get_attr(&bfad->bfa.ioc, &iocmd->ioc_attr);
120 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
122 /* fill in driver attr info */
123 strcpy(iocmd->ioc_attr.driver_attr.driver, BFAD_DRIVER_NAME);
/* NOTE(review): strncpy may leave driver_ver unterminated if the version
 * string fills BFA_VERSION_LEN exactly */
124 strncpy(iocmd->ioc_attr.driver_attr.driver_ver,
125 BFAD_DRIVER_VERSION, BFA_VERSION_LEN);
126 strcpy(iocmd->ioc_attr.driver_attr.fw_ver,
127 iocmd->ioc_attr.adapter_attr.fw_ver);
128 strcpy(iocmd->ioc_attr.driver_attr.bios_ver,
129 iocmd->ioc_attr.adapter_attr.optrom_ver);
131 /* copy chip rev info first otherwise it will be overwritten */
132 memcpy(bfad->pci_attr.chip_rev, iocmd->ioc_attr.pci_attr.chip_rev,
133 sizeof(bfad->pci_attr.chip_rev));
134 memcpy(&iocmd->ioc_attr.pci_attr, &bfad->pci_attr,
135 sizeof(struct bfa_ioc_pci_attr_s));
137 iocmd->status = BFA_STATUS_OK;
/*
 * bfad_iocmd_ioc_get_stats - BSG handler: copy out IOC statistics.
 * NOTE(review): no bfad_lock taken here, unlike sibling handlers — confirm
 * bfa_ioc_get_stats is safe without it.
 */
142 bfad_iocmd_ioc_get_stats(struct bfad_s *bfad, void *cmd)
144 struct bfa_bsg_ioc_stats_s *iocmd = (struct bfa_bsg_ioc_stats_s *)cmd;
146 bfa_ioc_get_stats(&bfad->bfa, &iocmd->ioc_stats);
147 iocmd->status = BFA_STATUS_OK;
/*
 * bfad_iocmd_ioc_get_fwstats - BSG handler: copy firmware statistics into the
 * variable-length buffer that trails the fixed bfa_bsg_ioc_fwstats_s header.
 * Rejects mismatched payload sizes with BFA_STATUS_VERSION_FAIL.
 */
152 bfad_iocmd_ioc_get_fwstats(struct bfad_s *bfad, void *cmd,
153 unsigned int payload_len)
155 struct bfa_bsg_ioc_fwstats_s *iocmd =
156 (struct bfa_bsg_ioc_fwstats_s *)cmd;
160 if (bfad_chk_iocmd_sz(payload_len,
161 sizeof(struct bfa_bsg_ioc_fwstats_s),
162 sizeof(struct bfa_fw_stats_s)) != BFA_STATUS_OK) {
163 iocmd->status = BFA_STATUS_VERSION_FAIL;
167 iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_ioc_fwstats_s);
168 spin_lock_irqsave(&bfad->bfad_lock, flags);
169 iocmd->status = bfa_ioc_fw_stats_get(&bfad->bfa.ioc, iocmd_bufptr);
170 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
172 if (iocmd->status != BFA_STATUS_OK) {
173 bfa_trc(bfad, iocmd->status);
177 bfa_trc(bfad, 0x6666);  /* trace marker for the success path */
/*
 * bfad_iocmd_ioc_reset_stats - BSG handler: clear either driver-side IOC
 * stats or firmware stats, selected by v_cmd.  Firmware clear is done under
 * bfad_lock; the driver-side clear is not — presumably safe, verify.
 */
182 bfad_iocmd_ioc_reset_stats(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
184 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
187 if (v_cmd == IOCMD_IOC_RESET_STATS) {
188 bfa_ioc_clear_stats(&bfad->bfa);
189 iocmd->status = BFA_STATUS_OK;
190 } else if (v_cmd == IOCMD_IOC_RESET_FWSTATS) {
191 spin_lock_irqsave(&bfad->bfad_lock, flags);
192 iocmd->status = bfa_ioc_fw_stats_clear(&bfad->bfa.ioc);
193 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
/*
 * bfad_iocmd_ioc_set_name - BSG handler: set the adapter or port name from
 * user-supplied iocmd->name.  NOTE(review): unbounded strcpy from ioctl
 * input — confirm iocmd->name is NUL-terminated/bounded by the caller.
 */
200 bfad_iocmd_ioc_set_name(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
202 struct bfa_bsg_ioc_name_s *iocmd = (struct bfa_bsg_ioc_name_s *) cmd;
204 if (v_cmd == IOCMD_IOC_SET_ADAPTER_NAME)
205 strcpy(bfad->adapter_name, iocmd->name);
206 else if (v_cmd == IOCMD_IOC_SET_PORT_NAME)
207 strcpy(bfad->port_name, iocmd->name);
209 iocmd->status = BFA_STATUS_OK;
/* bfad_iocmd_iocfc_get_attr - BSG handler: copy out IOCFC attributes (lockless). */
214 bfad_iocmd_iocfc_get_attr(struct bfad_s *bfad, void *cmd)
216 struct bfa_bsg_iocfc_attr_s *iocmd = (struct bfa_bsg_iocfc_attr_s *)cmd;
218 iocmd->status = BFA_STATUS_OK;
219 bfa_iocfc_get_attr(&bfad->bfa, &iocmd->iocfc_attr);
/* bfad_iocmd_iocfc_set_intr - BSG handler: set interrupt attributes under bfad_lock. */
225 bfad_iocmd_iocfc_set_intr(struct bfad_s *bfad, void *cmd)
227 struct bfa_bsg_iocfc_intr_s *iocmd = (struct bfa_bsg_iocfc_intr_s *)cmd;
230 spin_lock_irqsave(&bfad->bfad_lock, flags);
231 iocmd->status = bfa_iocfc_israttr_set(&bfad->bfa, &iocmd->attr);
232 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
/*
 * bfad_iocmd_port_enable - BSG handler: start an asynchronous port enable and
 * wait for the hal completion callback; completion status becomes iocmd->status.
 */
238 bfad_iocmd_port_enable(struct bfad_s *bfad, void *cmd)
240 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
241 struct bfad_hal_comp fcomp;  /* on-stack completion token */
244 init_completion(&fcomp.comp);
245 spin_lock_irqsave(&bfad->bfad_lock, flags);
246 iocmd->status = bfa_port_enable(&bfad->bfa.modules.port,
247 bfad_hcb_comp, &fcomp);
248 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
249 if (iocmd->status != BFA_STATUS_OK) {
250 bfa_trc(bfad, iocmd->status);
253 wait_for_completion(&fcomp.comp);
254 iocmd->status = fcomp.status;
/*
 * bfad_iocmd_port_disable - BSG handler: asynchronous port disable; mirrors
 * bfad_iocmd_port_enable (fire under lock, wait for hal completion).
 */
259 bfad_iocmd_port_disable(struct bfad_s *bfad, void *cmd)
261 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
262 struct bfad_hal_comp fcomp;
265 init_completion(&fcomp.comp);
266 spin_lock_irqsave(&bfad->bfad_lock, flags);
267 iocmd->status = bfa_port_disable(&bfad->bfa.modules.port,
268 bfad_hcb_comp, &fcomp);
269 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
271 if (iocmd->status != BFA_STATUS_OK) {
272 bfa_trc(bfad, iocmd->status);
275 wait_for_completion(&fcomp.comp);
276 iocmd->status = fcomp.status;
/*
 * bfad_iocmd_port_get_attr - BSG handler: merge fcport attributes with
 * selected base-lport attributes (pid, type, loopback, authfail, symname).
 */
281 bfad_iocmd_port_get_attr(struct bfad_s *bfad, void *cmd)
283 struct bfa_bsg_port_attr_s *iocmd = (struct bfa_bsg_port_attr_s *)cmd;
284 struct bfa_lport_attr_s port_attr;
287 spin_lock_irqsave(&bfad->bfad_lock, flags);
288 bfa_fcport_get_attr(&bfad->bfa, &iocmd->attr);
289 bfa_fcs_lport_get_attr(&bfad->bfa_fcs.fabric.bport, &port_attr);
290 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
292 if (iocmd->attr.topology != BFA_PORT_TOPOLOGY_NONE)
293 iocmd->attr.pid = port_attr.pid;
297 iocmd->attr.port_type = port_attr.port_type;
298 iocmd->attr.loopback = port_attr.loopback;
299 iocmd->attr.authfail = port_attr.authfail;
/* bounded by the source field size; may leave symname unterminated if full */
300 strncpy(iocmd->attr.port_symname.symname,
301 port_attr.port_cfg.sym_name.symname,
302 sizeof(port_attr.port_cfg.sym_name.symname));
304 iocmd->status = BFA_STATUS_OK;
/*
 * bfad_iocmd_port_get_stats - BSG handler: asynchronously fetch port stats
 * into the buffer trailing the fixed header; validates payload size first.
 */
309 bfad_iocmd_port_get_stats(struct bfad_s *bfad, void *cmd,
310 unsigned int payload_len)
312 struct bfa_bsg_port_stats_s *iocmd = (struct bfa_bsg_port_stats_s *)cmd;
313 struct bfad_hal_comp fcomp;
317 if (bfad_chk_iocmd_sz(payload_len,
318 sizeof(struct bfa_bsg_port_stats_s),
319 sizeof(union bfa_port_stats_u)) != BFA_STATUS_OK) {
320 iocmd->status = BFA_STATUS_VERSION_FAIL;
324 iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_port_stats_s);
326 init_completion(&fcomp.comp);
327 spin_lock_irqsave(&bfad->bfad_lock, flags);
328 iocmd->status = bfa_port_get_stats(&bfad->bfa.modules.port,
329 iocmd_bufptr, bfad_hcb_comp, &fcomp);
330 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
331 if (iocmd->status != BFA_STATUS_OK) {
332 bfa_trc(bfad, iocmd->status);
336 wait_for_completion(&fcomp.comp);
337 iocmd->status = fcomp.status;
/*
 * bfad_iocmd_port_reset_stats - BSG handler: asynchronously clear port stats
 * and wait for the hal completion callback.
 */
343 bfad_iocmd_port_reset_stats(struct bfad_s *bfad, void *cmd)
345 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
346 struct bfad_hal_comp fcomp;
349 init_completion(&fcomp.comp);
350 spin_lock_irqsave(&bfad->bfad_lock, flags);
351 iocmd->status = bfa_port_clear_stats(&bfad->bfa.modules.port,
352 bfad_hcb_comp, &fcomp);
353 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
354 if (iocmd->status != BFA_STATUS_OK) {
355 bfa_trc(bfad, iocmd->status);
358 wait_for_completion(&fcomp.comp);
359 iocmd->status = fcomp.status;
/*
 * bfad_iocmd_set_port_cfg - BSG handler: dispatch topology/speed/ALPA port
 * configuration by v_cmd.  NOTE(review): parameter naming is inverted versus
 * the other handlers here — 'iocmd' is the raw buffer, 'cmd' the typed view.
 */
364 bfad_iocmd_set_port_cfg(struct bfad_s *bfad, void *iocmd, unsigned int v_cmd)
366 struct bfa_bsg_port_cfg_s *cmd = (struct bfa_bsg_port_cfg_s *)iocmd;
369 spin_lock_irqsave(&bfad->bfad_lock, flags);
370 if (v_cmd == IOCMD_PORT_CFG_TOPO)
371 cmd->status = bfa_fcport_cfg_topology(&bfad->bfa, cmd->param);
372 else if (v_cmd == IOCMD_PORT_CFG_SPEED)
373 cmd->status = bfa_fcport_cfg_speed(&bfad->bfa, cmd->param);
374 else if (v_cmd == IOCMD_PORT_CFG_ALPA)
375 cmd->status = bfa_fcport_cfg_hardalpa(&bfad->bfa, cmd->param);
376 else if (v_cmd == IOCMD_PORT_CLR_ALPA)
377 cmd->status = bfa_fcport_clr_hardalpa(&bfad->bfa);
378 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
/* bfad_iocmd_port_cfg_maxfrsize - BSG handler: set max frame size under bfad_lock. */
384 bfad_iocmd_port_cfg_maxfrsize(struct bfad_s *bfad, void *cmd)
386 struct bfa_bsg_port_cfg_maxfrsize_s *iocmd =
387 (struct bfa_bsg_port_cfg_maxfrsize_s *)cmd;
390 spin_lock_irqsave(&bfad->bfad_lock, flags);
391 iocmd->status = bfa_fcport_cfg_maxfrsize(&bfad->bfa, iocmd->maxfrsize);
392 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
/*
 * bfad_iocmd_port_cfg_bbsc - BSG handler: toggle BB_SCN state, only on FC-type
 * IOCs.  Always reports BFA_STATUS_OK, even for non-FC IOCs where nothing
 * is changed — presumably intentional; verify.
 */
398 bfad_iocmd_port_cfg_bbsc(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
400 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
401 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
404 spin_lock_irqsave(&bfad->bfad_lock, flags);
405 if (bfa_ioc_get_type(&bfad->bfa.ioc) == BFA_IOC_TYPE_FC) {
406 if (v_cmd == IOCMD_PORT_BBSC_ENABLE)
407 fcport->cfg.bb_scn_state = BFA_TRUE;
408 else if (v_cmd == IOCMD_PORT_BBSC_DISABLE)
409 fcport->cfg.bb_scn_state = BFA_FALSE;
411 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
413 iocmd->status = BFA_STATUS_OK;
/*
 * bfad_iocmd_lport_get_attr - BSG handler: look up a logical port by
 * (vf_id, pwwn) and copy out its attributes; BFA_STATUS_UNKNOWN_LWWN if absent.
 */
418 bfad_iocmd_lport_get_attr(struct bfad_s *bfad, void *cmd)
420 struct bfa_fcs_lport_s *fcs_port;
421 struct bfa_bsg_lport_attr_s *iocmd = (struct bfa_bsg_lport_attr_s *)cmd;
424 spin_lock_irqsave(&bfad->bfad_lock, flags);
425 fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
426 iocmd->vf_id, iocmd->pwwn);
427 if (fcs_port == NULL) {
428 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
429 iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
433 bfa_fcs_lport_get_attr(fcs_port, &iocmd->port_attr);
434 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
435 iocmd->status = BFA_STATUS_OK;
/*
 * bfad_iocmd_lport_get_stats - BSG handler: look up a logical port by
 * (vf_id, pwwn) and copy out its statistics.
 */
441 bfad_iocmd_lport_get_stats(struct bfad_s *bfad, void *cmd)
443 struct bfa_fcs_lport_s *fcs_port;
444 struct bfa_bsg_lport_stats_s *iocmd =
445 (struct bfa_bsg_lport_stats_s *)cmd;
448 spin_lock_irqsave(&bfad->bfad_lock, flags);
449 fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
450 iocmd->vf_id, iocmd->pwwn);
451 if (fcs_port == NULL) {
452 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
453 iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
457 bfa_fcs_lport_get_stats(fcs_port, &iocmd->port_stats);
458 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
459 iocmd->status = BFA_STATUS_OK;
/*
 * bfad_iocmd_lport_reset_stats - BSG handler: clear a logical port's stats
 * and the IO stats of every active itnim belonging to that lport (matched by
 * lp_tag).  Note lookup keys off iocmd->vpwwn here, not ->pwwn.
 */
465 bfad_iocmd_lport_reset_stats(struct bfad_s *bfad, void *cmd)
467 struct bfa_fcs_lport_s *fcs_port;
468 struct bfa_bsg_reset_stats_s *iocmd =
469 (struct bfa_bsg_reset_stats_s *)cmd;
470 struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa);
471 struct list_head *qe, *qen;
472 struct bfa_itnim_s *itnim;
475 spin_lock_irqsave(&bfad->bfad_lock, flags);
476 fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
477 iocmd->vf_id, iocmd->vpwwn);
478 if (fcs_port == NULL) {
479 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
480 iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
484 bfa_fcs_lport_clear_stats(fcs_port);
485 /* clear IO stats from all active itnims */
486 list_for_each_safe(qe, qen, &fcpim->itnim_q) {
487 itnim = (struct bfa_itnim_s *) qe;
/* skip itnims that belong to a different lport — NOTE(review): the 'continue'
 * after this test appears elided in this listing */
488 if (itnim->rport->rport_info.lp_tag != fcs_port->lp_tag)
490 bfa_itnim_clear_stats(itnim);
492 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
493 iocmd->status = BFA_STATUS_OK;
/*
 * bfad_iocmd_lport_get_iostats - BSG handler: accumulate per-lport IO stats
 * via bfa_fcpim_port_iostats.  NOTE(review): the lp_tag argument line appears
 * elided after line 515 in this listing.
 */
499 bfad_iocmd_lport_get_iostats(struct bfad_s *bfad, void *cmd)
501 struct bfa_fcs_lport_s *fcs_port;
502 struct bfa_bsg_lport_iostats_s *iocmd =
503 (struct bfa_bsg_lport_iostats_s *)cmd;
506 spin_lock_irqsave(&bfad->bfad_lock, flags);
507 fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
508 iocmd->vf_id, iocmd->pwwn);
509 if (fcs_port == NULL) {
510 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
511 iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
515 bfa_fcpim_port_iostats(&bfad->bfa, &iocmd->iostats,
517 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
518 iocmd->status = BFA_STATUS_OK;
/*
 * bfad_iocmd_lport_get_rports - BSG handler: fill the trailing buffer with up
 * to iocmd->nrports rport qualifiers for the given lport.  Payload size is
 * validated against nrports before the buffer is touched.
 */
524 bfad_iocmd_lport_get_rports(struct bfad_s *bfad, void *cmd,
525 unsigned int payload_len)
527 struct bfa_bsg_lport_get_rports_s *iocmd =
528 (struct bfa_bsg_lport_get_rports_s *)cmd;
529 struct bfa_fcs_lport_s *fcs_port;
533 if (iocmd->nrports == 0)
536 if (bfad_chk_iocmd_sz(payload_len,
537 sizeof(struct bfa_bsg_lport_get_rports_s),
538 sizeof(struct bfa_rport_qualifier_s) * iocmd->nrports)
540 iocmd->status = BFA_STATUS_VERSION_FAIL;
544 iocmd_bufptr = (char *)iocmd +
545 sizeof(struct bfa_bsg_lport_get_rports_s);
546 spin_lock_irqsave(&bfad->bfad_lock, flags);
547 fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
548 iocmd->vf_id, iocmd->pwwn);
549 if (fcs_port == NULL) {
550 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
552 iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
/* nrports acts as in/out count — NOTE(review): the &iocmd->nrports argument
 * line appears elided after line 557 in this listing */
556 bfa_fcs_lport_get_rport_quals(fcs_port,
557 (struct bfa_rport_qualifier_s *)iocmd_bufptr,
559 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
560 iocmd->status = BFA_STATUS_OK;
/*
 * bfad_iocmd_rport_get_attr - BSG handler: find an rport under the given
 * lport, preferring a (rpwwn, pid) qualifier lookup with a plain rpwwn lookup
 * as fallback, then copy its attributes out.
 */
566 bfad_iocmd_rport_get_attr(struct bfad_s *bfad, void *cmd)
568 struct bfa_bsg_rport_attr_s *iocmd = (struct bfa_bsg_rport_attr_s *)cmd;
569 struct bfa_fcs_lport_s *fcs_port;
570 struct bfa_fcs_rport_s *fcs_rport;
573 spin_lock_irqsave(&bfad->bfad_lock, flags);
574 fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
575 iocmd->vf_id, iocmd->pwwn);
576 if (fcs_port == NULL) {
578 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
579 iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
/* qualifier lookup path; the guarding condition on iocmd->pid appears elided */
584 fcs_rport = bfa_fcs_lport_get_rport_by_qualifier(fcs_port,
585 iocmd->rpwwn, iocmd->pid);
587 fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn);
588 if (fcs_rport == NULL) {
590 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
591 iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
595 bfa_fcs_rport_get_attr(fcs_rport, &iocmd->attr);
596 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
597 iocmd->status = BFA_STATUS_OK;
/*
 * bfad_iocmd_rport_get_addr - BSG handler: translate (lport pwwn, rport rpwwn)
 * into the SCSI address (host number, target id) of the bound itnim.
 * Fails with UNKNOWN_RWWN when no driver itnim / im_port is attached.
 */
603 bfad_iocmd_rport_get_addr(struct bfad_s *bfad, void *cmd)
605 struct bfa_bsg_rport_scsi_addr_s *iocmd =
606 (struct bfa_bsg_rport_scsi_addr_s *)cmd;
607 struct bfa_fcs_lport_s *fcs_port;
608 struct bfa_fcs_itnim_s *fcs_itnim;
609 struct bfad_itnim_s *drv_itnim;
612 spin_lock_irqsave(&bfad->bfad_lock, flags);
613 fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
614 iocmd->vf_id, iocmd->pwwn);
615 if (fcs_port == NULL) {
617 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
618 iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
622 fcs_itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
623 if (fcs_itnim == NULL) {
625 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
626 iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
630 drv_itnim = fcs_itnim->itnim_drv;
632 if (drv_itnim && drv_itnim->im_port)
633 iocmd->host = drv_itnim->im_port->shost->host_no;
/* else-branch: no bound driver itnim, report UNKNOWN_RWWN and bail —
 * NOTE(review): the 'else {' / 'goto out' lines appear elided here */
636 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
637 iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
641 iocmd->target = drv_itnim->scsi_tgt_id;
642 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
646 iocmd->status = BFA_STATUS_OK;
/*
 * bfad_iocmd_rport_get_stats - BSG handler: copy an rport's FCS statistics
 * and, when a hal rport is attached, its hal-layer stats as well.
 */
652 bfad_iocmd_rport_get_stats(struct bfad_s *bfad, void *cmd)
654 struct bfa_bsg_rport_stats_s *iocmd =
655 (struct bfa_bsg_rport_stats_s *)cmd;
656 struct bfa_fcs_lport_s *fcs_port;
657 struct bfa_fcs_rport_s *fcs_rport;
660 spin_lock_irqsave(&bfad->bfad_lock, flags);
661 fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
662 iocmd->vf_id, iocmd->pwwn);
663 if (fcs_port == NULL) {
665 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
666 iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
670 fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn);
671 if (fcs_rport == NULL) {
673 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
674 iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
678 memcpy((void *)&iocmd->stats, (void *)&fcs_rport->stats,
679 sizeof(struct bfa_rport_stats_s));
680 if (bfa_fcs_rport_get_halrport(fcs_rport)) {  /* hal rport may be absent */
681 memcpy((void *)&iocmd->stats.hal_stats,
682 (void *)&(bfa_fcs_rport_get_halrport(fcs_rport)->stats),
683 sizeof(struct bfa_rport_hal_stats_s));
686 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
687 iocmd->status = BFA_STATUS_OK;
/*
 * bfad_iocmd_rport_clr_stats - BSG handler: zero an rport's FCS stats and,
 * if present, its hal rport stats.  NOTE(review): the NULL guard on 'rport'
 * before the memset appears elided in this listing — confirm in full source.
 */
693 bfad_iocmd_rport_clr_stats(struct bfad_s *bfad, void *cmd)
695 struct bfa_bsg_rport_reset_stats_s *iocmd =
696 (struct bfa_bsg_rport_reset_stats_s *)cmd;
697 struct bfa_fcs_lport_s *fcs_port;
698 struct bfa_fcs_rport_s *fcs_rport;
699 struct bfa_rport_s *rport;
702 spin_lock_irqsave(&bfad->bfad_lock, flags);
703 fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
704 iocmd->vf_id, iocmd->pwwn);
705 if (fcs_port == NULL) {
706 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
707 iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
711 fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn);
712 if (fcs_rport == NULL) {
713 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
714 iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
718 memset((char *)&fcs_rport->stats, 0, sizeof(struct bfa_rport_stats_s));
719 rport = bfa_fcs_rport_get_halrport(fcs_rport);
721 memset(&rport->stats, 0, sizeof(rport->stats));
722 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
723 iocmd->status = BFA_STATUS_OK;
/*
 * bfad_iocmd_rport_set_speed - BSG handler: record an assigned speed on the
 * rport and push it to firmware only when no RPSC-discovered speed exists.
 */
729 bfad_iocmd_rport_set_speed(struct bfad_s *bfad, void *cmd)
731 struct bfa_bsg_rport_set_speed_s *iocmd =
732 (struct bfa_bsg_rport_set_speed_s *)cmd;
733 struct bfa_fcs_lport_s *fcs_port;
734 struct bfa_fcs_rport_s *fcs_rport;
737 spin_lock_irqsave(&bfad->bfad_lock, flags);
738 fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
739 iocmd->vf_id, iocmd->pwwn);
740 if (fcs_port == NULL) {
741 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
742 iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
746 fcs_rport = bfa_fcs_rport_lookup(fcs_port, iocmd->rpwwn);
747 if (fcs_rport == NULL) {
748 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
749 iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
753 fcs_rport->rpf.assigned_speed = iocmd->speed;
754 /* Set this speed in f/w only if the RPSC speed is not available */
755 if (fcs_rport->rpf.rpsc_speed == BFA_PORT_SPEED_UNKNOWN)
756 if (fcs_rport->bfa_rport)  /* hal rport may not be bound */
757 bfa_rport_speed(fcs_rport->bfa_rport, iocmd->speed);
758 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
759 iocmd->status = BFA_STATUS_OK;
/*
 * bfad_iocmd_vport_get_attr - BSG handler: look up a vport by (vf_id, vpwwn)
 * and copy out its attributes; BFA_STATUS_UNKNOWN_VWWN if absent.
 */
765 bfad_iocmd_vport_get_attr(struct bfad_s *bfad, void *cmd)
767 struct bfa_fcs_vport_s *fcs_vport;
768 struct bfa_bsg_vport_attr_s *iocmd = (struct bfa_bsg_vport_attr_s *)cmd;
771 spin_lock_irqsave(&bfad->bfad_lock, flags);
772 fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs,
773 iocmd->vf_id, iocmd->vpwwn);
774 if (fcs_vport == NULL) {
775 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
776 iocmd->status = BFA_STATUS_UNKNOWN_VWWN;
780 bfa_fcs_vport_get_attr(fcs_vport, &iocmd->vport_attr);
781 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
782 iocmd->status = BFA_STATUS_OK;
/*
 * bfad_iocmd_vport_get_stats - BSG handler: copy a vport's stats, then
 * overlay the embedded port_stats member with the vport's lport stats.
 */
788 bfad_iocmd_vport_get_stats(struct bfad_s *bfad, void *cmd)
790 struct bfa_fcs_vport_s *fcs_vport;
791 struct bfa_bsg_vport_stats_s *iocmd =
792 (struct bfa_bsg_vport_stats_s *)cmd;
795 spin_lock_irqsave(&bfad->bfad_lock, flags);
796 fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs,
797 iocmd->vf_id, iocmd->vpwwn);
798 if (fcs_vport == NULL) {
799 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
800 iocmd->status = BFA_STATUS_UNKNOWN_VWWN;
804 memcpy((void *)&iocmd->vport_stats, (void *)&fcs_vport->vport_stats,
805 sizeof(struct bfa_vport_stats_s));
806 memcpy((void *)&iocmd->vport_stats.port_stats,
807 (void *)&fcs_vport->lport.stats,
808 sizeof(struct bfa_lport_stats_s));
809 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
810 iocmd->status = BFA_STATUS_OK;
/*
 * bfad_iocmd_vport_clr_stats - BSG handler: zero both the vport statistics
 * and the underlying lport statistics for the vport keyed by (vf_id, vpwwn).
 */
816 bfad_iocmd_vport_clr_stats(struct bfad_s *bfad, void *cmd)
818 struct bfa_fcs_vport_s *fcs_vport;
819 struct bfa_bsg_reset_stats_s *iocmd =
820 (struct bfa_bsg_reset_stats_s *)cmd;
823 spin_lock_irqsave(&bfad->bfad_lock, flags);
824 fcs_vport = bfa_fcs_vport_lookup(&bfad->bfa_fcs,
825 iocmd->vf_id, iocmd->vpwwn);
826 if (fcs_vport == NULL) {
827 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
828 iocmd->status = BFA_STATUS_UNKNOWN_VWWN;
832 memset(&fcs_vport->vport_stats, 0, sizeof(struct bfa_vport_stats_s));
833 memset(&fcs_vport->lport.stats, 0, sizeof(struct bfa_lport_stats_s));
834 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
835 iocmd->status = BFA_STATUS_OK;
/*
 * bfad_iocmd_fabric_get_lports - BSG handler: fill the trailing buffer with
 * the WWNs of up to iocmd->nports lports on vf_id; nports is in/out.
 * NOTE(review): the nports==0 guard condition appears elided before line 852.
 */
841 bfad_iocmd_fabric_get_lports(struct bfad_s *bfad, void *cmd,
842 unsigned int payload_len)
844 struct bfa_bsg_fabric_get_lports_s *iocmd =
845 (struct bfa_bsg_fabric_get_lports_s *)cmd;
846 bfa_fcs_vf_t *fcs_vf;
847 uint32_t nports = iocmd->nports;
852 iocmd->status = BFA_STATUS_EINVAL;
856 if (bfad_chk_iocmd_sz(payload_len,
857 sizeof(struct bfa_bsg_fabric_get_lports_s),
858 sizeof(wwn_t[iocmd->nports])) != BFA_STATUS_OK) {
859 iocmd->status = BFA_STATUS_VERSION_FAIL;
863 iocmd_bufptr = (char *)iocmd +
864 sizeof(struct bfa_bsg_fabric_get_lports_s);
866 spin_lock_irqsave(&bfad->bfad_lock, flags);
867 fcs_vf = bfa_fcs_vf_lookup(&bfad->bfa_fcs, iocmd->vf_id);
868 if (fcs_vf == NULL) {
869 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
870 iocmd->status = BFA_STATUS_UNKNOWN_VFID;
873 bfa_fcs_vf_get_ports(fcs_vf, (wwn_t *)iocmd_bufptr, &nports);
874 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
876 iocmd->nports = nports;  /* report actual count back to caller */
877 iocmd->status = BFA_STATUS_OK;
/*
 * bfad_iocmd_ratelim - BSG handler: enable/disable target rate limiting;
 * when the default TRL speed is still unknown, seed it to 1 Gbps.
 */
883 bfad_iocmd_ratelim(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
885 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd;
886 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
889 spin_lock_irqsave(&bfad->bfad_lock, flags);
891 if (cmd == IOCMD_RATELIM_ENABLE)
892 fcport->cfg.ratelimit = BFA_TRUE;
893 else if (cmd == IOCMD_RATELIM_DISABLE)
894 fcport->cfg.ratelimit = BFA_FALSE;
896 if (fcport->cfg.trl_def_speed == BFA_PORT_SPEED_UNKNOWN)
897 fcport->cfg.trl_def_speed = BFA_PORT_SPEED_1GBPS;
899 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
900 iocmd->status = BFA_STATUS_OK;
/*
 * bfad_iocmd_ratelim_speed - BSG handler: set the default trunk rate-limit
 * speed; rejects AUTO and anything above the port's supported speed.
 */
906 bfad_iocmd_ratelim_speed(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
908 struct bfa_bsg_trl_speed_s *iocmd = (struct bfa_bsg_trl_speed_s *)pcmd;
909 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
912 spin_lock_irqsave(&bfad->bfad_lock, flags);
914 /* Auto and speeds greater than the supported speed, are invalid */
915 if ((iocmd->speed == BFA_PORT_SPEED_AUTO) ||
916 (iocmd->speed > fcport->speed_sup)) {
917 iocmd->status = BFA_STATUS_UNSUPP_SPEED;
918 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
922 fcport->cfg.trl_def_speed = iocmd->speed;
923 iocmd->status = BFA_STATUS_OK;
924 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
/* bfad_iocmd_cfg_fcpim - BSG handler: set the FCP-IM path timeout (path_tov). */
930 bfad_iocmd_cfg_fcpim(struct bfad_s *bfad, void *cmd)
932 struct bfa_bsg_fcpim_s *iocmd = (struct bfa_bsg_fcpim_s *)cmd;
935 spin_lock_irqsave(&bfad->bfad_lock, flags);
936 bfa_fcpim_path_tov_set(&bfad->bfa, iocmd->param);
937 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
938 iocmd->status = BFA_STATUS_OK;
/*
 * bfad_iocmd_fcpim_get_modstats - BSG handler: sum IO statistics across every
 * itnim on the fcpim queue into iocmd->modstats.
 */
943 bfad_iocmd_fcpim_get_modstats(struct bfad_s *bfad, void *cmd)
945 struct bfa_bsg_fcpim_modstats_s *iocmd =
946 (struct bfa_bsg_fcpim_modstats_s *)cmd;
947 struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa);
948 struct list_head *qe, *qen;
949 struct bfa_itnim_s *itnim;
952 spin_lock_irqsave(&bfad->bfad_lock, flags);
953 /* accumulate IO stats from itnim */
954 memset((void *)&iocmd->modstats, 0, sizeof(struct bfa_itnim_iostats_s));
955 list_for_each_safe(qe, qen, &fcpim->itnim_q) {
956 itnim = (struct bfa_itnim_s *) qe;
957 bfa_fcpim_add_stats(&iocmd->modstats, &(itnim->stats));
959 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
960 iocmd->status = BFA_STATUS_OK;
/*
 * bfad_iocmd_fcpim_clr_modstats - BSG handler: clear IO stats on every itnim
 * and zero the deleted-itnim accumulator.
 */
965 bfad_iocmd_fcpim_clr_modstats(struct bfad_s *bfad, void *cmd)
967 struct bfa_bsg_fcpim_modstatsclr_s *iocmd =
968 (struct bfa_bsg_fcpim_modstatsclr_s *)cmd;
969 struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa);
970 struct list_head *qe, *qen;
971 struct bfa_itnim_s *itnim;
974 spin_lock_irqsave(&bfad->bfad_lock, flags);
975 list_for_each_safe(qe, qen, &fcpim->itnim_q) {
976 itnim = (struct bfa_itnim_s *) qe;
977 bfa_itnim_clear_stats(itnim);
979 memset(&fcpim->del_itn_stats, 0,
980 sizeof(struct bfa_fcpim_del_itn_stats_s));
981 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
982 iocmd->status = BFA_STATUS_OK;
/*
 * bfad_iocmd_fcpim_get_del_itn_stats - BSG handler: copy out the accumulated
 * statistics of deleted itnims.
 */
987 bfad_iocmd_fcpim_get_del_itn_stats(struct bfad_s *bfad, void *cmd)
989 struct bfa_bsg_fcpim_del_itn_stats_s *iocmd =
990 (struct bfa_bsg_fcpim_del_itn_stats_s *)cmd;
991 struct bfa_fcpim_s *fcpim = BFA_FCPIM(&bfad->bfa);
994 spin_lock_irqsave(&bfad->bfad_lock, flags);
995 memcpy((void *)&iocmd->modstats, (void *)&fcpim->del_itn_stats,
996 sizeof(struct bfa_fcpim_del_itn_stats_s));
997 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
999 iocmd->status = BFA_STATUS_OK;
/*
 * bfad_iocmd_itnim_get_attr - BSG handler: fetch itnim attributes for the
 * (lpwwn, rpwwn) pair.  NOTE(review): the NULL-check branch keywords around
 * line 1013 appear elided in this listing.
 */
1004 bfad_iocmd_itnim_get_attr(struct bfad_s *bfad, void *cmd)
1006 struct bfa_bsg_itnim_attr_s *iocmd = (struct bfa_bsg_itnim_attr_s *)cmd;
1007 struct bfa_fcs_lport_s *fcs_port;
1008 unsigned long flags;
1010 spin_lock_irqsave(&bfad->bfad_lock, flags);
1011 fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
1012 iocmd->vf_id, iocmd->lpwwn);
1014 iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
1016 iocmd->status = bfa_fcs_itnim_attr_get(fcs_port,
1017 iocmd->rpwwn, &iocmd->attr);
1018 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
/*
 * bfad_iocmd_itnim_get_iostats - BSG handler: copy the hal itnim's IO stats
 * for (lpwwn, rpwwn), when a hal itnim is bound.  NOTE(review): branch
 * keywords around the lookups appear elided in this listing.
 */
1023 bfad_iocmd_itnim_get_iostats(struct bfad_s *bfad, void *cmd)
1025 struct bfa_bsg_itnim_iostats_s *iocmd =
1026 (struct bfa_bsg_itnim_iostats_s *)cmd;
1027 struct bfa_fcs_lport_s *fcs_port;
1028 struct bfa_fcs_itnim_s *itnim;
1029 unsigned long flags;
1031 spin_lock_irqsave(&bfad->bfad_lock, flags);
1032 fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
1033 iocmd->vf_id, iocmd->lpwwn);
1035 iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
1038 itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
1040 iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
1042 iocmd->status = BFA_STATUS_OK;
1043 if (bfa_fcs_itnim_get_halitn(itnim))  /* hal itnim may be absent */
1044 memcpy((void *)&iocmd->iostats, (void *)
1045 &(bfa_fcs_itnim_get_halitn(itnim)->stats),
1046 sizeof(struct bfa_itnim_iostats_s));
1049 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
/*
 * bfad_iocmd_itnim_reset_stats - BSG handler: clear FCS itnim stats and the
 * bound hal itnim's stats for (pwwn, rpwwn).  NOTE(review): NULL-check branch
 * keywords appear elided; hal-itnim NULL guard before line 1074 not visible.
 */
1054 bfad_iocmd_itnim_reset_stats(struct bfad_s *bfad, void *cmd)
1056 struct bfa_bsg_rport_reset_stats_s *iocmd =
1057 (struct bfa_bsg_rport_reset_stats_s *)cmd;
1058 struct bfa_fcs_lport_s *fcs_port;
1059 struct bfa_fcs_itnim_s *itnim;
1060 unsigned long flags;
1062 spin_lock_irqsave(&bfad->bfad_lock, flags);
1063 fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
1064 iocmd->vf_id, iocmd->pwwn);
1066 iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
1068 itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
1070 iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
1072 iocmd->status = BFA_STATUS_OK;
1073 bfa_fcs_itnim_stats_clear(fcs_port, iocmd->rpwwn);
1074 bfa_itnim_clear_stats(bfa_fcs_itnim_get_halitn(itnim));
1077 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
/*
 * bfad_iocmd_itnim_get_itnstats - BSG handler: fetch FCS-level itnim stats
 * for (lpwwn, rpwwn).  NOTE(review): the &iocmd->itnstats argument line after
 * line 1103 and the NULL-check branch keywords appear elided.
 */
1083 bfad_iocmd_itnim_get_itnstats(struct bfad_s *bfad, void *cmd)
1085 struct bfa_bsg_itnim_itnstats_s *iocmd =
1086 (struct bfa_bsg_itnim_itnstats_s *)cmd;
1087 struct bfa_fcs_lport_s *fcs_port;
1088 struct bfa_fcs_itnim_s *itnim;
1089 unsigned long flags;
1091 spin_lock_irqsave(&bfad->bfad_lock, flags);
1092 fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
1093 iocmd->vf_id, iocmd->lpwwn);
1095 iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
1098 itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
1100 iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
1102 iocmd->status = BFA_STATUS_OK;
1103 bfa_fcs_itnim_stats_get(fcs_port, iocmd->rpwwn,
1107 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
/* bfad_iocmd_fcport_enable - BSG handler: enable the FC port under bfad_lock. */
1112 bfad_iocmd_fcport_enable(struct bfad_s *bfad, void *cmd)
1114 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
1115 unsigned long flags;
1117 spin_lock_irqsave(&bfad->bfad_lock, flags);
1118 iocmd->status = bfa_fcport_enable(&bfad->bfa);
1119 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
/* bfad_iocmd_fcport_disable - BSG handler: disable the FC port under bfad_lock. */
1125 bfad_iocmd_fcport_disable(struct bfad_s *bfad, void *cmd)
1127 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
1128 unsigned long flags;
1130 spin_lock_irqsave(&bfad->bfad_lock, flags);
1131 iocmd->status = bfa_fcport_disable(&bfad->bfa);
1132 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
/*
 * bfad_iocmd_ioc_get_pcifn_cfg - BSG handler: query PCI-function (ablk)
 * configuration asynchronously; the destination argument after line 1146
 * appears elided in this listing.
 */
1138 bfad_iocmd_ioc_get_pcifn_cfg(struct bfad_s *bfad, void *cmd)
1140 struct bfa_bsg_pcifn_cfg_s *iocmd = (struct bfa_bsg_pcifn_cfg_s *)cmd;
1141 struct bfad_hal_comp fcomp;
1142 unsigned long flags;
1144 init_completion(&fcomp.comp);
1145 spin_lock_irqsave(&bfad->bfad_lock, flags);
1146 iocmd->status = bfa_ablk_query(&bfad->bfa.modules.ablk,
1148 bfad_hcb_comp, &fcomp);
1149 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1150 if (iocmd->status != BFA_STATUS_OK)
1153 wait_for_completion(&fcomp.comp);
1154 iocmd->status = fcomp.status;
/*
 * bfad_iocmd_pcifn_create - BSG handler: create a PCI function via the ablk
 * module; pcifn_id is written back through &iocmd->pcifn_id on success.
 */
1160 bfad_iocmd_pcifn_create(struct bfad_s *bfad, void *cmd)
1162 struct bfa_bsg_pcifn_s *iocmd = (struct bfa_bsg_pcifn_s *)cmd;
1163 struct bfad_hal_comp fcomp;
1164 unsigned long flags;
1166 init_completion(&fcomp.comp);
1167 spin_lock_irqsave(&bfad->bfad_lock, flags);
1168 iocmd->status = bfa_ablk_pf_create(&bfad->bfa.modules.ablk,
1169 &iocmd->pcifn_id, iocmd->port,
1170 iocmd->pcifn_class, iocmd->bandwidth,
1171 bfad_hcb_comp, &fcomp);
1172 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1173 if (iocmd->status != BFA_STATUS_OK)
1176 wait_for_completion(&fcomp.comp);
1177 iocmd->status = fcomp.status;
/*
 * bfad_iocmd_pcifn_delete - BSG handler: delete a PCI function via the ablk
 * module; the pcifn_id argument after line 1191 appears elided in this listing.
 */
1183 bfad_iocmd_pcifn_delete(struct bfad_s *bfad, void *cmd)
1185 struct bfa_bsg_pcifn_s *iocmd = (struct bfa_bsg_pcifn_s *)cmd;
1186 struct bfad_hal_comp fcomp;
1187 unsigned long flags;
1189 init_completion(&fcomp.comp);
1190 spin_lock_irqsave(&bfad->bfad_lock, flags);
1191 iocmd->status = bfa_ablk_pf_delete(&bfad->bfa.modules.ablk,
1193 bfad_hcb_comp, &fcomp);
1194 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1195 if (iocmd->status != BFA_STATUS_OK)
1198 wait_for_completion(&fcomp.comp);
1199 iocmd->status = fcomp.status;
/*
 * bfad_iocmd_pcifn_bw - BSG handler: update the bandwidth of a PCI function
 * via the ablk module; traces status both before and after the wait.
 */
1205 bfad_iocmd_pcifn_bw(struct bfad_s *bfad, void *cmd)
1207 struct bfa_bsg_pcifn_s *iocmd = (struct bfa_bsg_pcifn_s *)cmd;
1208 struct bfad_hal_comp fcomp;
1209 unsigned long flags;
1211 init_completion(&fcomp.comp);
1212 spin_lock_irqsave(&bfad->bfad_lock, flags);
1213 iocmd->status = bfa_ablk_pf_update(&bfad->bfa.modules.ablk,
1214 iocmd->pcifn_id, iocmd->bandwidth,
1215 bfad_hcb_comp, &fcomp);
1216 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1217 bfa_trc(bfad, iocmd->status);
1218 if (iocmd->status != BFA_STATUS_OK)
1221 wait_for_completion(&fcomp.comp);
1222 iocmd->status = fcomp.status;
1223 bfa_trc(bfad, iocmd->status);
/*
 * bfad_iocmd_adapter_cfg_mode - BSG handler: reconfigure adapter mode
 * (mode/max_pf/max_vf) via the ablk module, waiting for completion.
 */
1229 bfad_iocmd_adapter_cfg_mode(struct bfad_s *bfad, void *cmd)
1231 struct bfa_bsg_adapter_cfg_mode_s *iocmd =
1232 (struct bfa_bsg_adapter_cfg_mode_s *)cmd;
1233 struct bfad_hal_comp fcomp;
1234 unsigned long flags = 0;
1236 init_completion(&fcomp.comp);
1237 spin_lock_irqsave(&bfad->bfad_lock, flags);
1238 iocmd->status = bfa_ablk_adapter_config(&bfad->bfa.modules.ablk,
1239 iocmd->cfg.mode, iocmd->cfg.max_pf,
1240 iocmd->cfg.max_vf, bfad_hcb_comp, &fcomp);
1241 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1242 if (iocmd->status != BFA_STATUS_OK)
1245 wait_for_completion(&fcomp.comp);
1246 iocmd->status = fcomp.status;
1252 bfad_iocmd_port_cfg_mode(struct bfad_s *bfad, void *cmd)
1254 struct bfa_bsg_port_cfg_mode_s *iocmd =
1255 (struct bfa_bsg_port_cfg_mode_s *)cmd;
1256 struct bfad_hal_comp fcomp;
1257 unsigned long flags = 0;
1259 init_completion(&fcomp.comp);
1260 spin_lock_irqsave(&bfad->bfad_lock, flags);
1261 iocmd->status = bfa_ablk_port_config(&bfad->bfa.modules.ablk,
1262 iocmd->instance, iocmd->cfg.mode,
1263 iocmd->cfg.max_pf, iocmd->cfg.max_vf,
1264 bfad_hcb_comp, &fcomp);
1265 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1266 if (iocmd->status != BFA_STATUS_OK)
1269 wait_for_completion(&fcomp.comp);
1270 iocmd->status = fcomp.status;
1276 bfad_iocmd_ablk_optrom(struct bfad_s *bfad, unsigned int cmd, void *pcmd)
1278 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd;
1279 struct bfad_hal_comp fcomp;
1280 unsigned long flags;
1282 init_completion(&fcomp.comp);
1283 spin_lock_irqsave(&bfad->bfad_lock, flags);
1284 if (cmd == IOCMD_FLASH_ENABLE_OPTROM)
1285 iocmd->status = bfa_ablk_optrom_en(&bfad->bfa.modules.ablk,
1286 bfad_hcb_comp, &fcomp);
1288 iocmd->status = bfa_ablk_optrom_dis(&bfad->bfa.modules.ablk,
1289 bfad_hcb_comp, &fcomp);
1290 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1292 if (iocmd->status != BFA_STATUS_OK)
1295 wait_for_completion(&fcomp.comp);
1296 iocmd->status = fcomp.status;
1302 bfad_iocmd_faa_query(struct bfad_s *bfad, void *cmd)
1304 struct bfa_bsg_faa_attr_s *iocmd = (struct bfa_bsg_faa_attr_s *)cmd;
1305 struct bfad_hal_comp fcomp;
1306 unsigned long flags;
1308 init_completion(&fcomp.comp);
1309 iocmd->status = BFA_STATUS_OK;
1310 spin_lock_irqsave(&bfad->bfad_lock, flags);
1311 iocmd->status = bfa_faa_query(&bfad->bfa, &iocmd->faa_attr,
1312 bfad_hcb_comp, &fcomp);
1313 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1315 if (iocmd->status != BFA_STATUS_OK)
1318 wait_for_completion(&fcomp.comp);
1319 iocmd->status = fcomp.status;
1325 bfad_iocmd_cee_attr(struct bfad_s *bfad, void *cmd, unsigned int payload_len)
1327 struct bfa_bsg_cee_attr_s *iocmd =
1328 (struct bfa_bsg_cee_attr_s *)cmd;
1330 struct bfad_hal_comp cee_comp;
1331 unsigned long flags;
1333 if (bfad_chk_iocmd_sz(payload_len,
1334 sizeof(struct bfa_bsg_cee_attr_s),
1335 sizeof(struct bfa_cee_attr_s)) != BFA_STATUS_OK) {
1336 iocmd->status = BFA_STATUS_VERSION_FAIL;
1340 iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_cee_attr_s);
1342 cee_comp.status = 0;
1343 init_completion(&cee_comp.comp);
1344 mutex_lock(&bfad_mutex);
1345 spin_lock_irqsave(&bfad->bfad_lock, flags);
1346 iocmd->status = bfa_cee_get_attr(&bfad->bfa.modules.cee, iocmd_bufptr,
1347 bfad_hcb_comp, &cee_comp);
1348 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1349 if (iocmd->status != BFA_STATUS_OK) {
1350 mutex_unlock(&bfad_mutex);
1351 bfa_trc(bfad, 0x5555);
1354 wait_for_completion(&cee_comp.comp);
1355 mutex_unlock(&bfad_mutex);
1361 bfad_iocmd_cee_get_stats(struct bfad_s *bfad, void *cmd,
1362 unsigned int payload_len)
1364 struct bfa_bsg_cee_stats_s *iocmd =
1365 (struct bfa_bsg_cee_stats_s *)cmd;
1367 struct bfad_hal_comp cee_comp;
1368 unsigned long flags;
1370 if (bfad_chk_iocmd_sz(payload_len,
1371 sizeof(struct bfa_bsg_cee_stats_s),
1372 sizeof(struct bfa_cee_stats_s)) != BFA_STATUS_OK) {
1373 iocmd->status = BFA_STATUS_VERSION_FAIL;
1377 iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_cee_stats_s);
1379 cee_comp.status = 0;
1380 init_completion(&cee_comp.comp);
1381 mutex_lock(&bfad_mutex);
1382 spin_lock_irqsave(&bfad->bfad_lock, flags);
1383 iocmd->status = bfa_cee_get_stats(&bfad->bfa.modules.cee, iocmd_bufptr,
1384 bfad_hcb_comp, &cee_comp);
1385 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1386 if (iocmd->status != BFA_STATUS_OK) {
1387 mutex_unlock(&bfad_mutex);
1388 bfa_trc(bfad, 0x5555);
1391 wait_for_completion(&cee_comp.comp);
1392 mutex_unlock(&bfad_mutex);
1398 bfad_iocmd_cee_reset_stats(struct bfad_s *bfad, void *cmd)
1400 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
1401 unsigned long flags;
1403 spin_lock_irqsave(&bfad->bfad_lock, flags);
1404 iocmd->status = bfa_cee_reset_stats(&bfad->bfa.modules.cee, NULL, NULL);
1405 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1406 if (iocmd->status != BFA_STATUS_OK)
1407 bfa_trc(bfad, 0x5555);
1412 bfad_iocmd_sfp_media(struct bfad_s *bfad, void *cmd)
1414 struct bfa_bsg_sfp_media_s *iocmd = (struct bfa_bsg_sfp_media_s *)cmd;
1415 struct bfad_hal_comp fcomp;
1416 unsigned long flags;
1418 init_completion(&fcomp.comp);
1419 spin_lock_irqsave(&bfad->bfad_lock, flags);
1420 iocmd->status = bfa_sfp_media(BFA_SFP_MOD(&bfad->bfa), &iocmd->media,
1421 bfad_hcb_comp, &fcomp);
1422 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1423 bfa_trc(bfad, iocmd->status);
1424 if (iocmd->status != BFA_STATUS_SFP_NOT_READY)
1427 wait_for_completion(&fcomp.comp);
1428 iocmd->status = fcomp.status;
1434 bfad_iocmd_sfp_speed(struct bfad_s *bfad, void *cmd)
1436 struct bfa_bsg_sfp_speed_s *iocmd = (struct bfa_bsg_sfp_speed_s *)cmd;
1437 struct bfad_hal_comp fcomp;
1438 unsigned long flags;
1440 init_completion(&fcomp.comp);
1441 spin_lock_irqsave(&bfad->bfad_lock, flags);
1442 iocmd->status = bfa_sfp_speed(BFA_SFP_MOD(&bfad->bfa), iocmd->speed,
1443 bfad_hcb_comp, &fcomp);
1444 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1445 bfa_trc(bfad, iocmd->status);
1446 if (iocmd->status != BFA_STATUS_SFP_NOT_READY)
1448 wait_for_completion(&fcomp.comp);
1449 iocmd->status = fcomp.status;
1455 bfad_iocmd_flash_get_attr(struct bfad_s *bfad, void *cmd)
1457 struct bfa_bsg_flash_attr_s *iocmd =
1458 (struct bfa_bsg_flash_attr_s *)cmd;
1459 struct bfad_hal_comp fcomp;
1460 unsigned long flags;
1462 init_completion(&fcomp.comp);
1463 spin_lock_irqsave(&bfad->bfad_lock, flags);
1464 iocmd->status = bfa_flash_get_attr(BFA_FLASH(&bfad->bfa), &iocmd->attr,
1465 bfad_hcb_comp, &fcomp);
1466 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1467 if (iocmd->status != BFA_STATUS_OK)
1469 wait_for_completion(&fcomp.comp);
1470 iocmd->status = fcomp.status;
1476 bfad_iocmd_flash_erase_part(struct bfad_s *bfad, void *cmd)
1478 struct bfa_bsg_flash_s *iocmd = (struct bfa_bsg_flash_s *)cmd;
1479 struct bfad_hal_comp fcomp;
1480 unsigned long flags;
1482 init_completion(&fcomp.comp);
1483 spin_lock_irqsave(&bfad->bfad_lock, flags);
1484 iocmd->status = bfa_flash_erase_part(BFA_FLASH(&bfad->bfa), iocmd->type,
1485 iocmd->instance, bfad_hcb_comp, &fcomp);
1486 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1487 if (iocmd->status != BFA_STATUS_OK)
1489 wait_for_completion(&fcomp.comp);
1490 iocmd->status = fcomp.status;
1496 bfad_iocmd_flash_update_part(struct bfad_s *bfad, void *cmd,
1497 unsigned int payload_len)
1499 struct bfa_bsg_flash_s *iocmd = (struct bfa_bsg_flash_s *)cmd;
1501 struct bfad_hal_comp fcomp;
1502 unsigned long flags;
1504 if (bfad_chk_iocmd_sz(payload_len,
1505 sizeof(struct bfa_bsg_flash_s),
1506 iocmd->bufsz) != BFA_STATUS_OK) {
1507 iocmd->status = BFA_STATUS_VERSION_FAIL;
1511 iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_flash_s);
1513 init_completion(&fcomp.comp);
1514 spin_lock_irqsave(&bfad->bfad_lock, flags);
1515 iocmd->status = bfa_flash_update_part(BFA_FLASH(&bfad->bfa),
1516 iocmd->type, iocmd->instance, iocmd_bufptr,
1517 iocmd->bufsz, 0, bfad_hcb_comp, &fcomp);
1518 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1519 if (iocmd->status != BFA_STATUS_OK)
1521 wait_for_completion(&fcomp.comp);
1522 iocmd->status = fcomp.status;
1528 bfad_iocmd_flash_read_part(struct bfad_s *bfad, void *cmd,
1529 unsigned int payload_len)
1531 struct bfa_bsg_flash_s *iocmd = (struct bfa_bsg_flash_s *)cmd;
1532 struct bfad_hal_comp fcomp;
1534 unsigned long flags;
1536 if (bfad_chk_iocmd_sz(payload_len,
1537 sizeof(struct bfa_bsg_flash_s),
1538 iocmd->bufsz) != BFA_STATUS_OK) {
1539 iocmd->status = BFA_STATUS_VERSION_FAIL;
1543 iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_flash_s);
1545 init_completion(&fcomp.comp);
1546 spin_lock_irqsave(&bfad->bfad_lock, flags);
1547 iocmd->status = bfa_flash_read_part(BFA_FLASH(&bfad->bfa), iocmd->type,
1548 iocmd->instance, iocmd_bufptr, iocmd->bufsz, 0,
1549 bfad_hcb_comp, &fcomp);
1550 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1551 if (iocmd->status != BFA_STATUS_OK)
1553 wait_for_completion(&fcomp.comp);
1554 iocmd->status = fcomp.status;
1560 bfad_iocmd_diag_temp(struct bfad_s *bfad, void *cmd)
1562 struct bfa_bsg_diag_get_temp_s *iocmd =
1563 (struct bfa_bsg_diag_get_temp_s *)cmd;
1564 struct bfad_hal_comp fcomp;
1565 unsigned long flags;
1567 init_completion(&fcomp.comp);
1568 spin_lock_irqsave(&bfad->bfad_lock, flags);
1569 iocmd->status = bfa_diag_tsensor_query(BFA_DIAG_MOD(&bfad->bfa),
1570 &iocmd->result, bfad_hcb_comp, &fcomp);
1571 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1572 bfa_trc(bfad, iocmd->status);
1573 if (iocmd->status != BFA_STATUS_OK)
1575 wait_for_completion(&fcomp.comp);
1576 iocmd->status = fcomp.status;
1582 bfad_iocmd_diag_memtest(struct bfad_s *bfad, void *cmd)
1584 struct bfa_bsg_diag_memtest_s *iocmd =
1585 (struct bfa_bsg_diag_memtest_s *)cmd;
1586 struct bfad_hal_comp fcomp;
1587 unsigned long flags;
1589 init_completion(&fcomp.comp);
1590 spin_lock_irqsave(&bfad->bfad_lock, flags);
1591 iocmd->status = bfa_diag_memtest(BFA_DIAG_MOD(&bfad->bfa),
1592 &iocmd->memtest, iocmd->pat,
1593 &iocmd->result, bfad_hcb_comp, &fcomp);
1594 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1595 bfa_trc(bfad, iocmd->status);
1596 if (iocmd->status != BFA_STATUS_OK)
1598 wait_for_completion(&fcomp.comp);
1599 iocmd->status = fcomp.status;
1605 bfad_iocmd_diag_loopback(struct bfad_s *bfad, void *cmd)
1607 struct bfa_bsg_diag_loopback_s *iocmd =
1608 (struct bfa_bsg_diag_loopback_s *)cmd;
1609 struct bfad_hal_comp fcomp;
1610 unsigned long flags;
1612 init_completion(&fcomp.comp);
1613 spin_lock_irqsave(&bfad->bfad_lock, flags);
1614 iocmd->status = bfa_fcdiag_loopback(&bfad->bfa, iocmd->opmode,
1615 iocmd->speed, iocmd->lpcnt, iocmd->pat,
1616 &iocmd->result, bfad_hcb_comp, &fcomp);
1617 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1618 bfa_trc(bfad, iocmd->status);
1619 if (iocmd->status != BFA_STATUS_OK)
1621 wait_for_completion(&fcomp.comp);
1622 iocmd->status = fcomp.status;
1628 bfad_iocmd_diag_fwping(struct bfad_s *bfad, void *cmd)
1630 struct bfa_bsg_diag_fwping_s *iocmd =
1631 (struct bfa_bsg_diag_fwping_s *)cmd;
1632 struct bfad_hal_comp fcomp;
1633 unsigned long flags;
1635 init_completion(&fcomp.comp);
1636 spin_lock_irqsave(&bfad->bfad_lock, flags);
1637 iocmd->status = bfa_diag_fwping(BFA_DIAG_MOD(&bfad->bfa), iocmd->cnt,
1638 iocmd->pattern, &iocmd->result,
1639 bfad_hcb_comp, &fcomp);
1640 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1641 bfa_trc(bfad, iocmd->status);
1642 if (iocmd->status != BFA_STATUS_OK)
1644 bfa_trc(bfad, 0x77771);
1645 wait_for_completion(&fcomp.comp);
1646 iocmd->status = fcomp.status;
1652 bfad_iocmd_diag_queuetest(struct bfad_s *bfad, void *cmd)
1654 struct bfa_bsg_diag_qtest_s *iocmd = (struct bfa_bsg_diag_qtest_s *)cmd;
1655 struct bfad_hal_comp fcomp;
1656 unsigned long flags;
1658 init_completion(&fcomp.comp);
1659 spin_lock_irqsave(&bfad->bfad_lock, flags);
1660 iocmd->status = bfa_fcdiag_queuetest(&bfad->bfa, iocmd->force,
1661 iocmd->queue, &iocmd->result,
1662 bfad_hcb_comp, &fcomp);
1663 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1664 if (iocmd->status != BFA_STATUS_OK)
1666 wait_for_completion(&fcomp.comp);
1667 iocmd->status = fcomp.status;
1673 bfad_iocmd_diag_sfp(struct bfad_s *bfad, void *cmd)
1675 struct bfa_bsg_sfp_show_s *iocmd =
1676 (struct bfa_bsg_sfp_show_s *)cmd;
1677 struct bfad_hal_comp fcomp;
1678 unsigned long flags;
1680 init_completion(&fcomp.comp);
1681 spin_lock_irqsave(&bfad->bfad_lock, flags);
1682 iocmd->status = bfa_sfp_show(BFA_SFP_MOD(&bfad->bfa), &iocmd->sfp,
1683 bfad_hcb_comp, &fcomp);
1684 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1685 bfa_trc(bfad, iocmd->status);
1686 if (iocmd->status != BFA_STATUS_OK)
1688 wait_for_completion(&fcomp.comp);
1689 iocmd->status = fcomp.status;
1690 bfa_trc(bfad, iocmd->status);
1696 bfad_iocmd_diag_led(struct bfad_s *bfad, void *cmd)
1698 struct bfa_bsg_diag_led_s *iocmd = (struct bfa_bsg_diag_led_s *)cmd;
1699 unsigned long flags;
1701 spin_lock_irqsave(&bfad->bfad_lock, flags);
1702 iocmd->status = bfa_diag_ledtest(BFA_DIAG_MOD(&bfad->bfa),
1704 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1709 bfad_iocmd_diag_beacon_lport(struct bfad_s *bfad, void *cmd)
1711 struct bfa_bsg_diag_beacon_s *iocmd =
1712 (struct bfa_bsg_diag_beacon_s *)cmd;
1713 unsigned long flags;
1715 spin_lock_irqsave(&bfad->bfad_lock, flags);
1716 iocmd->status = bfa_diag_beacon_port(BFA_DIAG_MOD(&bfad->bfa),
1717 iocmd->beacon, iocmd->link_e2e_beacon,
1719 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1724 bfad_iocmd_diag_lb_stat(struct bfad_s *bfad, void *cmd)
1726 struct bfa_bsg_diag_lb_stat_s *iocmd =
1727 (struct bfa_bsg_diag_lb_stat_s *)cmd;
1728 unsigned long flags;
1730 spin_lock_irqsave(&bfad->bfad_lock, flags);
1731 iocmd->status = bfa_fcdiag_lb_is_running(&bfad->bfa);
1732 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1733 bfa_trc(bfad, iocmd->status);
1739 bfad_iocmd_phy_get_attr(struct bfad_s *bfad, void *cmd)
1741 struct bfa_bsg_phy_attr_s *iocmd =
1742 (struct bfa_bsg_phy_attr_s *)cmd;
1743 struct bfad_hal_comp fcomp;
1744 unsigned long flags;
1746 init_completion(&fcomp.comp);
1747 spin_lock_irqsave(&bfad->bfad_lock, flags);
1748 iocmd->status = bfa_phy_get_attr(BFA_PHY(&bfad->bfa), iocmd->instance,
1749 &iocmd->attr, bfad_hcb_comp, &fcomp);
1750 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1751 if (iocmd->status != BFA_STATUS_OK)
1753 wait_for_completion(&fcomp.comp);
1754 iocmd->status = fcomp.status;
1760 bfad_iocmd_phy_get_stats(struct bfad_s *bfad, void *cmd)
1762 struct bfa_bsg_phy_stats_s *iocmd =
1763 (struct bfa_bsg_phy_stats_s *)cmd;
1764 struct bfad_hal_comp fcomp;
1765 unsigned long flags;
1767 init_completion(&fcomp.comp);
1768 spin_lock_irqsave(&bfad->bfad_lock, flags);
1769 iocmd->status = bfa_phy_get_stats(BFA_PHY(&bfad->bfa), iocmd->instance,
1770 &iocmd->stats, bfad_hcb_comp, &fcomp);
1771 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1772 if (iocmd->status != BFA_STATUS_OK)
1774 wait_for_completion(&fcomp.comp);
1775 iocmd->status = fcomp.status;
1781 bfad_iocmd_phy_read(struct bfad_s *bfad, void *cmd, unsigned int payload_len)
1783 struct bfa_bsg_phy_s *iocmd = (struct bfa_bsg_phy_s *)cmd;
1784 struct bfad_hal_comp fcomp;
1786 unsigned long flags;
1788 if (bfad_chk_iocmd_sz(payload_len,
1789 sizeof(struct bfa_bsg_phy_s),
1790 iocmd->bufsz) != BFA_STATUS_OK) {
1791 iocmd->status = BFA_STATUS_VERSION_FAIL;
1795 iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_phy_s);
1796 init_completion(&fcomp.comp);
1797 spin_lock_irqsave(&bfad->bfad_lock, flags);
1798 iocmd->status = bfa_phy_read(BFA_PHY(&bfad->bfa),
1799 iocmd->instance, iocmd_bufptr, iocmd->bufsz,
1800 0, bfad_hcb_comp, &fcomp);
1801 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1802 if (iocmd->status != BFA_STATUS_OK)
1804 wait_for_completion(&fcomp.comp);
1805 iocmd->status = fcomp.status;
1806 if (iocmd->status != BFA_STATUS_OK)
1813 bfad_iocmd_vhba_query(struct bfad_s *bfad, void *cmd)
1815 struct bfa_bsg_vhba_attr_s *iocmd =
1816 (struct bfa_bsg_vhba_attr_s *)cmd;
1817 struct bfa_vhba_attr_s *attr = &iocmd->attr;
1818 unsigned long flags;
1820 spin_lock_irqsave(&bfad->bfad_lock, flags);
1821 attr->pwwn = bfad->bfa.ioc.attr->pwwn;
1822 attr->nwwn = bfad->bfa.ioc.attr->nwwn;
1823 attr->plog_enabled = (bfa_boolean_t)bfad->bfa.plog->plog_enabled;
1824 attr->io_profile = bfa_fcpim_get_io_profile(&bfad->bfa);
1825 attr->path_tov = bfa_fcpim_path_tov_get(&bfad->bfa);
1826 iocmd->status = BFA_STATUS_OK;
1827 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1832 bfad_iocmd_phy_update(struct bfad_s *bfad, void *cmd, unsigned int payload_len)
1834 struct bfa_bsg_phy_s *iocmd = (struct bfa_bsg_phy_s *)cmd;
1836 struct bfad_hal_comp fcomp;
1837 unsigned long flags;
1839 if (bfad_chk_iocmd_sz(payload_len,
1840 sizeof(struct bfa_bsg_phy_s),
1841 iocmd->bufsz) != BFA_STATUS_OK) {
1842 iocmd->status = BFA_STATUS_VERSION_FAIL;
1846 iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_phy_s);
1847 init_completion(&fcomp.comp);
1848 spin_lock_irqsave(&bfad->bfad_lock, flags);
1849 iocmd->status = bfa_phy_update(BFA_PHY(&bfad->bfa),
1850 iocmd->instance, iocmd_bufptr, iocmd->bufsz,
1851 0, bfad_hcb_comp, &fcomp);
1852 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1853 if (iocmd->status != BFA_STATUS_OK)
1855 wait_for_completion(&fcomp.comp);
1856 iocmd->status = fcomp.status;
1862 bfad_iocmd_porglog_get(struct bfad_s *bfad, void *cmd)
1864 struct bfa_bsg_debug_s *iocmd = (struct bfa_bsg_debug_s *)cmd;
1867 if (iocmd->bufsz < sizeof(struct bfa_plog_s)) {
1868 bfa_trc(bfad, sizeof(struct bfa_plog_s));
1869 iocmd->status = BFA_STATUS_EINVAL;
1873 iocmd->status = BFA_STATUS_OK;
1874 iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_debug_s);
1875 memcpy(iocmd_bufptr, (u8 *) &bfad->plog_buf, sizeof(struct bfa_plog_s));
1880 #define BFA_DEBUG_FW_CORE_CHUNK_SZ 0x4000U /* 16K chunks for FW dump */
1882 bfad_iocmd_debug_fw_core(struct bfad_s *bfad, void *cmd,
1883 unsigned int payload_len)
1885 struct bfa_bsg_debug_s *iocmd = (struct bfa_bsg_debug_s *)cmd;
1887 unsigned long flags;
1890 if (bfad_chk_iocmd_sz(payload_len, sizeof(struct bfa_bsg_debug_s),
1891 BFA_DEBUG_FW_CORE_CHUNK_SZ) != BFA_STATUS_OK) {
1892 iocmd->status = BFA_STATUS_VERSION_FAIL;
1896 if (iocmd->bufsz < BFA_DEBUG_FW_CORE_CHUNK_SZ ||
1897 !IS_ALIGNED(iocmd->bufsz, sizeof(u16)) ||
1898 !IS_ALIGNED(iocmd->offset, sizeof(u32))) {
1899 bfa_trc(bfad, BFA_DEBUG_FW_CORE_CHUNK_SZ);
1900 iocmd->status = BFA_STATUS_EINVAL;
1904 iocmd_bufptr = (char *)iocmd + sizeof(struct bfa_bsg_debug_s);
1905 spin_lock_irqsave(&bfad->bfad_lock, flags);
1906 offset = iocmd->offset;
1907 iocmd->status = bfa_ioc_debug_fwcore(&bfad->bfa.ioc, iocmd_bufptr,
1908 &offset, &iocmd->bufsz);
1909 iocmd->offset = offset;
1910 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1916 bfad_iocmd_debug_ctl(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
1918 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
1919 unsigned long flags;
1921 if (v_cmd == IOCMD_DEBUG_FW_STATE_CLR) {
1922 spin_lock_irqsave(&bfad->bfad_lock, flags);
1923 bfad->bfa.ioc.dbg_fwsave_once = BFA_TRUE;
1924 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1925 } else if (v_cmd == IOCMD_DEBUG_PORTLOG_CLR)
1926 bfad->plog_buf.head = bfad->plog_buf.tail = 0;
1927 else if (v_cmd == IOCMD_DEBUG_START_DTRC)
1928 bfa_trc_init(bfad->trcmod);
1929 else if (v_cmd == IOCMD_DEBUG_STOP_DTRC)
1930 bfa_trc_stop(bfad->trcmod);
1932 iocmd->status = BFA_STATUS_OK;
1937 bfad_iocmd_porglog_ctl(struct bfad_s *bfad, void *cmd)
1939 struct bfa_bsg_portlogctl_s *iocmd = (struct bfa_bsg_portlogctl_s *)cmd;
1941 if (iocmd->ctl == BFA_TRUE)
1942 bfad->plog_buf.plog_enabled = 1;
1944 bfad->plog_buf.plog_enabled = 0;
1946 iocmd->status = BFA_STATUS_OK;
1951 bfad_iocmd_fcpim_cfg_profile(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
1953 struct bfa_bsg_fcpim_profile_s *iocmd =
1954 (struct bfa_bsg_fcpim_profile_s *)cmd;
1956 unsigned long flags;
1958 do_gettimeofday(&tv);
1959 spin_lock_irqsave(&bfad->bfad_lock, flags);
1960 if (v_cmd == IOCMD_FCPIM_PROFILE_ON)
1961 iocmd->status = bfa_fcpim_profile_on(&bfad->bfa, tv.tv_sec);
1962 else if (v_cmd == IOCMD_FCPIM_PROFILE_OFF)
1963 iocmd->status = bfa_fcpim_profile_off(&bfad->bfa);
1964 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1970 bfad_iocmd_itnim_get_ioprofile(struct bfad_s *bfad, void *cmd)
1972 struct bfa_bsg_itnim_ioprofile_s *iocmd =
1973 (struct bfa_bsg_itnim_ioprofile_s *)cmd;
1974 struct bfa_fcs_lport_s *fcs_port;
1975 struct bfa_fcs_itnim_s *itnim;
1976 unsigned long flags;
1978 spin_lock_irqsave(&bfad->bfad_lock, flags);
1979 fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs,
1980 iocmd->vf_id, iocmd->lpwwn);
1982 iocmd->status = BFA_STATUS_UNKNOWN_LWWN;
1984 itnim = bfa_fcs_itnim_lookup(fcs_port, iocmd->rpwwn);
1986 iocmd->status = BFA_STATUS_UNKNOWN_RWWN;
1988 iocmd->status = bfa_itnim_get_ioprofile(
1989 bfa_fcs_itnim_get_halitn(itnim),
1992 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
1997 bfad_iocmd_fcport_get_stats(struct bfad_s *bfad, void *cmd)
1999 struct bfa_bsg_fcport_stats_s *iocmd =
2000 (struct bfa_bsg_fcport_stats_s *)cmd;
2001 struct bfad_hal_comp fcomp;
2002 unsigned long flags;
2003 struct bfa_cb_pending_q_s cb_qe;
2005 init_completion(&fcomp.comp);
2006 bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp,
2007 &fcomp, &iocmd->stats);
2008 spin_lock_irqsave(&bfad->bfad_lock, flags);
2009 iocmd->status = bfa_fcport_get_stats(&bfad->bfa, &cb_qe);
2010 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2011 if (iocmd->status != BFA_STATUS_OK) {
2012 bfa_trc(bfad, iocmd->status);
2015 wait_for_completion(&fcomp.comp);
2016 iocmd->status = fcomp.status;
2022 bfad_iocmd_fcport_reset_stats(struct bfad_s *bfad, void *cmd)
2024 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
2025 struct bfad_hal_comp fcomp;
2026 unsigned long flags;
2027 struct bfa_cb_pending_q_s cb_qe;
2029 init_completion(&fcomp.comp);
2030 bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp, &fcomp, NULL);
2032 spin_lock_irqsave(&bfad->bfad_lock, flags);
2033 iocmd->status = bfa_fcport_clear_stats(&bfad->bfa, &cb_qe);
2034 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2035 if (iocmd->status != BFA_STATUS_OK) {
2036 bfa_trc(bfad, iocmd->status);
2039 wait_for_completion(&fcomp.comp);
2040 iocmd->status = fcomp.status;
2046 bfad_iocmd_boot_cfg(struct bfad_s *bfad, void *cmd)
2048 struct bfa_bsg_boot_s *iocmd = (struct bfa_bsg_boot_s *)cmd;
2049 struct bfad_hal_comp fcomp;
2050 unsigned long flags;
2052 init_completion(&fcomp.comp);
2053 spin_lock_irqsave(&bfad->bfad_lock, flags);
2054 iocmd->status = bfa_flash_update_part(BFA_FLASH(&bfad->bfa),
2055 BFA_FLASH_PART_BOOT, PCI_FUNC(bfad->pcidev->devfn),
2056 &iocmd->cfg, sizeof(struct bfa_boot_cfg_s), 0,
2057 bfad_hcb_comp, &fcomp);
2058 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2059 if (iocmd->status != BFA_STATUS_OK)
2061 wait_for_completion(&fcomp.comp);
2062 iocmd->status = fcomp.status;
2068 bfad_iocmd_boot_query(struct bfad_s *bfad, void *cmd)
2070 struct bfa_bsg_boot_s *iocmd = (struct bfa_bsg_boot_s *)cmd;
2071 struct bfad_hal_comp fcomp;
2072 unsigned long flags;
2074 init_completion(&fcomp.comp);
2075 spin_lock_irqsave(&bfad->bfad_lock, flags);
2076 iocmd->status = bfa_flash_read_part(BFA_FLASH(&bfad->bfa),
2077 BFA_FLASH_PART_BOOT, PCI_FUNC(bfad->pcidev->devfn),
2078 &iocmd->cfg, sizeof(struct bfa_boot_cfg_s), 0,
2079 bfad_hcb_comp, &fcomp);
2080 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2081 if (iocmd->status != BFA_STATUS_OK)
2083 wait_for_completion(&fcomp.comp);
2084 iocmd->status = fcomp.status;
2090 bfad_iocmd_preboot_query(struct bfad_s *bfad, void *cmd)
2092 struct bfa_bsg_preboot_s *iocmd = (struct bfa_bsg_preboot_s *)cmd;
2093 struct bfi_iocfc_cfgrsp_s *cfgrsp = bfad->bfa.iocfc.cfgrsp;
2094 struct bfa_boot_pbc_s *pbcfg = &iocmd->cfg;
2095 unsigned long flags;
2097 spin_lock_irqsave(&bfad->bfad_lock, flags);
2098 pbcfg->enable = cfgrsp->pbc_cfg.boot_enabled;
2099 pbcfg->nbluns = cfgrsp->pbc_cfg.nbluns;
2100 pbcfg->speed = cfgrsp->pbc_cfg.port_speed;
2101 memcpy(pbcfg->pblun, cfgrsp->pbc_cfg.blun, sizeof(pbcfg->pblun));
2102 iocmd->status = BFA_STATUS_OK;
2103 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2109 bfad_iocmd_ethboot_cfg(struct bfad_s *bfad, void *cmd)
2111 struct bfa_bsg_ethboot_s *iocmd = (struct bfa_bsg_ethboot_s *)cmd;
2112 struct bfad_hal_comp fcomp;
2113 unsigned long flags;
2115 init_completion(&fcomp.comp);
2116 spin_lock_irqsave(&bfad->bfad_lock, flags);
2117 iocmd->status = bfa_flash_update_part(BFA_FLASH(&bfad->bfa),
2118 BFA_FLASH_PART_PXECFG,
2119 bfad->bfa.ioc.port_id, &iocmd->cfg,
2120 sizeof(struct bfa_ethboot_cfg_s), 0,
2121 bfad_hcb_comp, &fcomp);
2122 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2123 if (iocmd->status != BFA_STATUS_OK)
2125 wait_for_completion(&fcomp.comp);
2126 iocmd->status = fcomp.status;
2132 bfad_iocmd_ethboot_query(struct bfad_s *bfad, void *cmd)
2134 struct bfa_bsg_ethboot_s *iocmd = (struct bfa_bsg_ethboot_s *)cmd;
2135 struct bfad_hal_comp fcomp;
2136 unsigned long flags;
2138 init_completion(&fcomp.comp);
2139 spin_lock_irqsave(&bfad->bfad_lock, flags);
2140 iocmd->status = bfa_flash_read_part(BFA_FLASH(&bfad->bfa),
2141 BFA_FLASH_PART_PXECFG,
2142 bfad->bfa.ioc.port_id, &iocmd->cfg,
2143 sizeof(struct bfa_ethboot_cfg_s), 0,
2144 bfad_hcb_comp, &fcomp);
2145 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2146 if (iocmd->status != BFA_STATUS_OK)
2148 wait_for_completion(&fcomp.comp);
2149 iocmd->status = fcomp.status;
2155 bfad_iocmd_cfg_trunk(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
2157 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
2158 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
2159 struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
2160 unsigned long flags;
2162 spin_lock_irqsave(&bfad->bfad_lock, flags);
2164 if (v_cmd == IOCMD_TRUNK_ENABLE) {
2165 trunk->attr.state = BFA_TRUNK_OFFLINE;
2166 bfa_fcport_disable(&bfad->bfa);
2167 fcport->cfg.trunked = BFA_TRUE;
2168 } else if (v_cmd == IOCMD_TRUNK_DISABLE) {
2169 trunk->attr.state = BFA_TRUNK_DISABLED;
2170 bfa_fcport_disable(&bfad->bfa);
2171 fcport->cfg.trunked = BFA_FALSE;
2174 if (!bfa_fcport_is_disabled(&bfad->bfa))
2175 bfa_fcport_enable(&bfad->bfa);
2177 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2179 iocmd->status = BFA_STATUS_OK;
2184 bfad_iocmd_trunk_get_attr(struct bfad_s *bfad, void *cmd)
2186 struct bfa_bsg_trunk_attr_s *iocmd = (struct bfa_bsg_trunk_attr_s *)cmd;
2187 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
2188 struct bfa_fcport_trunk_s *trunk = &fcport->trunk;
2189 unsigned long flags;
2191 spin_lock_irqsave(&bfad->bfad_lock, flags);
2192 memcpy((void *)&iocmd->attr, (void *)&trunk->attr,
2193 sizeof(struct bfa_trunk_attr_s));
2194 iocmd->attr.port_id = bfa_lps_get_base_pid(&bfad->bfa);
2195 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2197 iocmd->status = BFA_STATUS_OK;
2202 bfad_iocmd_qos(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
2204 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
2205 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
2206 unsigned long flags;
2208 spin_lock_irqsave(&bfad->bfad_lock, flags);
2209 if (bfa_ioc_get_type(&bfad->bfa.ioc) == BFA_IOC_TYPE_FC) {
2210 if (v_cmd == IOCMD_QOS_ENABLE)
2211 fcport->cfg.qos_enabled = BFA_TRUE;
2212 else if (v_cmd == IOCMD_QOS_DISABLE)
2213 fcport->cfg.qos_enabled = BFA_FALSE;
2215 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2217 iocmd->status = BFA_STATUS_OK;
2222 bfad_iocmd_qos_get_attr(struct bfad_s *bfad, void *cmd)
2224 struct bfa_bsg_qos_attr_s *iocmd = (struct bfa_bsg_qos_attr_s *)cmd;
2225 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
2226 unsigned long flags;
2228 spin_lock_irqsave(&bfad->bfad_lock, flags);
2229 iocmd->attr.state = fcport->qos_attr.state;
2230 iocmd->attr.total_bb_cr = be32_to_cpu(fcport->qos_attr.total_bb_cr);
2231 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2233 iocmd->status = BFA_STATUS_OK;
2238 bfad_iocmd_qos_get_vc_attr(struct bfad_s *bfad, void *cmd)
2240 struct bfa_bsg_qos_vc_attr_s *iocmd =
2241 (struct bfa_bsg_qos_vc_attr_s *)cmd;
2242 struct bfa_fcport_s *fcport = BFA_FCPORT_MOD(&bfad->bfa);
2243 struct bfa_qos_vc_attr_s *bfa_vc_attr = &fcport->qos_vc_attr;
2244 unsigned long flags;
2247 spin_lock_irqsave(&bfad->bfad_lock, flags);
2248 iocmd->attr.total_vc_count = be16_to_cpu(bfa_vc_attr->total_vc_count);
2249 iocmd->attr.shared_credit = be16_to_cpu(bfa_vc_attr->shared_credit);
2250 iocmd->attr.elp_opmode_flags =
2251 be32_to_cpu(bfa_vc_attr->elp_opmode_flags);
2253 /* Individual VC info */
2254 while (i < iocmd->attr.total_vc_count) {
2255 iocmd->attr.vc_info[i].vc_credit =
2256 bfa_vc_attr->vc_info[i].vc_credit;
2257 iocmd->attr.vc_info[i].borrow_credit =
2258 bfa_vc_attr->vc_info[i].borrow_credit;
2259 iocmd->attr.vc_info[i].priority =
2260 bfa_vc_attr->vc_info[i].priority;
2263 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2265 iocmd->status = BFA_STATUS_OK;
2270 bfad_iocmd_qos_get_stats(struct bfad_s *bfad, void *cmd)
2272 struct bfa_bsg_fcport_stats_s *iocmd =
2273 (struct bfa_bsg_fcport_stats_s *)cmd;
2274 struct bfad_hal_comp fcomp;
2275 unsigned long flags;
2276 struct bfa_cb_pending_q_s cb_qe;
2278 init_completion(&fcomp.comp);
2279 bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp,
2280 &fcomp, &iocmd->stats);
2282 spin_lock_irqsave(&bfad->bfad_lock, flags);
2283 WARN_ON(!bfa_ioc_get_fcmode(&bfad->bfa.ioc));
2284 iocmd->status = bfa_fcport_get_stats(&bfad->bfa, &cb_qe);
2285 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2286 if (iocmd->status != BFA_STATUS_OK) {
2287 bfa_trc(bfad, iocmd->status);
2290 wait_for_completion(&fcomp.comp);
2291 iocmd->status = fcomp.status;
2297 bfad_iocmd_qos_reset_stats(struct bfad_s *bfad, void *cmd)
2299 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)cmd;
2300 struct bfad_hal_comp fcomp;
2301 unsigned long flags;
2302 struct bfa_cb_pending_q_s cb_qe;
2304 init_completion(&fcomp.comp);
2305 bfa_pending_q_init(&cb_qe, (bfa_cb_cbfn_t)bfad_hcb_comp,
2308 spin_lock_irqsave(&bfad->bfad_lock, flags);
2309 WARN_ON(!bfa_ioc_get_fcmode(&bfad->bfa.ioc));
2310 iocmd->status = bfa_fcport_clear_stats(&bfad->bfa, &cb_qe);
2311 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2312 if (iocmd->status != BFA_STATUS_OK) {
2313 bfa_trc(bfad, iocmd->status);
2316 wait_for_completion(&fcomp.comp);
2317 iocmd->status = fcomp.status;
2323 bfad_iocmd_vf_get_stats(struct bfad_s *bfad, void *cmd)
2325 struct bfa_bsg_vf_stats_s *iocmd =
2326 (struct bfa_bsg_vf_stats_s *)cmd;
2327 struct bfa_fcs_fabric_s *fcs_vf;
2328 unsigned long flags;
2330 spin_lock_irqsave(&bfad->bfad_lock, flags);
2331 fcs_vf = bfa_fcs_vf_lookup(&bfad->bfa_fcs, iocmd->vf_id);
2332 if (fcs_vf == NULL) {
2333 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2334 iocmd->status = BFA_STATUS_UNKNOWN_VFID;
2337 memcpy((void *)&iocmd->stats, (void *)&fcs_vf->stats,
2338 sizeof(struct bfa_vf_stats_s));
2339 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2340 iocmd->status = BFA_STATUS_OK;
2346 bfad_iocmd_vf_clr_stats(struct bfad_s *bfad, void *cmd)
2348 struct bfa_bsg_vf_reset_stats_s *iocmd =
2349 (struct bfa_bsg_vf_reset_stats_s *)cmd;
2350 struct bfa_fcs_fabric_s *fcs_vf;
2351 unsigned long flags;
2353 spin_lock_irqsave(&bfad->bfad_lock, flags);
2354 fcs_vf = bfa_fcs_vf_lookup(&bfad->bfa_fcs, iocmd->vf_id);
2355 if (fcs_vf == NULL) {
2356 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2357 iocmd->status = BFA_STATUS_UNKNOWN_VFID;
2360 memset((void *)&fcs_vf->stats, 0, sizeof(struct bfa_vf_stats_s));
2361 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2362 iocmd->status = BFA_STATUS_OK;
2367 /* Function to reset the LUN SCAN mode */
2369 bfad_iocmd_lunmask_reset_lunscan_mode(struct bfad_s *bfad, int lunmask_cfg)
2371 struct bfad_im_port_s *pport_im = bfad->pport.im_port;
2372 struct bfad_vport_s *vport = NULL;
2374 /* Set the scsi device LUN SCAN flags for base port */
2375 bfad_reset_sdev_bflags(pport_im, lunmask_cfg);
2377 /* Set the scsi device LUN SCAN flags for the vports */
2378 list_for_each_entry(vport, &bfad->vport_list, list_entry)
2379 bfad_reset_sdev_bflags(vport->drv_port.im_port, lunmask_cfg);
2383 bfad_iocmd_lunmask(struct bfad_s *bfad, void *pcmd, unsigned int v_cmd)
2385 struct bfa_bsg_gen_s *iocmd = (struct bfa_bsg_gen_s *)pcmd;
2386 unsigned long flags;
2388 spin_lock_irqsave(&bfad->bfad_lock, flags);
2389 if (v_cmd == IOCMD_FCPIM_LUNMASK_ENABLE) {
2390 iocmd->status = bfa_fcpim_lunmask_update(&bfad->bfa, BFA_TRUE);
2391 /* Set the LUN Scanning mode to be Sequential scan */
2392 if (iocmd->status == BFA_STATUS_OK)
2393 bfad_iocmd_lunmask_reset_lunscan_mode(bfad, BFA_TRUE);
2394 } else if (v_cmd == IOCMD_FCPIM_LUNMASK_DISABLE) {
2395 iocmd->status = bfa_fcpim_lunmask_update(&bfad->bfa, BFA_FALSE);
2396 /* Set the LUN Scanning mode to default REPORT_LUNS scan */
2397 if (iocmd->status == BFA_STATUS_OK)
2398 bfad_iocmd_lunmask_reset_lunscan_mode(bfad, BFA_FALSE);
2399 } else if (v_cmd == IOCMD_FCPIM_LUNMASK_CLEAR)
2400 iocmd->status = bfa_fcpim_lunmask_clear(&bfad->bfa);
2401 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2406 bfad_iocmd_fcpim_lunmask_query(struct bfad_s *bfad, void *cmd)
2408 struct bfa_bsg_fcpim_lunmask_query_s *iocmd =
2409 (struct bfa_bsg_fcpim_lunmask_query_s *)cmd;
2410 struct bfa_lunmask_cfg_s *lun_mask = &iocmd->lun_mask;
2411 unsigned long flags;
2413 spin_lock_irqsave(&bfad->bfad_lock, flags);
2414 iocmd->status = bfa_fcpim_lunmask_query(&bfad->bfa, lun_mask);
2415 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2420 bfad_iocmd_fcpim_cfg_lunmask(struct bfad_s *bfad, void *cmd, unsigned int v_cmd)
2422 struct bfa_bsg_fcpim_lunmask_s *iocmd =
2423 (struct bfa_bsg_fcpim_lunmask_s *)cmd;
2424 unsigned long flags;
2426 spin_lock_irqsave(&bfad->bfad_lock, flags);
2427 if (v_cmd == IOCMD_FCPIM_LUNMASK_ADD)
2428 iocmd->status = bfa_fcpim_lunmask_add(&bfad->bfa, iocmd->vf_id,
2429 &iocmd->pwwn, iocmd->rpwwn, iocmd->lun);
2430 else if (v_cmd == IOCMD_FCPIM_LUNMASK_DELETE)
2431 iocmd->status = bfa_fcpim_lunmask_delete(&bfad->bfa,
2432 iocmd->vf_id, &iocmd->pwwn,
2433 iocmd->rpwwn, iocmd->lun);
2434 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2439 bfad_iocmd_handler(struct bfad_s *bfad, unsigned int cmd, void *iocmd,
2440 unsigned int payload_len)
2445 case IOCMD_IOC_ENABLE:
2446 rc = bfad_iocmd_ioc_enable(bfad, iocmd);
2448 case IOCMD_IOC_DISABLE:
2449 rc = bfad_iocmd_ioc_disable(bfad, iocmd);
2451 case IOCMD_IOC_GET_INFO:
2452 rc = bfad_iocmd_ioc_get_info(bfad, iocmd);
2454 case IOCMD_IOC_GET_ATTR:
2455 rc = bfad_iocmd_ioc_get_attr(bfad, iocmd);
2457 case IOCMD_IOC_GET_STATS:
2458 rc = bfad_iocmd_ioc_get_stats(bfad, iocmd);
2460 case IOCMD_IOC_GET_FWSTATS:
2461 rc = bfad_iocmd_ioc_get_fwstats(bfad, iocmd, payload_len);
2463 case IOCMD_IOC_RESET_STATS:
2464 case IOCMD_IOC_RESET_FWSTATS:
2465 rc = bfad_iocmd_ioc_reset_stats(bfad, iocmd, cmd);
2467 case IOCMD_IOC_SET_ADAPTER_NAME:
2468 case IOCMD_IOC_SET_PORT_NAME:
2469 rc = bfad_iocmd_ioc_set_name(bfad, iocmd, cmd);
2471 case IOCMD_IOCFC_GET_ATTR:
2472 rc = bfad_iocmd_iocfc_get_attr(bfad, iocmd);
2474 case IOCMD_IOCFC_SET_INTR:
2475 rc = bfad_iocmd_iocfc_set_intr(bfad, iocmd);
2477 case IOCMD_PORT_ENABLE:
2478 rc = bfad_iocmd_port_enable(bfad, iocmd);
2480 case IOCMD_PORT_DISABLE:
2481 rc = bfad_iocmd_port_disable(bfad, iocmd);
2483 case IOCMD_PORT_GET_ATTR:
2484 rc = bfad_iocmd_port_get_attr(bfad, iocmd);
2486 case IOCMD_PORT_GET_STATS:
2487 rc = bfad_iocmd_port_get_stats(bfad, iocmd, payload_len);
2489 case IOCMD_PORT_RESET_STATS:
2490 rc = bfad_iocmd_port_reset_stats(bfad, iocmd);
2492 case IOCMD_PORT_CFG_TOPO:
2493 case IOCMD_PORT_CFG_SPEED:
2494 case IOCMD_PORT_CFG_ALPA:
2495 case IOCMD_PORT_CLR_ALPA:
2496 rc = bfad_iocmd_set_port_cfg(bfad, iocmd, cmd);
2498 case IOCMD_PORT_CFG_MAXFRSZ:
2499 rc = bfad_iocmd_port_cfg_maxfrsize(bfad, iocmd);
2501 case IOCMD_PORT_BBSC_ENABLE:
2502 case IOCMD_PORT_BBSC_DISABLE:
2503 rc = bfad_iocmd_port_cfg_bbsc(bfad, iocmd, cmd);
2505 case IOCMD_LPORT_GET_ATTR:
2506 rc = bfad_iocmd_lport_get_attr(bfad, iocmd);
2508 case IOCMD_LPORT_GET_STATS:
2509 rc = bfad_iocmd_lport_get_stats(bfad, iocmd);
2511 case IOCMD_LPORT_RESET_STATS:
2512 rc = bfad_iocmd_lport_reset_stats(bfad, iocmd);
2514 case IOCMD_LPORT_GET_IOSTATS:
2515 rc = bfad_iocmd_lport_get_iostats(bfad, iocmd);
2517 case IOCMD_LPORT_GET_RPORTS:
2518 rc = bfad_iocmd_lport_get_rports(bfad, iocmd, payload_len);
2520 case IOCMD_RPORT_GET_ATTR:
2521 rc = bfad_iocmd_rport_get_attr(bfad, iocmd);
2523 case IOCMD_RPORT_GET_ADDR:
2524 rc = bfad_iocmd_rport_get_addr(bfad, iocmd);
2526 case IOCMD_RPORT_GET_STATS:
2527 rc = bfad_iocmd_rport_get_stats(bfad, iocmd);
2529 case IOCMD_RPORT_RESET_STATS:
2530 rc = bfad_iocmd_rport_clr_stats(bfad, iocmd);
2532 case IOCMD_RPORT_SET_SPEED:
2533 rc = bfad_iocmd_rport_set_speed(bfad, iocmd);
2535 case IOCMD_VPORT_GET_ATTR:
2536 rc = bfad_iocmd_vport_get_attr(bfad, iocmd);
2538 case IOCMD_VPORT_GET_STATS:
2539 rc = bfad_iocmd_vport_get_stats(bfad, iocmd);
2541 case IOCMD_VPORT_RESET_STATS:
2542 rc = bfad_iocmd_vport_clr_stats(bfad, iocmd);
2544 case IOCMD_FABRIC_GET_LPORTS:
2545 rc = bfad_iocmd_fabric_get_lports(bfad, iocmd, payload_len);
2547 case IOCMD_RATELIM_ENABLE:
2548 case IOCMD_RATELIM_DISABLE:
2549 rc = bfad_iocmd_ratelim(bfad, cmd, iocmd);
2551 case IOCMD_RATELIM_DEF_SPEED:
2552 rc = bfad_iocmd_ratelim_speed(bfad, cmd, iocmd);
2554 case IOCMD_FCPIM_FAILOVER:
2555 rc = bfad_iocmd_cfg_fcpim(bfad, iocmd);
2557 case IOCMD_FCPIM_MODSTATS:
2558 rc = bfad_iocmd_fcpim_get_modstats(bfad, iocmd);
2560 case IOCMD_FCPIM_MODSTATSCLR:
2561 rc = bfad_iocmd_fcpim_clr_modstats(bfad, iocmd);
2563 case IOCMD_FCPIM_DEL_ITN_STATS:
2564 rc = bfad_iocmd_fcpim_get_del_itn_stats(bfad, iocmd);
2566 case IOCMD_ITNIM_GET_ATTR:
2567 rc = bfad_iocmd_itnim_get_attr(bfad, iocmd);
2569 case IOCMD_ITNIM_GET_IOSTATS:
2570 rc = bfad_iocmd_itnim_get_iostats(bfad, iocmd);
2572 case IOCMD_ITNIM_RESET_STATS:
2573 rc = bfad_iocmd_itnim_reset_stats(bfad, iocmd);
2575 case IOCMD_ITNIM_GET_ITNSTATS:
2576 rc = bfad_iocmd_itnim_get_itnstats(bfad, iocmd);
2578 case IOCMD_FCPORT_ENABLE:
2579 rc = bfad_iocmd_fcport_enable(bfad, iocmd);
2581 case IOCMD_FCPORT_DISABLE:
2582 rc = bfad_iocmd_fcport_disable(bfad, iocmd);
2584 case IOCMD_IOC_PCIFN_CFG:
2585 rc = bfad_iocmd_ioc_get_pcifn_cfg(bfad, iocmd);
2587 case IOCMD_PCIFN_CREATE:
2588 rc = bfad_iocmd_pcifn_create(bfad, iocmd);
2590 case IOCMD_PCIFN_DELETE:
2591 rc = bfad_iocmd_pcifn_delete(bfad, iocmd);
2593 case IOCMD_PCIFN_BW:
2594 rc = bfad_iocmd_pcifn_bw(bfad, iocmd);
2596 case IOCMD_ADAPTER_CFG_MODE:
2597 rc = bfad_iocmd_adapter_cfg_mode(bfad, iocmd);
2599 case IOCMD_PORT_CFG_MODE:
2600 rc = bfad_iocmd_port_cfg_mode(bfad, iocmd);
2602 case IOCMD_FLASH_ENABLE_OPTROM:
2603 case IOCMD_FLASH_DISABLE_OPTROM:
2604 rc = bfad_iocmd_ablk_optrom(bfad, cmd, iocmd);
2606 case IOCMD_FAA_QUERY:
2607 rc = bfad_iocmd_faa_query(bfad, iocmd);
2609 case IOCMD_CEE_GET_ATTR:
2610 rc = bfad_iocmd_cee_attr(bfad, iocmd, payload_len);
2612 case IOCMD_CEE_GET_STATS:
2613 rc = bfad_iocmd_cee_get_stats(bfad, iocmd, payload_len);
2615 case IOCMD_CEE_RESET_STATS:
2616 rc = bfad_iocmd_cee_reset_stats(bfad, iocmd);
2618 case IOCMD_SFP_MEDIA:
2619 rc = bfad_iocmd_sfp_media(bfad, iocmd);
2621 case IOCMD_SFP_SPEED:
2622 rc = bfad_iocmd_sfp_speed(bfad, iocmd);
2624 case IOCMD_FLASH_GET_ATTR:
2625 rc = bfad_iocmd_flash_get_attr(bfad, iocmd);
2627 case IOCMD_FLASH_ERASE_PART:
2628 rc = bfad_iocmd_flash_erase_part(bfad, iocmd);
2630 case IOCMD_FLASH_UPDATE_PART:
2631 rc = bfad_iocmd_flash_update_part(bfad, iocmd, payload_len);
2633 case IOCMD_FLASH_READ_PART:
2634 rc = bfad_iocmd_flash_read_part(bfad, iocmd, payload_len);
2636 case IOCMD_DIAG_TEMP:
2637 rc = bfad_iocmd_diag_temp(bfad, iocmd);
2639 case IOCMD_DIAG_MEMTEST:
2640 rc = bfad_iocmd_diag_memtest(bfad, iocmd);
2642 case IOCMD_DIAG_LOOPBACK:
2643 rc = bfad_iocmd_diag_loopback(bfad, iocmd);
2645 case IOCMD_DIAG_FWPING:
2646 rc = bfad_iocmd_diag_fwping(bfad, iocmd);
2648 case IOCMD_DIAG_QUEUETEST:
2649 rc = bfad_iocmd_diag_queuetest(bfad, iocmd);
2651 case IOCMD_DIAG_SFP:
2652 rc = bfad_iocmd_diag_sfp(bfad, iocmd);
2654 case IOCMD_DIAG_LED:
2655 rc = bfad_iocmd_diag_led(bfad, iocmd);
2657 case IOCMD_DIAG_BEACON_LPORT:
2658 rc = bfad_iocmd_diag_beacon_lport(bfad, iocmd);
2660 case IOCMD_DIAG_LB_STAT:
2661 rc = bfad_iocmd_diag_lb_stat(bfad, iocmd);
2663 case IOCMD_PHY_GET_ATTR:
2664 rc = bfad_iocmd_phy_get_attr(bfad, iocmd);
2666 case IOCMD_PHY_GET_STATS:
2667 rc = bfad_iocmd_phy_get_stats(bfad, iocmd);
2669 case IOCMD_PHY_UPDATE_FW:
2670 rc = bfad_iocmd_phy_update(bfad, iocmd, payload_len);
2672 case IOCMD_PHY_READ_FW:
2673 rc = bfad_iocmd_phy_read(bfad, iocmd, payload_len);
2675 case IOCMD_VHBA_QUERY:
2676 rc = bfad_iocmd_vhba_query(bfad, iocmd);
2678 case IOCMD_DEBUG_PORTLOG:
2679 rc = bfad_iocmd_porglog_get(bfad, iocmd);
2681 case IOCMD_DEBUG_FW_CORE:
2682 rc = bfad_iocmd_debug_fw_core(bfad, iocmd, payload_len);
2684 case IOCMD_DEBUG_FW_STATE_CLR:
2685 case IOCMD_DEBUG_PORTLOG_CLR:
2686 case IOCMD_DEBUG_START_DTRC:
2687 case IOCMD_DEBUG_STOP_DTRC:
2688 rc = bfad_iocmd_debug_ctl(bfad, iocmd, cmd);
2690 case IOCMD_DEBUG_PORTLOG_CTL:
2691 rc = bfad_iocmd_porglog_ctl(bfad, iocmd);
2693 case IOCMD_FCPIM_PROFILE_ON:
2694 case IOCMD_FCPIM_PROFILE_OFF:
2695 rc = bfad_iocmd_fcpim_cfg_profile(bfad, iocmd, cmd);
2697 case IOCMD_ITNIM_GET_IOPROFILE:
2698 rc = bfad_iocmd_itnim_get_ioprofile(bfad, iocmd);
2700 case IOCMD_FCPORT_GET_STATS:
2701 rc = bfad_iocmd_fcport_get_stats(bfad, iocmd);
2703 case IOCMD_FCPORT_RESET_STATS:
2704 rc = bfad_iocmd_fcport_reset_stats(bfad, iocmd);
2706 case IOCMD_BOOT_CFG:
2707 rc = bfad_iocmd_boot_cfg(bfad, iocmd);
2709 case IOCMD_BOOT_QUERY:
2710 rc = bfad_iocmd_boot_query(bfad, iocmd);
2712 case IOCMD_PREBOOT_QUERY:
2713 rc = bfad_iocmd_preboot_query(bfad, iocmd);
2715 case IOCMD_ETHBOOT_CFG:
2716 rc = bfad_iocmd_ethboot_cfg(bfad, iocmd);
2718 case IOCMD_ETHBOOT_QUERY:
2719 rc = bfad_iocmd_ethboot_query(bfad, iocmd);
2721 case IOCMD_TRUNK_ENABLE:
2722 case IOCMD_TRUNK_DISABLE:
2723 rc = bfad_iocmd_cfg_trunk(bfad, iocmd, cmd);
2725 case IOCMD_TRUNK_GET_ATTR:
2726 rc = bfad_iocmd_trunk_get_attr(bfad, iocmd);
2728 case IOCMD_QOS_ENABLE:
2729 case IOCMD_QOS_DISABLE:
2730 rc = bfad_iocmd_qos(bfad, iocmd, cmd);
2732 case IOCMD_QOS_GET_ATTR:
2733 rc = bfad_iocmd_qos_get_attr(bfad, iocmd);
2735 case IOCMD_QOS_GET_VC_ATTR:
2736 rc = bfad_iocmd_qos_get_vc_attr(bfad, iocmd);
2738 case IOCMD_QOS_GET_STATS:
2739 rc = bfad_iocmd_qos_get_stats(bfad, iocmd);
2741 case IOCMD_QOS_RESET_STATS:
2742 rc = bfad_iocmd_qos_reset_stats(bfad, iocmd);
2744 case IOCMD_VF_GET_STATS:
2745 rc = bfad_iocmd_vf_get_stats(bfad, iocmd);
2747 case IOCMD_VF_RESET_STATS:
2748 rc = bfad_iocmd_vf_clr_stats(bfad, iocmd);
2750 case IOCMD_FCPIM_LUNMASK_ENABLE:
2751 case IOCMD_FCPIM_LUNMASK_DISABLE:
2752 case IOCMD_FCPIM_LUNMASK_CLEAR:
2753 rc = bfad_iocmd_lunmask(bfad, iocmd, cmd);
2755 case IOCMD_FCPIM_LUNMASK_QUERY:
2756 rc = bfad_iocmd_fcpim_lunmask_query(bfad, iocmd);
2758 case IOCMD_FCPIM_LUNMASK_ADD:
2759 case IOCMD_FCPIM_LUNMASK_DELETE:
2760 rc = bfad_iocmd_fcpim_cfg_lunmask(bfad, iocmd, cmd);
2770 bfad_im_bsg_vendor_request(struct fc_bsg_job *job)
2772 uint32_t vendor_cmd = job->request->rqst_data.h_vendor.vendor_cmd[0];
2773 struct bfad_im_port_s *im_port =
2774 (struct bfad_im_port_s *) job->shost->hostdata[0];
2775 struct bfad_s *bfad = im_port->bfad;
2776 struct request_queue *request_q = job->req->q;
2781 * Set the BSG device request_queue size to 256 to support
2782 * payloads larger than 512*1024K bytes.
2784 blk_queue_max_segments(request_q, 256);
2786 /* Allocate a temp buffer to hold the passed in user space command */
2787 payload_kbuf = kzalloc(job->request_payload.payload_len, GFP_KERNEL);
2788 if (!payload_kbuf) {
2793 /* Copy the sg_list passed in to a linear buffer: holds the cmnd data */
2794 sg_copy_to_buffer(job->request_payload.sg_list,
2795 job->request_payload.sg_cnt, payload_kbuf,
2796 job->request_payload.payload_len);
2798 /* Invoke IOCMD handler - to handle all the vendor command requests */
2799 rc = bfad_iocmd_handler(bfad, vendor_cmd, payload_kbuf,
2800 job->request_payload.payload_len);
2801 if (rc != BFA_STATUS_OK)
2804 /* Copy the response data to the job->reply_payload sg_list */
2805 sg_copy_from_buffer(job->reply_payload.sg_list,
2806 job->reply_payload.sg_cnt,
2808 job->reply_payload.payload_len);
2810 /* free the command buffer */
2811 kfree(payload_kbuf);
2813 /* Fill the BSG job reply data */
2814 job->reply_len = job->reply_payload.payload_len;
2815 job->reply->reply_payload_rcv_len = job->reply_payload.payload_len;
2816 job->reply->result = rc;
2821 /* free the command buffer */
2822 kfree(payload_kbuf);
2824 job->reply->result = rc;
2825 job->reply_len = sizeof(uint32_t);
2826 job->reply->reply_payload_rcv_len = 0;
2830 /* FC passthru call backs */
2832 bfad_fcxp_get_req_sgaddr_cb(void *bfad_fcxp, int sgeid)
2834 struct bfad_fcxp *drv_fcxp = bfad_fcxp;
2835 struct bfa_sge_s *sge;
2838 sge = drv_fcxp->req_sge + sgeid;
2839 addr = (u64)(size_t) sge->sg_addr;
2844 bfad_fcxp_get_req_sglen_cb(void *bfad_fcxp, int sgeid)
2846 struct bfad_fcxp *drv_fcxp = bfad_fcxp;
2847 struct bfa_sge_s *sge;
2849 sge = drv_fcxp->req_sge + sgeid;
2854 bfad_fcxp_get_rsp_sgaddr_cb(void *bfad_fcxp, int sgeid)
2856 struct bfad_fcxp *drv_fcxp = bfad_fcxp;
2857 struct bfa_sge_s *sge;
2860 sge = drv_fcxp->rsp_sge + sgeid;
2861 addr = (u64)(size_t) sge->sg_addr;
2866 bfad_fcxp_get_rsp_sglen_cb(void *bfad_fcxp, int sgeid)
2868 struct bfad_fcxp *drv_fcxp = bfad_fcxp;
2869 struct bfa_sge_s *sge;
2871 sge = drv_fcxp->rsp_sge + sgeid;
2876 bfad_send_fcpt_cb(void *bfad_fcxp, struct bfa_fcxp_s *fcxp, void *cbarg,
2877 bfa_status_t req_status, u32 rsp_len, u32 resid_len,
2878 struct fchs_s *rsp_fchs)
2880 struct bfad_fcxp *drv_fcxp = bfad_fcxp;
2882 drv_fcxp->req_status = req_status;
2883 drv_fcxp->rsp_len = rsp_len;
2885 /* bfa_fcxp will be automatically freed by BFA */
2886 drv_fcxp->bfa_fcxp = NULL;
2887 complete(&drv_fcxp->comp);
2890 struct bfad_buf_info *
2891 bfad_fcxp_map_sg(struct bfad_s *bfad, void *payload_kbuf,
2892 uint32_t payload_len, uint32_t *num_sgles)
2894 struct bfad_buf_info *buf_base, *buf_info;
2895 struct bfa_sge_s *sg_table;
2898 buf_base = kzalloc((sizeof(struct bfad_buf_info) +
2899 sizeof(struct bfa_sge_s)) * sge_num, GFP_KERNEL);
2903 sg_table = (struct bfa_sge_s *) (((uint8_t *)buf_base) +
2904 (sizeof(struct bfad_buf_info) * sge_num));
2906 /* Allocate dma coherent memory */
2907 buf_info = buf_base;
2908 buf_info->size = payload_len;
2909 buf_info->virt = dma_alloc_coherent(&bfad->pcidev->dev, buf_info->size,
2910 &buf_info->phys, GFP_KERNEL);
2911 if (!buf_info->virt)
2914 /* copy the linear bsg buffer to buf_info */
2915 memset(buf_info->virt, 0, buf_info->size);
2916 memcpy(buf_info->virt, payload_kbuf, buf_info->size);
2921 sg_table->sg_len = buf_info->size;
2922 sg_table->sg_addr = (void *)(size_t) buf_info->phys;
2924 *num_sgles = sge_num;
2934 bfad_fcxp_free_mem(struct bfad_s *bfad, struct bfad_buf_info *buf_base,
2938 struct bfad_buf_info *buf_info = buf_base;
2941 for (i = 0; i < num_sgles; buf_info++, i++) {
2942 if (buf_info->virt != NULL)
2943 dma_free_coherent(&bfad->pcidev->dev,
2944 buf_info->size, buf_info->virt,
2952 bfad_fcxp_bsg_send(struct fc_bsg_job *job, struct bfad_fcxp *drv_fcxp,
2953 bfa_bsg_fcpt_t *bsg_fcpt)
2955 struct bfa_fcxp_s *hal_fcxp;
2956 struct bfad_s *bfad = drv_fcxp->port->bfad;
2957 unsigned long flags;
2960 spin_lock_irqsave(&bfad->bfad_lock, flags);
2962 /* Allocate bfa_fcxp structure */
2963 hal_fcxp = bfa_fcxp_req_rsp_alloc(drv_fcxp, &bfad->bfa,
2964 drv_fcxp->num_req_sgles,
2965 drv_fcxp->num_rsp_sgles,
2966 bfad_fcxp_get_req_sgaddr_cb,
2967 bfad_fcxp_get_req_sglen_cb,
2968 bfad_fcxp_get_rsp_sgaddr_cb,
2969 bfad_fcxp_get_rsp_sglen_cb, BFA_TRUE);
2972 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2973 return BFA_STATUS_ENOMEM;
2976 drv_fcxp->bfa_fcxp = hal_fcxp;
2978 lp_tag = bfa_lps_get_tag_from_pid(&bfad->bfa, bsg_fcpt->fchs.s_id);
2980 bfa_fcxp_send(hal_fcxp, drv_fcxp->bfa_rport, bsg_fcpt->vf_id, lp_tag,
2981 bsg_fcpt->cts, bsg_fcpt->cos,
2982 job->request_payload.payload_len,
2983 &bsg_fcpt->fchs, bfad_send_fcpt_cb, bfad,
2984 job->reply_payload.payload_len, bsg_fcpt->tsecs);
2986 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
2988 return BFA_STATUS_OK;
2992 bfad_im_bsg_els_ct_request(struct fc_bsg_job *job)
2994 struct bfa_bsg_data *bsg_data;
2995 struct bfad_im_port_s *im_port =
2996 (struct bfad_im_port_s *) job->shost->hostdata[0];
2997 struct bfad_s *bfad = im_port->bfad;
2998 bfa_bsg_fcpt_t *bsg_fcpt;
2999 struct bfad_fcxp *drv_fcxp;
3000 struct bfa_fcs_lport_s *fcs_port;
3001 struct bfa_fcs_rport_s *fcs_rport;
3002 uint32_t command_type = job->request->msgcode;
3003 unsigned long flags;
3004 struct bfad_buf_info *rsp_buf_info;
3005 void *req_kbuf = NULL, *rsp_kbuf = NULL;
3008 job->reply_len = sizeof(uint32_t); /* Atleast uint32_t reply_len */
3009 job->reply->reply_payload_rcv_len = 0;
3011 /* Get the payload passed in from userspace */
3012 bsg_data = (struct bfa_bsg_data *) (((char *)job->request) +
3013 sizeof(struct fc_bsg_request));
3014 if (bsg_data == NULL)
3018 * Allocate buffer for bsg_fcpt and do a copy_from_user op for payload
3019 * buffer of size bsg_data->payload_len
3021 bsg_fcpt = kzalloc(bsg_data->payload_len, GFP_KERNEL);
3027 if (copy_from_user((uint8_t *)bsg_fcpt, bsg_data->payload,
3028 bsg_data->payload_len)) {
3034 drv_fcxp = kzalloc(sizeof(struct bfad_fcxp), GFP_KERNEL);
3035 if (drv_fcxp == NULL) {
3041 spin_lock_irqsave(&bfad->bfad_lock, flags);
3042 fcs_port = bfa_fcs_lookup_port(&bfad->bfa_fcs, bsg_fcpt->vf_id,
3044 if (fcs_port == NULL) {
3045 bsg_fcpt->status = BFA_STATUS_UNKNOWN_LWWN;
3046 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
3050 /* Check if the port is online before sending FC Passthru cmd */
3051 if (!bfa_fcs_lport_is_online(fcs_port)) {
3052 bsg_fcpt->status = BFA_STATUS_PORT_OFFLINE;
3053 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
3057 drv_fcxp->port = fcs_port->bfad_port;
3059 if (drv_fcxp->port->bfad == 0)
3060 drv_fcxp->port->bfad = bfad;
3062 /* Fetch the bfa_rport - if nexus needed */
3063 if (command_type == FC_BSG_HST_ELS_NOLOGIN ||
3064 command_type == FC_BSG_HST_CT) {
3065 /* BSG HST commands: no nexus needed */
3066 drv_fcxp->bfa_rport = NULL;
3068 } else if (command_type == FC_BSG_RPT_ELS ||
3069 command_type == FC_BSG_RPT_CT) {
3070 /* BSG RPT commands: nexus needed */
3071 fcs_rport = bfa_fcs_lport_get_rport_by_pwwn(fcs_port,
3073 if (fcs_rport == NULL) {
3074 bsg_fcpt->status = BFA_STATUS_UNKNOWN_RWWN;
3075 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
3079 drv_fcxp->bfa_rport = fcs_rport->bfa_rport;
3081 } else { /* Unknown BSG msgcode; return -EINVAL */
3082 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
3086 spin_unlock_irqrestore(&bfad->bfad_lock, flags);
3088 /* allocate memory for req / rsp buffers */
3089 req_kbuf = kzalloc(job->request_payload.payload_len, GFP_KERNEL);
3091 printk(KERN_INFO "bfa %s: fcpt request buffer alloc failed\n",
3097 rsp_kbuf = kzalloc(job->reply_payload.payload_len, GFP_KERNEL);
3099 printk(KERN_INFO "bfa %s: fcpt response buffer alloc failed\n",
3105 /* map req sg - copy the sg_list passed in to the linear buffer */
3106 sg_copy_to_buffer(job->request_payload.sg_list,
3107 job->request_payload.sg_cnt, req_kbuf,
3108 job->request_payload.payload_len);
3110 drv_fcxp->reqbuf_info = bfad_fcxp_map_sg(bfad, req_kbuf,
3111 job->request_payload.payload_len,
3112 &drv_fcxp->num_req_sgles);
3113 if (!drv_fcxp->reqbuf_info) {
3114 printk(KERN_INFO "bfa %s: fcpt request fcxp_map_sg failed\n",
3120 drv_fcxp->req_sge = (struct bfa_sge_s *)
3121 (((uint8_t *)drv_fcxp->reqbuf_info) +
3122 (sizeof(struct bfad_buf_info) *
3123 drv_fcxp->num_req_sgles));
3126 drv_fcxp->rspbuf_info = bfad_fcxp_map_sg(bfad, rsp_kbuf,
3127 job->reply_payload.payload_len,
3128 &drv_fcxp->num_rsp_sgles);
3129 if (!drv_fcxp->rspbuf_info) {
3130 printk(KERN_INFO "bfa %s: fcpt response fcxp_map_sg failed\n",
3136 rsp_buf_info = (struct bfad_buf_info *)drv_fcxp->rspbuf_info;
3137 drv_fcxp->rsp_sge = (struct bfa_sge_s *)
3138 (((uint8_t *)drv_fcxp->rspbuf_info) +
3139 (sizeof(struct bfad_buf_info) *
3140 drv_fcxp->num_rsp_sgles));
3143 init_completion(&drv_fcxp->comp);
3144 rc = bfad_fcxp_bsg_send(job, drv_fcxp, bsg_fcpt);
3145 if (rc == BFA_STATUS_OK) {
3146 wait_for_completion(&drv_fcxp->comp);
3147 bsg_fcpt->status = drv_fcxp->req_status;
3149 bsg_fcpt->status = rc;
3153 /* fill the job->reply data */
3154 if (drv_fcxp->req_status == BFA_STATUS_OK) {
3155 job->reply_len = drv_fcxp->rsp_len;
3156 job->reply->reply_payload_rcv_len = drv_fcxp->rsp_len;
3157 job->reply->reply_data.ctels_reply.status = FC_CTELS_STATUS_OK;
3159 job->reply->reply_payload_rcv_len =
3160 sizeof(struct fc_bsg_ctels_reply);
3161 job->reply_len = sizeof(uint32_t);
3162 job->reply->reply_data.ctels_reply.status =
3163 FC_CTELS_STATUS_REJECT;
3166 /* Copy the response data to the reply_payload sg list */
3167 sg_copy_from_buffer(job->reply_payload.sg_list,
3168 job->reply_payload.sg_cnt,
3169 (uint8_t *)rsp_buf_info->virt,
3170 job->reply_payload.payload_len);
3173 bfad_fcxp_free_mem(bfad, drv_fcxp->rspbuf_info,
3174 drv_fcxp->num_rsp_sgles);
3175 bfad_fcxp_free_mem(bfad, drv_fcxp->reqbuf_info,
3176 drv_fcxp->num_req_sgles);
3180 /* Need a copy to user op */
3181 if (copy_to_user(bsg_data->payload, (void *) bsg_fcpt,
3182 bsg_data->payload_len))
3188 job->reply->result = rc;
3190 if (rc == BFA_STATUS_OK)
3197 bfad_im_bsg_request(struct fc_bsg_job *job)
3199 uint32_t rc = BFA_STATUS_OK;
3201 switch (job->request->msgcode) {
3202 case FC_BSG_HST_VENDOR:
3203 /* Process BSG HST Vendor requests */
3204 rc = bfad_im_bsg_vendor_request(job);
3206 case FC_BSG_HST_ELS_NOLOGIN:
3207 case FC_BSG_RPT_ELS:
3210 /* Process BSG ELS/CT commands */
3211 rc = bfad_im_bsg_els_ct_request(job);
3214 job->reply->result = rc = -EINVAL;
3215 job->reply->reply_payload_rcv_len = 0;
3223 bfad_im_bsg_timeout(struct fc_bsg_job *job)
3225 /* Don't complete the BSG job request - return -EAGAIN
3226 * to reset bsg job timeout : for ELS/CT pass thru we
3227 * already have timer to track the request.