1 /*******************************************************************
2 * This file is part of the Emulex Linux Device Driver for *
3 * Fibre Channel Host Bus Adapters. *
4 * Copyright (C) 2009-2012 Emulex. All rights reserved. *
5 * EMULEX and SLI are trademarks of Emulex. *
8 * This program is free software; you can redistribute it and/or *
9 * modify it under the terms of version 2 of the GNU General *
10 * Public License as published by the Free Software Foundation. *
11 * This program is distributed in the hope that it will be useful. *
12 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND *
13 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, *
14 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE *
15 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD *
16 * TO BE LEGALLY INVALID. See the GNU General Public License for *
17 * more details, a copy of which can be found in the file COPYING *
18 * included with this package. *
19 *******************************************************************/
21 #include <linux/interrupt.h>
22 #include <linux/mempool.h>
23 #include <linux/pci.h>
24 #include <linux/slab.h>
25 #include <linux/delay.h>
26 #include <linux/list.h>
28 #include <scsi/scsi.h>
29 #include <scsi/scsi_host.h>
30 #include <scsi/scsi_transport_fc.h>
31 #include <scsi/scsi_bsg_fc.h>
32 #include <scsi/fc/fc_fs.h>
37 #include "lpfc_sli4.h"
40 #include "lpfc_disc.h"
41 #include "lpfc_scsi.h"
43 #include "lpfc_logmsg.h"
44 #include "lpfc_crtn.h"
45 #include "lpfc_debugfs.h"
46 #include "lpfc_vport.h"
47 #include "lpfc_version.h"
/*
 * lpfc_bsg_event - per-waiter tracking object for registered CT events.
 * Lives on phba->ct_ev_waiters and is reference-counted via a kref
 * (see lpfc_bsg_event_ref/_unref/_free below).
 * NOTE(review): several members (kref, type_mask, req_id, reg_id, wq)
 * referenced elsewhere in this file are not visible in this view --
 * confirm against the full source.
 */
struct lpfc_bsg_event {
struct list_head node;		/* link on phba->ct_ev_waiters */
/* Event type and waiter identifiers */
/* next two flags are here for the auto-delete logic */
unsigned long wait_time_stamp;	/* jiffies of last touch, for aging */
/* seen and not seen events */
struct list_head events_to_get;	/* events already seen, ready for app */
struct list_head events_to_see;	/* events received, not yet consumed */
/* job waiting for this event to finish */
struct fc_bsg_job *set_job;	/* bsg job to complete on event arrival */
/*
 * lpfc_bsg_iocb - tracking state for a bsg-issued IOCB (CT/ELS command).
 * Stored inside bsg_job_data so the completion handler and the timeout
 * path can find the resources to release.
 */
struct lpfc_bsg_iocb {
struct lpfc_iocbq *cmdiocbq;	/* command iocb issued to the HBA */
struct lpfc_iocbq *rspiocbq;	/* response iocb (ELS path only) */
struct lpfc_dmabuf *bmp;	/* BPL buffer list (CT path only) */
struct lpfc_nodelist *ndlp;	/* node the command was sent to */
/* job waiting for this iocb to finish */
struct fc_bsg_job *set_job;
/*
 * lpfc_bsg_mbox - tracking state for a bsg-issued mailbox command.
 * NOTE(review): the mailbox pointer member is not visible in this view.
 */
struct lpfc_bsg_mbox {
struct lpfc_dmabuf *dmabuffers; /* for BIU diags */
uint8_t *ext; /* extended mailbox data */
uint32_t mbOffset; /* from app */
uint32_t inExtWLen; /* from app */
uint32_t outExtWLen; /* from app */
/* job waiting for this mbox command to finish */
struct fc_bsg_job *set_job;
/* Well-known D_ID of the Menlo (converged adapter) management entity */
#define MENLO_DID 0x0000FC0E

/*
 * lpfc_bsg_menlo - tracking state for a bsg-issued Menlo management iocb.
 * Mirrors lpfc_bsg_iocb but without an ndlp (Menlo uses a fixed D_ID).
 */
struct lpfc_bsg_menlo {
struct lpfc_iocbq *cmdiocbq;	/* command iocb issued to the HBA */
struct lpfc_iocbq *rspiocbq;	/* response iocb */
struct lpfc_dmabuf *bmp;	/* BPL buffer list */
/* job waiting for this iocb to finish */
struct fc_bsg_job *set_job;
/*
 * bsg_job_data - per-job tracking structure hung off fc_bsg_job->dd_data.
 * The union-of-contexts (evt/iocb/mbox/menlo) is discriminated by a type
 * member set at submit time (TYPE_IOCB, TYPE_EVT, ...).
 * NOTE(review): the type member and the enclosing union/struct layout are
 * not fully visible in this view -- confirm against the full source.
 */
struct bsg_job_data {
struct lpfc_bsg_event *evt;
struct lpfc_bsg_iocb iocb;
struct lpfc_bsg_mbox mbox;
struct lpfc_bsg_menlo menlo;
struct list_head node;
/* Loopback diagnostic constants */
#define BUF_SZ_4K 4096
#define SLI_CT_ELX_LOOPBACK 0x10	/* FsType marking Emulex loopback CT frames */

/* Sub-commands carried inside an SLI_CT_ELX_LOOPBACK CT request */
enum ELX_LOOPBACK_CMD {
ELX_LOOPBACK_XRI_SETUP,
/* offsetof()-style trick: size of the CT header up to the payload union */
#define ELX_LOOPBACK_HEADER_SZ \
(size_t)(&((struct lpfc_sli_ct_request *)NULL)->un)

/* lpfc_dmabuf extended with bookkeeping for multi-buffer DMA chains */
struct lpfc_dmabufext {
struct lpfc_dmabuf dma;	/* must stay first: code casts between the two */
145 * lpfc_bsg_send_mgmt_cmd_cmp - lpfc_bsg_send_mgmt_cmd's completion handler
146 * @phba: Pointer to HBA context object.
147 * @cmdiocbq: Pointer to command iocb.
148 * @rspiocbq: Pointer to response iocb.
150 * This function is the completion handler for iocbs issued using
151 * lpfc_bsg_send_mgmt_cmd function. This function is called by the
152 * ring event handler function without any lock held. This function
153 * can be called from both worker thread context and interrupt
154 * context. This function also can be called from another thread which
155 * cleans up the SLI layer objects.
156 * This function copies the contents of the response iocb to the
157 * response iocb memory object provided by the caller of
158 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
159 * sleeps for the iocb completion.
162 lpfc_bsg_send_mgmt_cmd_cmp(struct lpfc_hba *phba,
163 struct lpfc_iocbq *cmdiocbq,
164 struct lpfc_iocbq *rspiocbq)
166 struct bsg_job_data *dd_data;
167 struct fc_bsg_job *job;
169 struct lpfc_dmabuf *bmp;
170 struct lpfc_nodelist *ndlp;
171 struct lpfc_bsg_iocb *iocb;
/* ct_ev_lock serializes against the bsg timeout handler racing us */
175 spin_lock_irqsave(&phba->ct_ev_lock, flags);
/* tracking struct was stashed in context2 at submit time */
176 dd_data = cmdiocbq->context2;
178 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
179 lpfc_sli_release_iocbq(phba, cmdiocbq);
183 iocb = &dd_data->context_un.iocb;
185 job->dd_data = NULL; /* so timeout handler does not reply */
188 rsp = &rspiocbq->iocb;
189 ndlp = cmdiocbq->context1;
/* undo the scatter-gather DMA mappings made at submit time */
191 pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
192 job->request_payload.sg_cnt, DMA_TO_DEVICE);
193 pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
194 job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
/* map firmware completion status onto an errno for userspace */
196 if (rsp->ulpStatus) {
197 if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
198 switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
199 case IOERR_SEQUENCE_TIMEOUT:
202 case IOERR_INVALID_RPI:
/* success: report how many bytes the HBA actually received */
212 job->reply->reply_payload_rcv_len =
213 rsp->un.genreq64.bdl.bdeSize;
215 lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
216 lpfc_sli_release_iocbq(phba, cmdiocbq);
220 /* make error code available to userspace */
221 job->reply->result = rc;
222 /* complete the job back to userspace */
224 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
229 * lpfc_bsg_send_mgmt_cmd - send a CT command from a bsg request
230 * @job: fc_bsg_job to handle
233 lpfc_bsg_send_mgmt_cmd(struct fc_bsg_job *job)
235 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
236 struct lpfc_hba *phba = vport->phba;
237 struct lpfc_rport_data *rdata = job->rport->dd_data;
238 struct lpfc_nodelist *ndlp = rdata->pnode;
239 struct ulp_bde64 *bpl = NULL;
241 struct lpfc_iocbq *cmdiocbq = NULL;
243 struct lpfc_dmabuf *bmp = NULL;
246 struct scatterlist *sgel = NULL;
249 struct bsg_job_data *dd_data;
254 /* in case no data is transferred */
255 job->reply->reply_payload_rcv_len = 0;
257 /* allocate our bsg tracking structure */
258 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
260 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
261 "2733 Failed allocation of dd_data\n");
/* hold a node reference for the lifetime of the iocb */
266 if (!lpfc_nlp_get(ndlp)) {
271 bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
/* reject if an ELS send is already outstanding on this node */
277 if (ndlp->nlp_flag & NLP_ELS_SND_MASK) {
282 cmdiocbq = lpfc_sli_get_iocbq(phba);
288 cmd = &cmdiocbq->iocb;
289 bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
295 INIT_LIST_HEAD(&bmp->list);
296 bpl = (struct ulp_bde64 *) bmp->virt;
/* DMA-map the request payload and build one BDE per SG segment */
297 request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
298 job->request_payload.sg_cnt, DMA_TO_DEVICE);
299 for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
300 busaddr = sg_dma_address(sgel);
301 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
302 bpl->tus.f.bdeSize = sg_dma_len(sgel);
303 bpl->tus.w = cpu_to_le32(bpl->tus.w);
304 bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
305 bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
/* same for the reply payload; note BDE_64I marks input (receive) BDEs */
309 reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list,
310 job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
311 for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) {
312 busaddr = sg_dma_address(sgel);
313 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
314 bpl->tus.f.bdeSize = sg_dma_len(sgel);
315 bpl->tus.w = cpu_to_le32(bpl->tus.w);
316 bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
317 bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
/* fill in the GEN_REQUEST64 iocb pointing at the BPL just built */
321 cmd->un.genreq64.bdl.ulpIoTag32 = 0;
322 cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
323 cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
324 cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
325 cmd->un.genreq64.bdl.bdeSize =
326 (request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
327 cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
328 cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
329 cmd->un.genreq64.w5.hcsw.Dfctl = 0;
330 cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
331 cmd->un.genreq64.w5.hcsw.Type = FC_TYPE_CT;
332 cmd->ulpBdeCount = 1;
334 cmd->ulpClass = CLASS3;
/* SLI4 uses the remapped physical RPI rather than the logical one */
335 cmd->ulpContext = ndlp->nlp_rpi;
336 if (phba->sli_rev == LPFC_SLI_REV4)
337 cmd->ulpContext = phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
338 cmd->ulpOwner = OWN_CHIP;
339 cmdiocbq->vport = phba->pport;
340 cmdiocbq->context3 = bmp;
341 cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
/* 2x R_A_TOV is the conventional CT command timeout */
342 timeout = phba->fc_ratov * 2;
343 cmd->ulpTimeout = timeout;
/* wire up async completion and stash tracking state in the iocb */
345 cmdiocbq->iocb_cmpl = lpfc_bsg_send_mgmt_cmd_cmp;
346 cmdiocbq->context1 = ndlp;
347 cmdiocbq->context2 = dd_data;
348 dd_data->type = TYPE_IOCB;
349 dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
350 dd_data->context_un.iocb.set_job = job;
351 dd_data->context_un.iocb.bmp = bmp;
/* in polled mode, re-enable ring interrupts so the cmpl can fire */
353 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
354 if (lpfc_readl(phba->HCregaddr, &creg_val)) {
358 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
359 writel(creg_val, phba->HCregaddr);
360 readl(phba->HCregaddr); /* flush */
363 iocb_stat = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
364 if (iocb_stat == IOCB_SUCCESS)
365 return 0; /* done for now */
366 else if (iocb_stat == IOCB_BUSY)
372 /* iocb failed so cleanup */
373 pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
374 job->request_payload.sg_cnt, DMA_TO_DEVICE);
375 pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
376 job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
378 lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
381 lpfc_sli_release_iocbq(phba, cmdiocbq);
389 /* make error code available to userspace */
390 job->reply->result = rc;
396 * lpfc_bsg_rport_els_cmp - lpfc_bsg_rport_els's completion handler
397 * @phba: Pointer to HBA context object.
398 * @cmdiocbq: Pointer to command iocb.
399 * @rspiocbq: Pointer to response iocb.
401 * This function is the completion handler for iocbs issued using
402 * lpfc_bsg_rport_els_cmp function. This function is called by the
403 * ring event handler function without any lock held. This function
404 * can be called from both worker thread context and interrupt
405 * context. This function also can be called from other thread which
406 * cleans up the SLI layer objects.
407 * This function copies the contents of the response iocb to the
408 * response iocb memory object provided by the caller of
409 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
410 * sleeps for the iocb completion.
413 lpfc_bsg_rport_els_cmp(struct lpfc_hba *phba,
414 struct lpfc_iocbq *cmdiocbq,
415 struct lpfc_iocbq *rspiocbq)
417 struct bsg_job_data *dd_data;
418 struct fc_bsg_job *job;
420 struct lpfc_nodelist *ndlp;
421 struct lpfc_dmabuf *pbuflist = NULL;
422 struct fc_bsg_ctels_reply *els_reply;
/* ct_ev_lock serializes against the bsg timeout handler racing us */
427 spin_lock_irqsave(&phba->ct_ev_lock, flags);
/* ELS path stashes dd_data in context1 (CT path uses context2) */
428 dd_data = cmdiocbq->context1;
429 /* normal completion and timeout crossed paths, already done */
431 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
/* copy the raw response so any waiter sees it, then mark woken */
435 cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
436 if (cmdiocbq->context2 && rspiocbq)
437 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
438 &rspiocbq->iocb, sizeof(IOCB_t));
440 job = dd_data->context_un.iocb.set_job;
441 cmdiocbq = dd_data->context_un.iocb.cmdiocbq;
442 rspiocbq = dd_data->context_un.iocb.rspiocbq;
443 rsp = &rspiocbq->iocb;
444 ndlp = dd_data->context_un.iocb.ndlp;
/* undo the scatter-gather DMA mappings made at submit time */
446 pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
447 job->request_payload.sg_cnt, DMA_TO_DEVICE);
448 pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
449 job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
451 if (job->reply->result == -EAGAIN)
453 else if (rsp->ulpStatus == IOSTAT_SUCCESS)
454 job->reply->reply_payload_rcv_len =
455 rsp->un.elsreq64.bdl.bdeSize;
456 else if (rsp->ulpStatus == IOSTAT_LS_RJT) {
457 job->reply->reply_payload_rcv_len =
458 sizeof(struct fc_bsg_ctels_reply);
459 /* LS_RJT data returned in word 4 */
460 rjt_data = (uint8_t *)&rsp->un.ulpWord[4];
461 els_reply = &job->reply->reply_data.ctels_reply;
462 els_reply->status = FC_CTELS_STATUS_REJECT;
/* byte order: word 4 holds vendor/expl/reason/action low-to-high */
463 els_reply->rjt_data.action = rjt_data[3];
464 els_reply->rjt_data.reason_code = rjt_data[2];
465 els_reply->rjt_data.reason_explanation = rjt_data[1];
466 els_reply->rjt_data.vendor_unique = rjt_data[0];
/* release the BPL buffer and both iocbs acquired at submit time */
470 pbuflist = (struct lpfc_dmabuf *) cmdiocbq->context3;
471 lpfc_mbuf_free(phba, pbuflist->virt, pbuflist->phys);
472 lpfc_sli_release_iocbq(phba, rspiocbq);
473 lpfc_sli_release_iocbq(phba, cmdiocbq);
476 /* make error code available to userspace */
477 job->reply->result = rc;
479 /* complete the job back to userspace */
481 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
486 * lpfc_bsg_rport_els - send an ELS command from a bsg request
487 * @job: fc_bsg_job to handle
490 lpfc_bsg_rport_els(struct fc_bsg_job *job)
492 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
493 struct lpfc_hba *phba = vport->phba;
494 struct lpfc_rport_data *rdata = job->rport->dd_data;
495 struct lpfc_nodelist *ndlp = rdata->pnode;
499 struct lpfc_iocbq *rspiocbq;
500 struct lpfc_iocbq *cmdiocbq;
503 struct lpfc_dmabuf *pcmd;
504 struct lpfc_dmabuf *prsp;
505 struct lpfc_dmabuf *pbuflist = NULL;
506 struct ulp_bde64 *bpl;
509 struct scatterlist *sgel = NULL;
512 struct bsg_job_data *dd_data;
516 /* in case no data is transferred */
517 job->reply->reply_payload_rcv_len = 0;
519 /* allocate our bsg tracking structure */
520 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
522 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
523 "2735 Failed allocation of dd_data\n");
/* hold a node reference for the lifetime of the iocb */
528 if (!lpfc_nlp_get(ndlp)) {
/* ELS opcode and payload sizes come straight from the bsg request */
533 elscmd = job->request->rqst_data.r_els.els_code;
534 cmdsize = job->request_payload.payload_len;
535 rspsize = job->reply_payload.payload_len;
536 rspiocbq = lpfc_sli_get_iocbq(phba);
543 rsp = &rspiocbq->iocb;
/* let the ELS layer build the iocb, then swap in the bsg payloads */
546 cmdiocbq = lpfc_prep_els_iocb(vport, 1, cmdsize, 0, ndlp,
547 ndlp->nlp_DID, elscmd);
553 /* prep els iocb set context1 to the ndlp, context2 to the command
554 * dmabuf, context3 holds the data dmabuf
556 pcmd = (struct lpfc_dmabuf *) cmdiocbq->context2;
557 prsp = (struct lpfc_dmabuf *) pcmd->list.next;
/* free the driver-allocated cmd/rsp buffers; bsg supplies its own */
558 lpfc_mbuf_free(phba, pcmd->virt, pcmd->phys);
560 lpfc_mbuf_free(phba, prsp->virt, prsp->phys);
562 cmdiocbq->context2 = NULL;
564 pbuflist = (struct lpfc_dmabuf *) cmdiocbq->context3;
565 bpl = (struct ulp_bde64 *) pbuflist->virt;
/* DMA-map the request payload and build one BDE per SG segment */
567 request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
568 job->request_payload.sg_cnt, DMA_TO_DEVICE);
569 for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
570 busaddr = sg_dma_address(sgel);
571 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
572 bpl->tus.f.bdeSize = sg_dma_len(sgel);
573 bpl->tus.w = cpu_to_le32(bpl->tus.w);
574 bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
575 bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
/* same for the reply payload; BDE_64I marks input (receive) BDEs */
579 reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list,
580 job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
581 for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) {
582 busaddr = sg_dma_address(sgel);
583 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
584 bpl->tus.f.bdeSize = sg_dma_len(sgel);
585 bpl->tus.w = cpu_to_le32(bpl->tus.w);
586 bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
587 bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
590 cmdiocbq->iocb.un.elsreq64.bdl.bdeSize =
591 (request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
/* SLI4 uses the remapped physical RPI rather than the logical one */
592 if (phba->sli_rev == LPFC_SLI_REV4)
593 cmdiocbq->iocb.ulpContext = phba->sli4_hba.rpi_ids[rpi];
595 cmdiocbq->iocb.ulpContext = rpi;
596 cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
597 cmdiocbq->context1 = NULL;
598 cmdiocbq->context2 = NULL;
/* wire up async completion and stash tracking state in the iocb */
600 cmdiocbq->iocb_cmpl = lpfc_bsg_rport_els_cmp;
601 cmdiocbq->context1 = dd_data;
602 cmdiocbq->context_un.ndlp = ndlp;
603 cmdiocbq->context2 = rspiocbq;
604 dd_data->type = TYPE_IOCB;
605 dd_data->context_un.iocb.cmdiocbq = cmdiocbq;
606 dd_data->context_un.iocb.rspiocbq = rspiocbq;
607 dd_data->context_un.iocb.set_job = job;
608 dd_data->context_un.iocb.bmp = NULL;
609 dd_data->context_un.iocb.ndlp = ndlp;
/* in polled mode, re-enable ring interrupts so the cmpl can fire */
611 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
612 if (lpfc_readl(phba->HCregaddr, &creg_val)) {
616 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
617 writel(creg_val, phba->HCregaddr);
618 readl(phba->HCregaddr); /* flush */
620 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq, 0);
622 if (rc == IOCB_SUCCESS)
623 return 0; /* done for now */
624 else if (rc == IOCB_BUSY)
/* issue failed: unwind DMA mappings and buffers in reverse order */
630 pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
631 job->request_payload.sg_cnt, DMA_TO_DEVICE);
632 pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
633 job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
635 lpfc_mbuf_free(phba, pbuflist->virt, pbuflist->phys);
637 lpfc_sli_release_iocbq(phba, cmdiocbq);
640 lpfc_sli_release_iocbq(phba, rspiocbq);
646 /* make error code available to userspace */
647 job->reply->result = rc;
653 * lpfc_bsg_event_free - frees an allocated event structure
654 * @kref: Pointer to a kref.
656 * Called from kref_put. Back cast the kref into an event structure address.
657 * Free any events to get, delete associated nodes, free any events to see,
658 * free any data then free the event itself.
661 lpfc_bsg_event_free(struct kref *kref)
663 struct lpfc_bsg_event *evt = container_of(kref, struct lpfc_bsg_event,
665 struct event_data *ed;
/* unlink from phba->ct_ev_waiters before tearing down */
667 list_del(&evt->node);
/* drain both event queues, freeing each entry's payload and node */
669 while (!list_empty(&evt->events_to_get)) {
670 ed = list_entry(evt->events_to_get.next, typeof(*ed), node);
676 while (!list_empty(&evt->events_to_see)) {
677 ed = list_entry(evt->events_to_see.next, typeof(*ed), node);
687 * lpfc_bsg_event_ref - increments the kref for an event
688 * @evt: Pointer to an event structure.
691 lpfc_bsg_event_ref(struct lpfc_bsg_event *evt)
/* callers in this file take the reference under phba->ct_ev_lock */
693 kref_get(&evt->kref);
697 * lpfc_bsg_event_unref - Uses kref_put to free an event structure
698 * @evt: Pointer to an event structure.
701 lpfc_bsg_event_unref(struct lpfc_bsg_event *evt)
/* drops a reference; lpfc_bsg_event_free runs when it hits zero */
703 kref_put(&evt->kref, lpfc_bsg_event_free);
707 * lpfc_bsg_event_new - allocate and initialize a event structure
708 * @ev_mask: Mask of events.
709 * @ev_reg_id: Event reg id.
710 * @ev_req_id: Event request id.
712 static struct lpfc_bsg_event *
713 lpfc_bsg_event_new(uint32_t ev_mask, int ev_reg_id, uint32_t ev_req_id)
715 struct lpfc_bsg_event *evt = kzalloc(sizeof(*evt), GFP_KERNEL);
/* initialize queues, identity, aging stamp, waitqueue and refcount;
 * the returned event starts with a single reference (kref_init)
 */
720 INIT_LIST_HEAD(&evt->events_to_get);
721 INIT_LIST_HEAD(&evt->events_to_see);
722 evt->type_mask = ev_mask;
723 evt->req_id = ev_req_id;
724 evt->reg_id = ev_reg_id;
725 evt->wait_time_stamp = jiffies;
726 init_waitqueue_head(&evt->wq);
727 kref_init(&evt->kref);
732 * diag_cmd_data_free - Frees an lpfc dma buffer extension
733 * @phba: Pointer to HBA context object.
734 * @mlist: Pointer to an lpfc dma buffer extension.
737 diag_cmd_data_free(struct lpfc_hba *phba, struct lpfc_dmabufext *mlist)
739 struct lpfc_dmabufext *mlast;
740 struct pci_dev *pcidev;
741 struct list_head head, *curr, *next;
/* NOTE(review): bails out early when mlist is NULL, or when the link is
 * down while in loopback mode -- in the latter case the buffers appear
 * to be left unfreed; confirm intent against the full source.
 */
743 if ((!mlist) || (!lpfc_is_link_up(phba) &&
744 (phba->link_flag & LS_LOOPBACK_MODE))) {
748 pcidev = phba->pcidev;
/* splice a local head into the chain so the whole list can be walked */
749 list_add_tail(&head, &mlist->dma.list);
751 list_for_each_safe(curr, next, &head) {
752 mlast = list_entry(curr, struct lpfc_dmabufext , dma.list);
754 dma_free_coherent(&pcidev->dev,
764 * lpfc_bsg_ct_unsol_event - process an unsolicited CT command
769 * This function is called when an unsolicited CT command is received. It
770 * forwards the event to any processes registered to receive CT events.
773 lpfc_bsg_ct_unsol_event(struct lpfc_hba *phba, struct lpfc_sli_ring *pring,
774 struct lpfc_iocbq *piocbq)
776 uint32_t evt_req_id = 0;
779 struct lpfc_dmabuf *dmabuf = NULL;
780 struct lpfc_bsg_event *evt;
781 struct event_data *evt_dat = NULL;
782 struct lpfc_iocbq *iocbq;
784 struct list_head head;
785 struct ulp_bde64 *bde;
788 struct lpfc_dmabuf *bdeBuf1 = piocbq->context2;
789 struct lpfc_dmabuf *bdeBuf2 = piocbq->context3;
790 struct lpfc_hbq_entry *hbqe;
791 struct lpfc_sli_ct_request *ct_req;
792 struct fc_bsg_job *job = NULL;
/* chain the sequence's iocbs onto a local head for iteration */
796 INIT_LIST_HEAD(&head);
797 list_add_tail(&head, &piocbq->list);
/* nothing to do for an empty sequence */
799 if (piocbq->iocb.ulpBdeCount == 0 ||
800 piocbq->iocb.un.cont64[0].tus.f.bdeSize == 0)
801 goto error_ct_unsol_exit;
/* ignore events while the HBA is in error or SLI is inactive */
803 if (phba->link_state == LPFC_HBA_ERROR ||
804 (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE)))
805 goto error_ct_unsol_exit;
/* non-HBQ mode: look up the posted buffer by its DMA address */
807 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED)
810 dma_addr = getPaddr(piocbq->iocb.un.cont64[0].addrHigh,
811 piocbq->iocb.un.cont64[0].addrLow);
812 dmabuf = lpfc_sli_ringpostbuf_get(phba, pring, dma_addr);
815 goto error_ct_unsol_exit;
816 ct_req = (struct lpfc_sli_ct_request *)dmabuf->virt;
817 evt_req_id = ct_req->FsType;
818 cmd = ct_req->CommandResponse.bits.CmdRsp;
819 len = ct_req->CommandResponse.bits.Size;
/* repost the peeked buffer; data is copied out below per-iocb */
820 if (!(phba->sli3_options & LPFC_SLI3_HBQ_ENABLED))
821 lpfc_sli_ringpostbuf_put(phba, pring, dmabuf);
/* fan the event out to every waiter registered for this FsType */
823 spin_lock_irqsave(&phba->ct_ev_lock, flags);
824 list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
825 if (!(evt->type_mask & FC_REG_CT_EVENT) ||
826 evt->req_id != evt_req_id)
/* hold the waiter while the lock is dropped for allocation */
829 lpfc_bsg_event_ref(evt);
830 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
831 evt_dat = kzalloc(sizeof(*evt_dat), GFP_KERNEL);
832 if (evt_dat == NULL) {
833 spin_lock_irqsave(&phba->ct_ev_lock, flags);
834 lpfc_bsg_event_unref(evt);
835 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
836 "2614 Memory allocation failed for "
/* total payload length: HBQ keeps a running count, else sum BDEs */
841 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
842 /* take accumulated byte count from the last iocbq */
843 iocbq = list_entry(head.prev, typeof(*iocbq), list);
844 evt_dat->len = iocbq->iocb.unsli3.rcvsli3.acc_len;
846 list_for_each_entry(iocbq, &head, list) {
847 for (i = 0; i < iocbq->iocb.ulpBdeCount; i++)
849 iocbq->iocb.un.cont64[i].tus.f.bdeSize;
853 evt_dat->data = kzalloc(evt_dat->len, GFP_KERNEL);
854 if (evt_dat->data == NULL) {
855 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
856 "2615 Memory allocation failed for "
857 "CT event data, size %d\n",
860 spin_lock_irqsave(&phba->ct_ev_lock, flags);
861 lpfc_bsg_event_unref(evt);
862 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
863 goto error_ct_unsol_exit;
/* copy the payload out of each iocb's buffer(s) into evt_dat->data */
866 list_for_each_entry(iocbq, &head, list) {
868 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
869 bdeBuf1 = iocbq->context2;
870 bdeBuf2 = iocbq->context3;
872 for (i = 0; i < iocbq->iocb.ulpBdeCount; i++) {
873 if (phba->sli3_options &
874 LPFC_SLI3_HBQ_ENABLED) {
/* HBQ mode: sizes live in the in-iocb HBQ entries */
876 hbqe = (struct lpfc_hbq_entry *)
877 &iocbq->iocb.un.ulpWord[0];
878 size = hbqe->bde.tus.f.bdeSize;
881 hbqe = (struct lpfc_hbq_entry *)
884 size = hbqe->bde.tus.f.bdeSize;
/* clamp so the copy never overruns the allocation */
887 if ((offset + size) > evt_dat->len)
888 size = evt_dat->len - offset;
890 size = iocbq->iocb.un.cont64[i].
892 bde = &iocbq->iocb.un.cont64[i];
893 dma_addr = getPaddr(bde->addrHigh,
895 dmabuf = lpfc_sli_ringpostbuf_get(phba,
899 lpfc_printf_log(phba, KERN_ERR,
900 LOG_LIBDFC, "2616 No dmabuf "
901 "found for iocbq 0x%p\n",
903 kfree(evt_dat->data);
905 spin_lock_irqsave(&phba->ct_ev_lock,
907 lpfc_bsg_event_unref(evt);
908 spin_unlock_irqrestore(
909 &phba->ct_ev_lock, flags);
910 goto error_ct_unsol_exit;
912 memcpy((char *)(evt_dat->data) + offset,
/* non-loopback, non-HBQ buffers go back on the ring */
915 if (evt_req_id != SLI_CT_ELX_LOOPBACK &&
916 !(phba->sli3_options &
917 LPFC_SLI3_HBQ_ENABLED)) {
918 lpfc_sli_ringpostbuf_put(phba, pring,
/* loopback sub-command dictates how the buffer is recycled */
922 case ELX_LOOPBACK_DATA:
925 diag_cmd_data_free(phba,
926 (struct lpfc_dmabufext
929 case ELX_LOOPBACK_XRI_SETUP:
930 if ((phba->sli_rev ==
932 (phba->sli3_options &
933 LPFC_SLI3_HBQ_ENABLED
935 lpfc_in_buf_free(phba,
938 lpfc_post_buffer(phba,
944 if (!(phba->sli3_options &
945 LPFC_SLI3_HBQ_ENABLED))
946 lpfc_post_buffer(phba,
955 spin_lock_irqsave(&phba->ct_ev_lock, flags);
/* SLI4: remember oxid/rxid/SID so a CT response can be sent later */
956 if (phba->sli_rev == LPFC_SLI_REV4) {
957 evt_dat->immed_dat = phba->ctx_idx;
958 phba->ctx_idx = (phba->ctx_idx + 1) % LPFC_CT_CTX_MAX;
959 /* Provide warning for over-run of the ct_ctx array */
960 if (phba->ct_ctx[evt_dat->immed_dat].valid ==
962 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
963 "2717 CT context array entry "
964 "[%d] over-run: oxid:x%x, "
965 "sid:x%x\n", phba->ctx_idx,
967 evt_dat->immed_dat].oxid,
969 evt_dat->immed_dat].SID);
970 phba->ct_ctx[evt_dat->immed_dat].rxid =
971 piocbq->iocb.ulpContext;
972 phba->ct_ctx[evt_dat->immed_dat].oxid =
973 piocbq->iocb.unsli3.rcvsli3.ox_id;
974 phba->ct_ctx[evt_dat->immed_dat].SID =
975 piocbq->iocb.un.rcvels.remoteID;
976 phba->ct_ctx[evt_dat->immed_dat].valid = UNSOL_VALID;
978 evt_dat->immed_dat = piocbq->iocb.ulpContext;
980 evt_dat->type = FC_REG_CT_EVENT;
981 list_add(&evt_dat->node, &evt->events_to_see);
/* loopback waiters poll via the waitqueue; others get the event moved
 * straight to events_to_get and their bsg job completed below
 */
982 if (evt_req_id == SLI_CT_ELX_LOOPBACK) {
983 wake_up_interruptible(&evt->wq);
984 lpfc_bsg_event_unref(evt);
988 list_move(evt->events_to_see.prev, &evt->events_to_get);
989 lpfc_bsg_event_unref(evt);
994 job->reply->reply_payload_rcv_len = size;
995 /* make error code available to userspace */
996 job->reply->result = 0;
998 /* complete the job back to userspace */
999 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1001 spin_lock_irqsave(&phba->ct_ev_lock, flags);
1004 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1006 error_ct_unsol_exit:
1007 if (!list_empty(&head))
1009 if ((phba->sli_rev < LPFC_SLI_REV4) &&
1010 (evt_req_id == SLI_CT_ELX_LOOPBACK))
1016 * lpfc_bsg_ct_unsol_abort - handler ct abort to management plane
1017 * @phba: Pointer to HBA context object.
1018 * @dmabuf: pointer to a dmabuf that describes the FC sequence
1020 * This function handles abort to the CT command toward management plane
1023 * If the pending context of a CT command to management plane present, clears
1024 * such context and returns 1 for handled; otherwise, it returns 0 indicating
1025 * no context exists.
1028 lpfc_bsg_ct_unsol_abort(struct lpfc_hba *phba, struct hbq_dmabuf *dmabuf)
1030 struct fc_frame_header fc_hdr;
1031 struct fc_frame_header *fc_hdr_ptr = &fc_hdr;
1032 int ctx_idx, handled = 0;
1033 uint16_t oxid, rxid;
/* pull the FC header fields identifying the aborted exchange */
1036 memcpy(fc_hdr_ptr, dmabuf->hbuf.virt, sizeof(struct fc_frame_header));
1037 sid = sli4_sid_from_fc_hdr(fc_hdr_ptr);
1038 oxid = be16_to_cpu(fc_hdr_ptr->fh_ox_id);
1039 rxid = be16_to_cpu(fc_hdr_ptr->fh_rx_id);
/* invalidate any ct_ctx entry matching this exchange's rxid/oxid/SID */
1041 for (ctx_idx = 0; ctx_idx < LPFC_CT_CTX_MAX; ctx_idx++) {
1042 if (phba->ct_ctx[ctx_idx].valid != UNSOL_VALID)
1044 if (phba->ct_ctx[ctx_idx].rxid != rxid)
1046 if (phba->ct_ctx[ctx_idx].oxid != oxid)
1048 if (phba->ct_ctx[ctx_idx].SID != sid)
1050 phba->ct_ctx[ctx_idx].valid = UNSOL_INVALID;
1057 * lpfc_bsg_hba_set_event - process a SET_EVENT bsg vendor command
1058 * @job: SET_EVENT fc_bsg_job
1061 lpfc_bsg_hba_set_event(struct fc_bsg_job *job)
1063 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
1064 struct lpfc_hba *phba = vport->phba;
1065 struct set_ct_event *event_req;
1066 struct lpfc_bsg_event *evt;
1068 struct bsg_job_data *dd_data = NULL;
1070 unsigned long flags;
/* sanity-check the request is big enough to hold a set_ct_event */
1072 if (job->request_len <
1073 sizeof(struct fc_bsg_request) + sizeof(struct set_ct_event)) {
1074 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1075 "2612 Received SET_CT_EVENT below minimum "
1081 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
1082 if (dd_data == NULL) {
1083 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1084 "2734 Failed allocation of dd_data\n");
1089 event_req = (struct set_ct_event *)
1090 job->request->rqst_data.h_vendor.vendor_cmd;
1091 ev_mask = ((uint32_t)(unsigned long)event_req->type_mask &
/* reuse an existing waiter for this reg_id if one is registered */
1093 spin_lock_irqsave(&phba->ct_ev_lock, flags);
1094 list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
1095 if (evt->reg_id == event_req->ev_reg_id) {
1096 lpfc_bsg_event_ref(evt);
1097 evt->wait_time_stamp = jiffies;
1101 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
/* list walk reached the head => no matching waiter was found */
1103 if (&evt->node == &phba->ct_ev_waiters) {
1104 /* no event waiting struct yet - first call */
1105 evt = lpfc_bsg_event_new(ev_mask, event_req->ev_reg_id,
1106 event_req->ev_req_id);
1108 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1109 "2617 Failed allocation of event "
1115 spin_lock_irqsave(&phba->ct_ev_lock, flags);
1116 list_add(&evt->node, &phba->ct_ev_waiters);
1117 lpfc_bsg_event_ref(evt);
1118 evt->wait_time_stamp = jiffies;
1119 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
/* park the job; it completes later when a matching event arrives */
1122 spin_lock_irqsave(&phba->ct_ev_lock, flags);
1124 dd_data->type = TYPE_EVT;
1125 dd_data->context_un.evt = evt;
1126 evt->set_job = job; /* for unsolicited command */
1127 job->dd_data = dd_data; /* for fc transport timeout callback*/
1128 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1129 return 0; /* call job done later */
1132 if (dd_data != NULL)
1135 job->dd_data = NULL;
1140 * lpfc_bsg_hba_get_event - process a GET_EVENT bsg vendor command
1141 * @job: GET_EVENT fc_bsg_job
1144 lpfc_bsg_hba_get_event(struct fc_bsg_job *job)
1146 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
1147 struct lpfc_hba *phba = vport->phba;
1148 struct get_ct_event *event_req;
1149 struct get_ct_event_reply *event_reply;
1150 struct lpfc_bsg_event *evt;
1151 struct event_data *evt_dat = NULL;
1152 unsigned long flags;
/* sanity-check the request is big enough to hold a get_ct_event */
1155 if (job->request_len <
1156 sizeof(struct fc_bsg_request) + sizeof(struct get_ct_event)) {
1157 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1158 "2613 Received GET_CT_EVENT request below "
1164 event_req = (struct get_ct_event *)
1165 job->request->rqst_data.h_vendor.vendor_cmd;
1167 event_reply = (struct get_ct_event_reply *)
1168 job->reply->reply_data.vendor_reply.vendor_rsp;
/* pop the oldest queued event (list tail) for this reg_id, if any */
1169 spin_lock_irqsave(&phba->ct_ev_lock, flags);
1170 list_for_each_entry(evt, &phba->ct_ev_waiters, node) {
1171 if (evt->reg_id == event_req->ev_reg_id) {
1172 if (list_empty(&evt->events_to_get))
1174 lpfc_bsg_event_ref(evt);
1175 evt->wait_time_stamp = jiffies;
1176 evt_dat = list_entry(evt->events_to_get.prev,
1177 struct event_data, node);
1178 list_del(&evt_dat->node);
1182 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1184 /* The app may continue to ask for event data until it gets
1185 * an error indicating that there isn't anymore
1187 if (evt_dat == NULL) {
1188 job->reply->reply_payload_rcv_len = 0;
/* truncate rather than overflow the app-provided buffer */
1193 if (evt_dat->len > job->request_payload.payload_len) {
1194 evt_dat->len = job->request_payload.payload_len;
1195 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1196 "2618 Truncated event data at %d "
1198 job->request_payload.payload_len);
1201 event_reply->type = evt_dat->type;
1202 event_reply->immed_data = evt_dat->immed_dat;
/* NOTE(review): event data is copied into the REQUEST payload sg list
 * (not reply_payload) -- this matches the app's calling convention for
 * this vendor command; confirm against the lpfc ioctl/app interface.
 */
1203 if (evt_dat->len > 0)
1204 job->reply->reply_payload_rcv_len =
1205 sg_copy_from_buffer(job->request_payload.sg_list,
1206 job->request_payload.sg_cnt,
1207 evt_dat->data, evt_dat->len);
1209 job->reply->reply_payload_rcv_len = 0;
1212 kfree(evt_dat->data);
1216 spin_lock_irqsave(&phba->ct_ev_lock, flags);
1217 lpfc_bsg_event_unref(evt);
1218 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1219 job->dd_data = NULL;
1220 job->reply->result = 0;
1225 job->dd_data = NULL;
1226 job->reply->result = rc;
1231 * lpfc_issue_ct_rsp_cmp - lpfc_issue_ct_rsp's completion handler
1232 * @phba: Pointer to HBA context object.
1233 * @cmdiocbq: Pointer to command iocb.
1234 * @rspiocbq: Pointer to response iocb.
1236 * This function is the completion handler for iocbs issued using
1237 * lpfc_issue_ct_rsp_cmp function. This function is called by the
1238 * ring event handler function without any lock held. This function
1239 * can be called from both worker thread context and interrupt
1240 * context. This function also can be called from other thread which
1241 * cleans up the SLI layer objects.
1242 * This function copy the contents of the response iocb to the
1243 * response iocb memory object provided by the caller of
1244 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
1245 * sleeps for the iocb completion.
1248 lpfc_issue_ct_rsp_cmp(struct lpfc_hba *phba,
1249 struct lpfc_iocbq *cmdiocbq,
1250 struct lpfc_iocbq *rspiocbq)
1252 struct bsg_job_data *dd_data;
1253 struct fc_bsg_job *job;
1255 struct lpfc_dmabuf *bmp;
1256 struct lpfc_nodelist *ndlp;
1257 unsigned long flags;
/* ct_ev_lock serializes against the bsg timeout handler racing us */
1260 spin_lock_irqsave(&phba->ct_ev_lock, flags);
1261 dd_data = cmdiocbq->context2;
1262 /* normal completion and timeout crossed paths, already done */
1264 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1268 job = dd_data->context_un.iocb.set_job;
1269 bmp = dd_data->context_un.iocb.bmp;
1270 rsp = &rspiocbq->iocb;
1271 ndlp = dd_data->context_un.iocb.ndlp;
/* only the request payload was mapped for a CT response */
1273 pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
1274 job->request_payload.sg_cnt, DMA_TO_DEVICE);
/* map firmware completion status onto an errno for userspace */
1276 if (rsp->ulpStatus) {
1277 if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
1278 switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
1279 case IOERR_SEQUENCE_TIMEOUT:
1282 case IOERR_INVALID_RPI:
1292 job->reply->reply_payload_rcv_len =
1293 rsp->un.genreq64.bdl.bdeSize;
1295 lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
1296 lpfc_sli_release_iocbq(phba, cmdiocbq);
1300 /* make error code available to userspace */
1301 job->reply->result = rc;
1302 job->dd_data = NULL;
1303 /* complete the job back to userspace */
1305 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
1310 * lpfc_issue_ct_rsp - issue a ct response
1311 * @phba: Pointer to HBA context object.
1312 * @job: Pointer to the job object.
1313 * @tag: tag index value into the ports context exchange array.
1314 * @bmp: Pointer to a dma buffer descriptor.
1315 * @num_entry: Number of entries in the bde.
1318 lpfc_issue_ct_rsp(struct lpfc_hba *phba, struct fc_bsg_job *job, uint32_t tag,
1319 struct lpfc_dmabuf *bmp, int num_entry)
1322 struct lpfc_iocbq *ctiocb = NULL;
1324 struct lpfc_nodelist *ndlp = NULL;
1325 struct bsg_job_data *dd_data;
1328 /* allocate our bsg tracking structure */
1329 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
1331 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1332 "2736 Failed allocation of dd_data\n");
1337 /* Allocate buffer for command iocb */
1338 ctiocb = lpfc_sli_get_iocbq(phba);
/* build an XMIT_SEQUENCE64 iocb: BPL address/size first */
1344 icmd = &ctiocb->iocb;
1345 icmd->un.xseq64.bdl.ulpIoTag32 = 0;
1346 icmd->un.xseq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
1347 icmd->un.xseq64.bdl.addrLow = putPaddrLow(bmp->phys);
1348 icmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
1349 icmd->un.xseq64.bdl.bdeSize = (num_entry * sizeof(struct ulp_bde64));
/* last sequence (LS) + last in exchange (LA), solicited control hdr */
1350 icmd->un.xseq64.w5.hcsw.Fctl = (LS | LA);
1351 icmd->un.xseq64.w5.hcsw.Dfctl = 0;
1352 icmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_SOL_CTL;
1353 icmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;
1355 /* Fill in rest of iocb */
1356 icmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
1357 icmd->ulpBdeCount = 1;
1359 icmd->ulpClass = CLASS3;
1360 if (phba->sli_rev == LPFC_SLI_REV4) {
1361 /* Do not issue unsol response if oxid not marked as valid */
1362 if (phba->ct_ctx[tag].valid != UNSOL_VALID) {
1364 goto issue_ct_rsp_exit;
/* use rxid/oxid saved when the unsolicited CT command arrived */
1366 icmd->ulpContext = phba->ct_ctx[tag].rxid;
1367 icmd->unsli3.rcvsli3.ox_id = phba->ct_ctx[tag].oxid;
1368 ndlp = lpfc_findnode_did(phba->pport, phba->ct_ctx[tag].SID);
1370 lpfc_printf_log(phba, KERN_WARNING, LOG_ELS,
1371 "2721 ndlp null for oxid %x SID %x\n",
1373 phba->ct_ctx[tag].SID);
1375 goto issue_ct_rsp_exit;
1378 /* Check if the ndlp is active */
1379 if (!ndlp || !NLP_CHK_NODE_ACT(ndlp)) {
1381 goto issue_ct_rsp_exit;
1384 /* get a reference count so the ndlp doesn't go away while
1387 if (!lpfc_nlp_get(ndlp)) {
1389 goto issue_ct_rsp_exit;
/* SLI4 wants the hardware rpi, not the driver index */
1392 icmd->un.ulpWord[3] =
1393 phba->sli4_hba.rpi_ids[ndlp->nlp_rpi];
1395 /* The exchange is done, mark the entry as invalid */
1396 phba->ct_ctx[tag].valid = UNSOL_INVALID;
/* SLI3: the tag itself is the exchange context */
1398 icmd->ulpContext = (ushort) tag;
1400 icmd->ulpTimeout = phba->fc_ratov * 2;
1402 /* Xmit CT response on exchange <xid> */
1403 lpfc_printf_log(phba, KERN_INFO, LOG_ELS,
1404 "2722 Xmit CT response on exchange x%x Data: x%x x%x x%x\n",
1405 icmd->ulpContext, icmd->ulpIoTag, tag, phba->link_state);
1407 ctiocb->iocb_cmpl = NULL;
1408 ctiocb->iocb_flag |= LPFC_IO_LIBDFC;
1409 ctiocb->vport = phba->pport;
1410 ctiocb->context3 = bmp;
/* completion handler will unwind dd_data/bmp and finish the job */
1412 ctiocb->iocb_cmpl = lpfc_issue_ct_rsp_cmp;
1413 ctiocb->context2 = dd_data;
1414 ctiocb->context1 = ndlp;
1415 dd_data->type = TYPE_IOCB;
1416 dd_data->context_un.iocb.cmdiocbq = ctiocb;
1417 dd_data->context_un.iocb.rspiocbq = NULL;
1418 dd_data->context_un.iocb.set_job = job;
1419 dd_data->context_un.iocb.bmp = bmp;
1420 dd_data->context_un.iocb.ndlp = ndlp;
/* polling mode: re-enable the FCP ring interrupt for this command */
1422 if (phba->cfg_poll & DISABLE_FCP_RING_INT) {
1423 if (lpfc_readl(phba->HCregaddr, &creg_val)) {
1425 goto issue_ct_rsp_exit;
1427 creg_val |= (HC_R0INT_ENA << LPFC_FCP_RING);
1428 writel(creg_val, phba->HCregaddr);
1429 readl(phba->HCregaddr); /* flush */
1432 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, ctiocb, 0);
1434 if (rc == IOCB_SUCCESS)
1435 return 0; /* done for now */
/* error path: release the iocb; completion handler will not run */
1438 lpfc_sli_release_iocbq(phba, ctiocb);
1446 * lpfc_bsg_send_mgmt_rsp - process a SEND_MGMT_RESP bsg vendor command
1447 * @job: SEND_MGMT_RESP fc_bsg_job
1450 lpfc_bsg_send_mgmt_rsp(struct fc_bsg_job *job)
1452 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
1453 struct lpfc_hba *phba = vport->phba;
1454 struct send_mgmt_resp *mgmt_resp = (struct send_mgmt_resp *)
1455 job->request->rqst_data.h_vendor.vendor_cmd;
1456 struct ulp_bde64 *bpl;
1457 struct lpfc_dmabuf *bmp = NULL;
1458 struct scatterlist *sgel = NULL;
1462 uint32_t tag = mgmt_resp->tag;
1463 unsigned long reqbfrcnt =
1464 (unsigned long)job->request_payload.payload_len;
1467 /* in case no data is transferred */
1468 job->reply->reply_payload_rcv_len = 0;
/* reject empty or oversized payloads (cap: 80 x 4K buffers) */
1470 if (!reqbfrcnt || (reqbfrcnt > (80 * BUF_SZ_4K))) {
1472 goto send_mgmt_rsp_exit;
/* bmp holds the buffer pointer list (BPL) describing the payload */
1475 bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
1478 goto send_mgmt_rsp_exit;
1481 bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
1484 goto send_mgmt_rsp_free_bmp;
1487 INIT_LIST_HEAD(&bmp->list);
1488 bpl = (struct ulp_bde64 *) bmp->virt;
/* DMA-map the user payload and build one BDE per sg segment */
1489 request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
1490 job->request_payload.sg_cnt, DMA_TO_DEVICE);
1491 for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
1492 busaddr = sg_dma_address(sgel);
1493 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
1494 bpl->tus.f.bdeSize = sg_dma_len(sgel);
1495 bpl->tus.w = cpu_to_le32(bpl->tus.w);
1496 bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
1497 bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
/* hand off to the CT response path; on success it owns bmp/mapping */
1501 rc = lpfc_issue_ct_rsp(phba, job, tag, bmp, request_nseg);
1503 if (rc == IOCB_SUCCESS)
1504 return 0; /* done for now */
1506 /* TBD need to handle a timeout */
1507 pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
1508 job->request_payload.sg_cnt, DMA_TO_DEVICE);
1510 lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
1512 send_mgmt_rsp_free_bmp:
1515 /* make error code available to userspace */
1516 job->reply->result = rc;
1517 job->dd_data = NULL;
1522 * lpfc_bsg_diag_mode_enter - process preparing into device diag loopback mode
1523 * @phba: Pointer to HBA context object.
1525 * This function is responsible for preparing driver for diag loopback
1529 lpfc_bsg_diag_mode_enter(struct lpfc_hba *phba)
1531 struct lpfc_vport **vports;
1532 struct Scsi_Host *shost;
1533 struct lpfc_sli *psli;
1534 struct lpfc_sli_ring *pring;
1541 pring = &psli->ring[LPFC_FCP_RING];
/* refuse diag mode if the HBA errored, mgmt I/O is blocked,
 * or SLI is not active
 */
1545 if ((phba->link_state == LPFC_HBA_ERROR) ||
1546 (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
1547 (!(psli->sli_flag & LPFC_SLI_ACTIVE)))
/* block scsi requests on every vport so the midlayer stops
 * submitting while the link is manipulated
 */
1550 vports = lpfc_create_vport_work_array(phba);
1552 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
1553 shost = lpfc_shost_from_vport(vports[i]);
1554 scsi_block_requests(shost);
1556 lpfc_destroy_vport_work_array(phba, vports);
/* no vport array: block just the physical port's host */
1558 shost = lpfc_shost_from_vport(phba->pport);
1559 scsi_block_requests(shost);
/* drain outstanding FCP commands before proceeding */
1562 while (pring->txcmplq_cnt) {
1563 if (i++ > 500) /* wait up to 5 seconds */
1571 * lpfc_bsg_diag_mode_exit - exit process from device diag loopback mode
1572 * @phba: Pointer to HBA context object.
1574 * This function is responsible for driver exit processing of setting up
1575 * diag loopback mode on device.
1578 lpfc_bsg_diag_mode_exit(struct lpfc_hba *phba)
1580 struct Scsi_Host *shost;
1581 struct lpfc_vport **vports;
/* mirror of lpfc_bsg_diag_mode_enter: unblock scsi on every vport */
1584 vports = lpfc_create_vport_work_array(phba);
1586 for (i = 0; i <= phba->max_vpi && vports[i] != NULL; i++) {
1587 shost = lpfc_shost_from_vport(vports[i]);
1588 scsi_unblock_requests(shost);
1590 lpfc_destroy_vport_work_array(phba, vports);
/* no vport array: unblock just the physical port's host */
1592 shost = lpfc_shost_from_vport(phba->pport);
1593 scsi_unblock_requests(shost);
1599 * lpfc_sli3_bsg_diag_loopback_mode - process an sli3 bsg vendor command
1600 * @phba: Pointer to HBA context object.
1601 * @job: LPFC_BSG_VENDOR_DIAG_MODE
1603 * This function is responsible for placing an sli3 port into diagnostic
1604 * loopback mode in order to perform a diagnostic loopback test.
1605 * All new scsi requests are blocked, a small delay is used to allow the
1606 * scsi requests to complete then the link is brought down. If the link is
1607 * placed in loopback mode then scsi requests are again allowed
1608 * so the scsi mid-layer doesn't give up on the port.
1609 * All of this is done in-line.
1612 lpfc_sli3_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
1614 struct diag_mode_set *loopback_mode;
1615 uint32_t link_flags;
1617 LPFC_MBOXQ_t *pmboxq = NULL;
1618 int mbxstatus = MBX_SUCCESS;
1622 /* no data to return just the return code */
1623 job->reply->reply_payload_rcv_len = 0;
/* validate the bsg request is large enough to hold the command */
1625 if (job->request_len < sizeof(struct fc_bsg_request) +
1626 sizeof(struct diag_mode_set)) {
1627 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1628 "2738 Received DIAG MODE request size:%d "
1629 "below the minimum size:%d\n",
1631 (int)(sizeof(struct fc_bsg_request) +
1632 sizeof(struct diag_mode_set)));
/* quiesce scsi traffic before touching the link */
1637 rc = lpfc_bsg_diag_mode_enter(phba);
1641 /* bring the link to diagnostic mode */
1642 loopback_mode = (struct diag_mode_set *)
1643 job->request->rqst_data.h_vendor.vendor_cmd;
1644 link_flags = loopback_mode->type;
/* user timeout is in seconds; loops below poll in 10ms-scale units */
1645 timeout = loopback_mode->timeout * 100;
1647 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
1650 goto loopback_mode_exit;
/* step 1: take the link down */
1652 memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t));
1653 pmboxq->u.mb.mbxCommand = MBX_DOWN_LINK;
1654 pmboxq->u.mb.mbxOwner = OWN_HOST;
1656 mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
1658 if ((mbxstatus == MBX_SUCCESS) && (pmboxq->u.mb.mbxStatus == 0)) {
1659 /* wait for link down before proceeding */
1661 while (phba->link_state != LPFC_LINK_DOWN) {
1662 if (i++ > timeout) {
1664 goto loopback_mode_exit;
/* step 2: re-init the link in the requested loopback topology */
1669 memset((void *)pmboxq, 0, sizeof(LPFC_MBOXQ_t));
1670 if (link_flags == INTERNAL_LOOP_BACK)
1671 pmboxq->u.mb.un.varInitLnk.link_flags = FLAGS_LOCAL_LB;
1673 pmboxq->u.mb.un.varInitLnk.link_flags =
1674 FLAGS_TOPOLOGY_MODE_LOOP;
1676 pmboxq->u.mb.mbxCommand = MBX_INIT_LINK;
1677 pmboxq->u.mb.mbxOwner = OWN_HOST;
1679 mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq,
1682 if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus))
/* mark the port as being in loopback so other paths know */
1685 spin_lock_irq(&phba->hbalock);
1686 phba->link_flag |= LS_LOOPBACK_MODE;
1687 spin_unlock_irq(&phba->hbalock);
1688 /* wait for the link attention interrupt */
1692 while (phba->link_state != LPFC_HBA_READY) {
1693 if (i++ > timeout) {
1706 lpfc_bsg_diag_mode_exit(phba);
1709 * Let SLI layer release mboxq if mbox command completed after timeout.
1711 if (pmboxq && mbxstatus != MBX_TIMEOUT)
1712 mempool_free(pmboxq, phba->mbox_mem_pool);
1715 /* make error code available to userspace */
1716 job->reply->result = rc;
1717 /* complete the job back to userspace if no error */
1724 * lpfc_sli4_bsg_set_link_diag_state - set sli4 link diag state
1725 * @phba: Pointer to HBA context object.
1726 * @diag: Flag to set link to diag or normal operation state.
1728 * This function is responsible for issuing a sli4 mailbox command for setting
1729 * link to either diag state or normal operation state.
1732 lpfc_sli4_bsg_set_link_diag_state(struct lpfc_hba *phba, uint32_t diag)
1734 LPFC_MBOXQ_t *pmboxq;
1735 struct lpfc_mbx_set_link_diag_state *link_diag_state;
1736 uint32_t req_len, alloc_len;
1737 int mbxstatus = MBX_SUCCESS, rc;
1739 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
/* embedded SLI4 config mailbox: payload length excludes the header */
1743 req_len = (sizeof(struct lpfc_mbx_set_link_diag_state) -
1744 sizeof(struct lpfc_sli4_cfg_mhdr));
1745 alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
1746 LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE,
1747 req_len, LPFC_SLI4_MBX_EMBED);
1748 if (alloc_len != req_len) {
1750 goto link_diag_state_set_out;
1752 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
1753 "3128 Set link to diagnostic state:x%x (x%x/x%x)\n",
1754 diag, phba->sli4_hba.lnk_info.lnk_tp,
1755 phba->sli4_hba.lnk_info.lnk_no);
/* target the specific link (number + type) recorded at init time */
1757 link_diag_state = &pmboxq->u.mqe.un.link_diag_state;
1758 bf_set(lpfc_mbx_set_diag_state_diag_bit_valid, &link_diag_state->u.req,
1759 LPFC_DIAG_STATE_DIAG_BIT_VALID_CHANGE);
1760 bf_set(lpfc_mbx_set_diag_state_link_num, &link_diag_state->u.req,
1761 phba->sli4_hba.lnk_info.lnk_no);
1762 bf_set(lpfc_mbx_set_diag_state_link_type, &link_diag_state->u.req,
1763 phba->sli4_hba.lnk_info.lnk_tp);
/* diag != 0 enters diagnostic state, diag == 0 returns to normal */
1765 bf_set(lpfc_mbx_set_diag_state_diag,
1766 &link_diag_state->u.req, 1);
1768 bf_set(lpfc_mbx_set_diag_state_diag,
1769 &link_diag_state->u.req, 0);
1771 mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
1773 if ((mbxstatus == MBX_SUCCESS) && (pmboxq->u.mb.mbxStatus == 0))
1778 link_diag_state_set_out:
/* on MBX_TIMEOUT the SLI layer still owns the mailbox; don't free */
1779 if (pmboxq && (mbxstatus != MBX_TIMEOUT))
1780 mempool_free(pmboxq, phba->mbox_mem_pool);
1786 * lpfc_sli4_bsg_set_internal_loopback - set sli4 internal loopback diagnostic
1787 * @phba: Pointer to HBA context object.
1789 * This function is responsible for issuing a sli4 mailbox command for setting
1790 * up internal loopback diagnostic.
1793 lpfc_sli4_bsg_set_internal_loopback(struct lpfc_hba *phba)
1795 LPFC_MBOXQ_t *pmboxq;
1796 uint32_t req_len, alloc_len;
1797 struct lpfc_mbx_set_link_diag_loopback *link_diag_loopback;
1798 int mbxstatus = MBX_SUCCESS, rc = 0;
1800 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
/* embedded SLI4 config mailbox: payload length excludes the header */
1803 req_len = (sizeof(struct lpfc_mbx_set_link_diag_loopback) -
1804 sizeof(struct lpfc_sli4_cfg_mhdr));
1805 alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
1806 LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_LOOPBACK,
1807 req_len, LPFC_SLI4_MBX_EMBED);
1808 if (alloc_len != req_len) {
1809 mempool_free(pmboxq, phba->mbox_mem_pool);
/* request internal loopback on the port's recorded link num/type */
1812 link_diag_loopback = &pmboxq->u.mqe.un.link_diag_loopback;
1813 bf_set(lpfc_mbx_set_diag_state_link_num,
1814 &link_diag_loopback->u.req, phba->sli4_hba.lnk_info.lnk_no);
1815 bf_set(lpfc_mbx_set_diag_state_link_type,
1816 &link_diag_loopback->u.req, phba->sli4_hba.lnk_info.lnk_tp);
1817 bf_set(lpfc_mbx_set_diag_lpbk_type, &link_diag_loopback->u.req,
1818 LPFC_DIAG_LOOPBACK_TYPE_INTERNAL);
1820 mbxstatus = lpfc_sli_issue_mbox_wait(phba, pmboxq, LPFC_MBOX_TMO);
1821 if ((mbxstatus != MBX_SUCCESS) || (pmboxq->u.mb.mbxStatus)) {
1822 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1823 "3127 Failed setup loopback mode mailbox "
1824 "command, rc:x%x, status:x%x\n", mbxstatus,
1825 pmboxq->u.mb.mbxStatus);
/* on MBX_TIMEOUT the SLI layer still owns the mailbox; don't free */
1828 if (pmboxq && (mbxstatus != MBX_TIMEOUT))
1829 mempool_free(pmboxq, phba->mbox_mem_pool);
1834 * lpfc_sli4_diag_fcport_reg_setup - setup port registrations for diagnostic
1835 * @phba: Pointer to HBA context object.
1837 * This function set up SLI4 FC port registrations for diagnostic run, which
1838 * includes all the rpis, vfi, and also vpi.
1841 lpfc_sli4_diag_fcport_reg_setup(struct lpfc_hba *phba)
/* a leftover vfi registration indicates the prior reset did not
 * clean up; log the ids for debugging
 */
1845 if (phba->pport->fc_flag & FC_VFI_REGISTERED) {
1846 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1847 "3136 Port still had vfi registered: "
1848 "mydid:x%x, fcfi:%d, vfi:%d, vpi:%d\n",
1849 phba->pport->fc_myDID, phba->fcf.fcfi,
1850 phba->sli4_hba.vfi_ids[phba->pport->vfi],
1851 phba->vpi_ids[phba->pport->vpi]);
/* register the vfi (and implicitly the vpi) for the loopback run */
1854 rc = lpfc_issue_reg_vfi(phba->pport);
1859 * lpfc_sli4_bsg_diag_loopback_mode - process an sli4 bsg vendor command
1860 * @phba: Pointer to HBA context object.
1861 * @job: LPFC_BSG_VENDOR_DIAG_MODE
1863 * This function is responsible for placing an sli4 port into diagnostic
1864 * loopback mode in order to perform a diagnostic loopback test.
1867 lpfc_sli4_bsg_diag_loopback_mode(struct lpfc_hba *phba, struct fc_bsg_job *job)
1869 struct diag_mode_set *loopback_mode;
1870 uint32_t link_flags, timeout;
1873 /* no data to return just the return code */
1874 job->reply->reply_payload_rcv_len = 0;
/* validate the bsg request is large enough to hold the command */
1876 if (job->request_len < sizeof(struct fc_bsg_request) +
1877 sizeof(struct diag_mode_set)) {
1878 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1879 "3011 Received DIAG MODE request size:%d "
1880 "below the minimum size:%d\n",
1882 (int)(sizeof(struct fc_bsg_request) +
1883 sizeof(struct diag_mode_set)));
/* quiesce scsi traffic before touching the link */
1888 rc = lpfc_bsg_diag_mode_enter(phba);
1892 /* indicate we are in loopback diagnostic mode */
1893 spin_lock_irq(&phba->hbalock);
1894 phba->link_flag |= LS_LOOPBACK_MODE;
1895 spin_unlock_irq(&phba->hbalock);
1897 /* reset port to start from scratch */
1898 rc = lpfc_selective_reset(phba);
1902 /* bring the link to diagnostic mode */
1903 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
1904 "3129 Bring link to diagnostic state.\n");
1905 loopback_mode = (struct diag_mode_set *)
1906 job->request->rqst_data.h_vendor.vendor_cmd;
1907 link_flags = loopback_mode->type;
/* user timeout is in seconds; loops below poll in 10ms-scale units */
1908 timeout = loopback_mode->timeout * 100;
1910 rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1);
1912 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
1913 "3130 Failed to bring link to diagnostic "
1914 "state, rc:x%x\n", rc);
1915 goto loopback_mode_exit;
1918 /* wait for link down before proceeding */
1920 while (phba->link_state != LPFC_LINK_DOWN) {
1921 if (i++ > timeout) {
1923 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
1924 "3131 Timeout waiting for link to "
1925 "diagnostic mode, timeout:%d ms\n",
1927 goto loopback_mode_exit;
1932 /* set up loopback mode */
1933 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
1934 "3132 Set up loopback mode:x%x\n", link_flags);
/* internal loopback via mailbox; external requires pt-pt topology */
1936 if (link_flags == INTERNAL_LOOP_BACK)
1937 rc = lpfc_sli4_bsg_set_internal_loopback(phba);
1938 else if (link_flags == EXTERNAL_LOOP_BACK)
1939 rc = lpfc_hba_init_link_fc_topology(phba,
1940 FLAGS_TOPOLOGY_MODE_PT_PT,
1944 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
1945 "3141 Loopback mode:x%x not supported\n",
1947 goto loopback_mode_exit;
1951 /* wait for the link attention interrupt */
1954 while (phba->link_state < LPFC_LINK_UP) {
1955 if (i++ > timeout) {
1957 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
1958 "3137 Timeout waiting for link up "
1959 "in loopback mode, timeout:%d ms\n",
1967 /* port resource registration setup for loopback diagnostic */
1969 /* set up a none zero myDID for loopback test */
1970 phba->pport->fc_myDID = 1;
1971 rc = lpfc_sli4_diag_fcport_reg_setup(phba);
1973 goto loopback_mode_exit;
1976 /* wait for the port ready */
1979 while (phba->link_state != LPFC_HBA_READY) {
1980 if (i++ > timeout) {
1982 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
1983 "3133 Timeout waiting for port "
1984 "loopback mode ready, timeout:%d ms\n",
1993 /* clear loopback diagnostic mode */
1995 spin_lock_irq(&phba->hbalock);
1996 phba->link_flag &= ~LS_LOOPBACK_MODE;
1997 spin_unlock_irq(&phba->hbalock);
1999 lpfc_bsg_diag_mode_exit(phba);
2002 /* make error code available to userspace */
2003 job->reply->result = rc;
2004 /* complete the job back to userspace if no error */
2011 * lpfc_bsg_diag_loopback_mode - bsg vendor command for diag loopback mode
2012 * @job: LPFC_BSG_VENDOR_DIAG_MODE
2014 * This function is responsible for responding to check and dispatch bsg diag
2015 * command from the user to proper driver action routines.
2018 lpfc_bsg_diag_loopback_mode(struct fc_bsg_job *job)
2020 struct Scsi_Host *shost;
2021 struct lpfc_vport *vport;
2022 struct lpfc_hba *phba;
2028 vport = (struct lpfc_vport *)job->shost->hostdata;
/* dispatch by SLI revision: SLI3 path, or SLI4 (if_type 2 only) */
2035 if (phba->sli_rev < LPFC_SLI_REV4)
2036 rc = lpfc_sli3_bsg_diag_loopback_mode(phba, job);
2037 else if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) ==
2038 LPFC_SLI_INTF_IF_TYPE_2)
2039 rc = lpfc_sli4_bsg_diag_loopback_mode(phba, job);
2047 * lpfc_sli4_bsg_diag_mode_end - sli4 bsg vendor command for ending diag mode
2048 * @job: LPFC_BSG_VENDOR_DIAG_MODE_END
2050 * This function is responsible for responding to check and dispatch bsg diag
2051 * command from the user to proper driver action routines.
2054 lpfc_sli4_bsg_diag_mode_end(struct fc_bsg_job *job)
2056 struct Scsi_Host *shost;
2057 struct lpfc_vport *vport;
2058 struct lpfc_hba *phba;
2059 struct diag_mode_set *loopback_mode_end_cmd;
2066 vport = (struct lpfc_vport *)job->shost->hostdata;
/* diag mode end is only meaningful on SLI4 if_type 2 ports */
2073 if (phba->sli_rev < LPFC_SLI_REV4)
2075 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
2076 LPFC_SLI_INTF_IF_TYPE_2)
2079 /* clear loopback diagnostic mode */
2080 spin_lock_irq(&phba->hbalock);
2081 phba->link_flag &= ~LS_LOOPBACK_MODE;
2082 spin_unlock_irq(&phba->hbalock);
2083 loopback_mode_end_cmd = (struct diag_mode_set *)
2084 job->request->rqst_data.h_vendor.vendor_cmd;
/* user timeout is in seconds; loop below polls in 10ms-scale units */
2085 timeout = loopback_mode_end_cmd->timeout * 100;
/* take the link out of diagnostic state */
2087 rc = lpfc_sli4_bsg_set_link_diag_state(phba, 0);
2089 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2090 "3139 Failed to bring link to diagnostic "
2091 "state, rc:x%x\n", rc);
2092 goto loopback_mode_end_exit;
2095 /* wait for link down before proceeding */
2097 while (phba->link_state != LPFC_LINK_DOWN) {
2098 if (i++ > timeout) {
2100 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
2101 "3140 Timeout waiting for link to "
2102 "diagnostic mode_end, timeout:%d ms\n",
2104 /* there is nothing much we can do here */
2110 /* reset port resource registrations */
2111 rc = lpfc_selective_reset(phba);
2112 phba->pport->fc_myDID = 0;
2114 loopback_mode_end_exit:
2115 /* make return code available to userspace */
2116 job->reply->result = rc;
2117 /* complete the job back to userspace if no error */
2124 * lpfc_sli4_bsg_link_diag_test - sli4 bsg vendor command for diag link test
2125 * @job: LPFC_BSG_VENDOR_DIAG_LINK_TEST
2127 * This function is to perform SLI4 diag link test request from the user
2131 lpfc_sli4_bsg_link_diag_test(struct fc_bsg_job *job)
2133 struct Scsi_Host *shost;
2134 struct lpfc_vport *vport;
2135 struct lpfc_hba *phba;
2136 LPFC_MBOXQ_t *pmboxq;
2137 struct sli4_link_diag *link_diag_test_cmd;
2138 uint32_t req_len, alloc_len;
2140 struct lpfc_mbx_run_link_diag_test *run_link_diag_test;
2141 union lpfc_sli4_cfg_shdr *shdr;
2142 uint32_t shdr_status, shdr_add_status;
2143 struct diag_status *diag_status_reply;
2144 int mbxstatus, rc = 0;
2151 vport = (struct lpfc_vport *)job->shost->hostdata;
/* link diag test is only supported on SLI4 if_type 2 ports */
2162 if (phba->sli_rev < LPFC_SLI_REV4) {
2166 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
2167 LPFC_SLI_INTF_IF_TYPE_2) {
/* validate the bsg request is large enough to hold the command */
2172 if (job->request_len < sizeof(struct fc_bsg_request) +
2173 sizeof(struct sli4_link_diag)) {
2174 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2175 "3013 Received LINK DIAG TEST request "
2176 " size:%d below the minimum size:%d\n",
2178 (int)(sizeof(struct fc_bsg_request) +
2179 sizeof(struct sli4_link_diag)));
2184 rc = lpfc_bsg_diag_mode_enter(phba);
2188 link_diag_test_cmd = (struct sli4_link_diag *)
2189 job->request->rqst_data.h_vendor.vendor_cmd;
/* user timeout is in seconds; driver tracks 10ms-scale units */
2190 timeout = link_diag_test_cmd->timeout * 100;
/* place the link into diagnostic state for the duration of the test */
2192 rc = lpfc_sli4_bsg_set_link_diag_state(phba, 1);
2197 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
2200 goto link_diag_test_exit;
/* embedded SLI4 config mailbox: payload length excludes the header */
2203 req_len = (sizeof(struct lpfc_mbx_set_link_diag_state) -
2204 sizeof(struct lpfc_sli4_cfg_mhdr));
2205 alloc_len = lpfc_sli4_config(phba, pmboxq, LPFC_MBOX_SUBSYSTEM_FCOE,
2206 LPFC_MBOX_OPCODE_FCOE_LINK_DIAG_STATE,
2207 req_len, LPFC_SLI4_MBX_EMBED);
2208 if (alloc_len != req_len) {
2210 goto link_diag_test_exit;
/* fill in the diag test parameters supplied by userspace */
2212 run_link_diag_test = &pmboxq->u.mqe.un.link_diag_test;
2213 bf_set(lpfc_mbx_run_diag_test_link_num, &run_link_diag_test->u.req,
2214 phba->sli4_hba.lnk_info.lnk_no);
2215 bf_set(lpfc_mbx_run_diag_test_link_type, &run_link_diag_test->u.req,
2216 phba->sli4_hba.lnk_info.lnk_tp);
2217 bf_set(lpfc_mbx_run_diag_test_test_id, &run_link_diag_test->u.req,
2218 link_diag_test_cmd->test_id);
2219 bf_set(lpfc_mbx_run_diag_test_loops, &run_link_diag_test->u.req,
2220 link_diag_test_cmd->loops);
2221 bf_set(lpfc_mbx_run_diag_test_test_ver, &run_link_diag_test->u.req,
2222 link_diag_test_cmd->test_version);
2223 bf_set(lpfc_mbx_run_diag_test_err_act, &run_link_diag_test->u.req,
2224 link_diag_test_cmd->error_action);
2226 mbxstatus = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
/* extract firmware completion status from the config header */
2228 shdr = (union lpfc_sli4_cfg_shdr *)
2229 &pmboxq->u.mqe.un.sli4_config.header.cfg_shdr;
2230 shdr_status = bf_get(lpfc_mbox_hdr_status, &shdr->response);
2231 shdr_add_status = bf_get(lpfc_mbox_hdr_add_status, &shdr->response);
2232 if (shdr_status || shdr_add_status || mbxstatus) {
2233 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
2234 "3010 Run link diag test mailbox failed with "
2235 "mbx_status x%x status x%x, add_status x%x\n",
2236 mbxstatus, shdr_status, shdr_add_status);
2239 diag_status_reply = (struct diag_status *)
2240 job->reply->reply_data.vendor_reply.vendor_rsp;
/* make sure the reply buffer can hold the diag status */
2242 if (job->reply_len <
2243 sizeof(struct fc_bsg_request) + sizeof(struct diag_status)) {
2244 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2245 "3012 Received Run link diag test reply "
2246 "below minimum size (%d): reply_len:%d\n",
2247 (int)(sizeof(struct fc_bsg_request) +
2248 sizeof(struct diag_status)),
/* relay raw mailbox/firmware status back to userspace */
2254 diag_status_reply->mbox_status = mbxstatus;
2255 diag_status_reply->shdr_status = shdr_status;
2256 diag_status_reply->shdr_add_status = shdr_add_status;
2258 link_diag_test_exit:
/* always attempt to restore the link to normal operation */
2259 rc = lpfc_sli4_bsg_set_link_diag_state(phba, 0);
2262 mempool_free(pmboxq, phba->mbox_mem_pool);
2264 lpfc_bsg_diag_mode_exit(phba);
2267 /* make error code available to userspace */
2268 job->reply->result = rc;
2269 /* complete the job back to userspace if no error */
2276 * lpfcdiag_loop_self_reg - obtains a remote port login id
2277 * @phba: Pointer to HBA context object
2278 * @rpi: Pointer to a remote port login id
2280 * This function obtains a remote port login id so the diag loopback test
2281 * can send and receive its own unsolicited CT command.
2283 static int lpfcdiag_loop_self_reg(struct lpfc_hba *phba, uint16_t *rpi)
2286 struct lpfc_dmabuf *dmabuff;
2289 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
/* register the port's own DID so it can log in to itself;
 * SLI4 must first allocate an rpi index
 */
2293 if (phba->sli_rev < LPFC_SLI_REV4)
2294 status = lpfc_reg_rpi(phba, 0, phba->pport->fc_myDID,
2295 (uint8_t *)&phba->pport->fc_sparam,
2298 *rpi = lpfc_sli4_alloc_rpi(phba);
2299 status = lpfc_reg_rpi(phba, phba->pport->vpi,
2300 phba->pport->fc_myDID,
2301 (uint8_t *)&phba->pport->fc_sparam,
/* reg_rpi prep failed: release the mailbox and the SLI4 rpi */
2306 mempool_free(mbox, phba->mbox_mem_pool);
2307 if (phba->sli_rev == LPFC_SLI_REV4)
2308 lpfc_sli4_free_rpi(phba, *rpi);
/* take ownership of the sparam dma buffer attached by lpfc_reg_rpi */
2312 dmabuff = (struct lpfc_dmabuf *) mbox->context1;
2313 mbox->context1 = NULL;
2314 mbox->context2 = NULL;
2315 status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
2317 if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) {
2318 lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys);
/* on MBX_TIMEOUT the SLI layer still owns the mailbox; don't free */
2320 if (status != MBX_TIMEOUT)
2321 mempool_free(mbox, phba->mbox_mem_pool);
2322 if (phba->sli_rev == LPFC_SLI_REV4)
2323 lpfc_sli4_free_rpi(phba, *rpi);
/* SLI3 returns the assigned rpi in the mailbox completion */
2327 if (phba->sli_rev < LPFC_SLI_REV4)
2328 *rpi = mbox->u.mb.un.varWords[0];
2330 lpfc_mbuf_free(phba, dmabuff->virt, dmabuff->phys);
2332 mempool_free(mbox, phba->mbox_mem_pool);
2337 * lpfcdiag_loop_self_unreg - unregs from the rpi
2338 * @phba: Pointer to HBA context object
2339 * @rpi: Remote port login id
2341 * This function unregisters the rpi obtained in lpfcdiag_loop_self_reg
2343 static int lpfcdiag_loop_self_unreg(struct lpfc_hba *phba, uint16_t rpi)
2348 /* Allocate mboxq structure */
2349 mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
/* SLI4 uses the hardware rpi id; SLI3 uses the index directly */
2353 if (phba->sli_rev < LPFC_SLI_REV4)
2354 lpfc_unreg_login(phba, 0, rpi, mbox);
2356 lpfc_unreg_login(phba, phba->pport->vpi,
2357 phba->sli4_hba.rpi_ids[rpi], mbox);
2359 status = lpfc_sli_issue_mbox_wait(phba, mbox, LPFC_MBOX_TMO);
2361 if ((status != MBX_SUCCESS) || (mbox->u.mb.mbxStatus)) {
/* on MBX_TIMEOUT the SLI layer still owns the mailbox; don't free */
2362 if (status != MBX_TIMEOUT)
2363 mempool_free(mbox, phba->mbox_mem_pool);
2366 mempool_free(mbox, phba->mbox_mem_pool);
/* return the rpi index to the SLI4 pool allocated in self_reg */
2367 if (phba->sli_rev == LPFC_SLI_REV4)
2368 lpfc_sli4_free_rpi(phba, rpi);
2373 * lpfcdiag_loop_get_xri - obtains the transmit and receive ids
2374 * @phba: Pointer to HBA context object
2375 * @rpi: Remote port login id
2376 * @txxri: Pointer to transmit exchange id
2377 * @rxxri: Pointer to response exchange id
2379 * This function obtains the transmit and receive ids required to send
2380 * an unsolicited ct command with a payload. A special lpfc FsType and CmdRsp
2381 * flags are used so the unsolicited response handler is able to process
2382 * the ct command sent on the same port.
2384 static int lpfcdiag_loop_get_xri(struct lpfc_hba *phba, uint16_t rpi,
2385 uint16_t *txxri, uint16_t * rxxri)
2387 struct lpfc_bsg_event *evt;
2388 struct lpfc_iocbq *cmdiocbq, *rspiocbq;
2390 struct lpfc_dmabuf *dmabuf;
2391 struct ulp_bde64 *bpl = NULL;
2392 struct lpfc_sli_ct_request *ctreq = NULL;
2396 unsigned long flags;
/* register an event waiter keyed on the loopback CT type so the
 * unsolicited handler will capture our own XRI_SETUP response
 */
2400 evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid,
2401 SLI_CT_ELX_LOOPBACK);
2405 spin_lock_irqsave(&phba->ct_ev_lock, flags);
2406 list_add(&evt->node, &phba->ct_ev_waiters);
2407 lpfc_bsg_event_ref(evt);
2408 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2410 cmdiocbq = lpfc_sli_get_iocbq(phba);
2411 rspiocbq = lpfc_sli_get_iocbq(phba);
/* one mbuf holds both the BPL and, right after it, the CT request */
2413 dmabuf = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2415 dmabuf->virt = lpfc_mbuf_alloc(phba, 0, &dmabuf->phys);
2417 INIT_LIST_HEAD(&dmabuf->list);
2418 bpl = (struct ulp_bde64 *) dmabuf->virt;
2419 memset(bpl, 0, sizeof(*bpl));
2420 ctreq = (struct lpfc_sli_ct_request *)(bpl + 1);
2422 le32_to_cpu(putPaddrHigh(dmabuf->phys +
2425 le32_to_cpu(putPaddrLow(dmabuf->phys +
2427 bpl->tus.f.bdeFlags = 0;
2428 bpl->tus.f.bdeSize = ELX_LOOPBACK_HEADER_SZ;
2429 bpl->tus.w = le32_to_cpu(bpl->tus.w);
/* check all allocations together; err path cleans up what exists */
2433 if (cmdiocbq == NULL || rspiocbq == NULL ||
2434 dmabuf == NULL || bpl == NULL || ctreq == NULL ||
2435 dmabuf->virt == NULL) {
2437 goto err_get_xri_exit;
2440 cmd = &cmdiocbq->iocb;
2441 rsp = &rspiocbq->iocb;
/* build the special ELX loopback XRI_SETUP CT request */
2443 memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ);
2445 ctreq->RevisionId.bits.Revision = SLI_CT_REVISION;
2446 ctreq->RevisionId.bits.InId = 0;
2447 ctreq->FsType = SLI_CT_ELX_LOOPBACK;
2448 ctreq->FsSubType = 0;
2449 ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_XRI_SETUP;
2450 ctreq->CommandResponse.bits.Size = 0;
2453 cmd->un.xseq64.bdl.addrHigh = putPaddrHigh(dmabuf->phys);
2454 cmd->un.xseq64.bdl.addrLow = putPaddrLow(dmabuf->phys);
2455 cmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
2456 cmd->un.xseq64.bdl.bdeSize = sizeof(*bpl);
2458 cmd->un.xseq64.w5.hcsw.Fctl = LA;
2459 cmd->un.xseq64.w5.hcsw.Dfctl = 0;
2460 cmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
2461 cmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;
2463 cmd->ulpCommand = CMD_XMIT_SEQUENCE64_CR;
2464 cmd->ulpBdeCount = 1;
2466 cmd->ulpClass = CLASS3;
2467 cmd->ulpContext = rpi;
2469 cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
2470 cmdiocbq->vport = phba->pport;
/* issue synchronously; timeout covers 2x RATOV plus driver margin */
2472 iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
2474 (phba->fc_ratov * 2)
2475 + LPFC_DRVR_TIMEOUT);
2478 goto err_get_xri_exit;
/* the transmit xri comes back in the response iocb context */
2480 *txxri = rsp->ulpContext;
2483 evt->wait_time_stamp = jiffies;
/* block until the unsolicited handler queues our loopback event */
2484 time_left = wait_event_interruptible_timeout(
2485 evt->wq, !list_empty(&evt->events_to_see),
2486 ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT) * HZ);
2487 if (list_empty(&evt->events_to_see))
2488 ret_val = (time_left) ? -EINTR : -ETIMEDOUT;
2490 spin_lock_irqsave(&phba->ct_ev_lock, flags);
2491 list_move(evt->events_to_see.prev, &evt->events_to_get);
2492 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
/* the receive xri is carried in the captured event's immediate data */
2493 *rxxri = (list_entry(evt->events_to_get.prev,
2494 typeof(struct event_data),
/* drop both references: the waiter ref and the creation ref */
2500 spin_lock_irqsave(&phba->ct_ev_lock, flags);
2501 lpfc_bsg_event_unref(evt); /* release ref */
2502 lpfc_bsg_event_unref(evt); /* delete */
2503 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2507 lpfc_mbuf_free(phba, dmabuf->virt, dmabuf->phys);
/* on IOCB_TIMEDOUT the SLI layer still owns the command iocb */
2511 if (cmdiocbq && (iocb_stat != IOCB_TIMEDOUT))
2512 lpfc_sli_release_iocbq(phba, cmdiocbq);
2514 lpfc_sli_release_iocbq(phba, rspiocbq);
2519 * lpfc_bsg_dma_page_alloc - allocate a bsg mbox page sized dma buffers
2520 * @phba: Pointer to HBA context object
2522 * This function allocates a BSG_MBOX_SIZE (4KB) page-sized DMA buffer and
2523 * returns the pointer to the buffer.
2525 static struct lpfc_dmabuf *
2526 lpfc_bsg_dma_page_alloc(struct lpfc_hba *phba)
2528 struct lpfc_dmabuf *dmabuf;
2529 struct pci_dev *pcidev = phba->pcidev;
2531 /* allocate dma buffer struct */
2532 dmabuf = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2536 INIT_LIST_HEAD(&dmabuf->list);
2538 /* now, allocate dma buffer */
2539 dmabuf->virt = dma_alloc_coherent(&pcidev->dev, BSG_MBOX_SIZE,
2540 &(dmabuf->phys), GFP_KERNEL);
2542 if (!dmabuf->virt) {
2546 memset((uint8_t *)dmabuf->virt, 0, BSG_MBOX_SIZE);
2552 * lpfc_bsg_dma_page_free - free a bsg mbox page sized dma buffer
2553 * @phba: Pointer to HBA context object.
2554 * @dmabuf: Pointer to the bsg mbox page sized dma buffer descriptor.
2556 * This routine just simply frees a dma buffer and its associated buffer
2557 * descriptor referred by @dmabuf.
2560 lpfc_bsg_dma_page_free(struct lpfc_hba *phba, struct lpfc_dmabuf *dmabuf)
2562 struct pci_dev *pcidev = phba->pcidev;
2568 dma_free_coherent(&pcidev->dev, BSG_MBOX_SIZE,
2569 dmabuf->virt, dmabuf->phys);
2575 * lpfc_bsg_dma_page_list_free - free a list of bsg mbox page sized dma buffers
2576 * @phba: Pointer to HBA context object.
2577 * @dmabuf_list: Pointer to a list of bsg mbox page sized dma buffer descs.
2579 * This routine just simply frees all dma buffers and their associated buffer
2580 * descriptors referred by @dmabuf_list.
2583 lpfc_bsg_dma_page_list_free(struct lpfc_hba *phba,
2584 struct list_head *dmabuf_list)
2586 struct lpfc_dmabuf *dmabuf, *next_dmabuf;
2588 if (list_empty(dmabuf_list))
2591 list_for_each_entry_safe(dmabuf, next_dmabuf, dmabuf_list, list) {
2592 list_del_init(&dmabuf->list);
2593 lpfc_bsg_dma_page_free(phba, dmabuf);
2599 * diag_cmd_data_alloc - fills in a bde struct with dma buffers
2600 * @phba: Pointer to HBA context object
2601 * @bpl: Pointer to 64 bit bde structure
2602 * @size: Number of bytes to process
2603 * @nocopydata: Flag to copy user data into the allocated buffer
2605 * This function allocates page size buffers and populates an lpfc_dmabufext.
2606 * If allowed the user data pointed to with indataptr is copied into the kernel
2607 * memory. The chained list of page size buffers is returned.
2609 static struct lpfc_dmabufext *
2610 diag_cmd_data_alloc(struct lpfc_hba *phba,
2611 struct ulp_bde64 *bpl, uint32_t size,
2614 struct lpfc_dmabufext *mlist = NULL;
2615 struct lpfc_dmabufext *dmp;
2616 int cnt, offset = 0, i = 0;
2617 struct pci_dev *pcidev;
2619 pcidev = phba->pcidev;
2622 /* We get chunks of 4K */
2623 if (size > BUF_SZ_4K)
2628 /* allocate struct lpfc_dmabufext buffer header */
2629 dmp = kmalloc(sizeof(struct lpfc_dmabufext), GFP_KERNEL);
2633 INIT_LIST_HEAD(&dmp->dma.list);
2635 /* Queue it to a linked list */
2637 list_add_tail(&dmp->dma.list, &mlist->dma.list);
2641 /* allocate buffer */
2642 dmp->dma.virt = dma_alloc_coherent(&pcidev->dev,
2653 bpl->tus.f.bdeFlags = 0;
2654 pci_dma_sync_single_for_device(phba->pcidev,
2655 dmp->dma.phys, LPFC_BPL_SIZE, PCI_DMA_TODEVICE);
2658 memset((uint8_t *)dmp->dma.virt, 0, cnt);
2659 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
2662 /* build buffer ptr list for IOCB */
2663 bpl->addrLow = le32_to_cpu(putPaddrLow(dmp->dma.phys));
2664 bpl->addrHigh = le32_to_cpu(putPaddrHigh(dmp->dma.phys));
2665 bpl->tus.f.bdeSize = (ushort) cnt;
2666 bpl->tus.w = le32_to_cpu(bpl->tus.w);
2677 diag_cmd_data_free(phba, mlist);
2682 * lpfcdiag_loop_post_rxbufs - post the receive buffers for an unsol CT cmd
2683 * @phba: Pointer to HBA context object
2684 * @rxxri: Receive exchange id
2685 * @len: Number of data bytes
2687 * This function allocates and posts a data buffer of sufficient size to receive
2688 * an unsolicited CT command.
2690 static int lpfcdiag_loop_post_rxbufs(struct lpfc_hba *phba, uint16_t rxxri,
2693 struct lpfc_sli *psli = &phba->sli;
2694 struct lpfc_sli_ring *pring = &psli->ring[LPFC_ELS_RING];
2695 struct lpfc_iocbq *cmdiocbq;
2697 struct list_head head, *curr, *next;
2698 struct lpfc_dmabuf *rxbmp;
2699 struct lpfc_dmabuf *dmp;
2700 struct lpfc_dmabuf *mp[2] = {NULL, NULL};
2701 struct ulp_bde64 *rxbpl = NULL;
2703 struct lpfc_dmabufext *rxbuffer = NULL;
2708 cmdiocbq = lpfc_sli_get_iocbq(phba);
2709 rxbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2710 if (rxbmp != NULL) {
2711 rxbmp->virt = lpfc_mbuf_alloc(phba, 0, &rxbmp->phys);
2713 INIT_LIST_HEAD(&rxbmp->list);
2714 rxbpl = (struct ulp_bde64 *) rxbmp->virt;
2715 rxbuffer = diag_cmd_data_alloc(phba, rxbpl, len, 0);
2719 if (!cmdiocbq || !rxbmp || !rxbpl || !rxbuffer) {
2721 goto err_post_rxbufs_exit;
2724 /* Queue buffers for the receive exchange */
2725 num_bde = (uint32_t)rxbuffer->flag;
2726 dmp = &rxbuffer->dma;
2728 cmd = &cmdiocbq->iocb;
2731 INIT_LIST_HEAD(&head);
2732 list_add_tail(&head, &dmp->list);
2733 list_for_each_safe(curr, next, &head) {
2734 mp[i] = list_entry(curr, struct lpfc_dmabuf, list);
2737 if (phba->sli3_options & LPFC_SLI3_HBQ_ENABLED) {
2738 mp[i]->buffer_tag = lpfc_sli_get_buffer_tag(phba);
2739 cmd->un.quexri64cx.buff.bde.addrHigh =
2740 putPaddrHigh(mp[i]->phys);
2741 cmd->un.quexri64cx.buff.bde.addrLow =
2742 putPaddrLow(mp[i]->phys);
2743 cmd->un.quexri64cx.buff.bde.tus.f.bdeSize =
2744 ((struct lpfc_dmabufext *)mp[i])->size;
2745 cmd->un.quexri64cx.buff.buffer_tag = mp[i]->buffer_tag;
2746 cmd->ulpCommand = CMD_QUE_XRI64_CX;
2749 cmd->ulpBdeCount = 1;
2750 cmd->unsli3.que_xri64cx_ext_words.ebde_count = 0;
2753 cmd->un.cont64[i].addrHigh = putPaddrHigh(mp[i]->phys);
2754 cmd->un.cont64[i].addrLow = putPaddrLow(mp[i]->phys);
2755 cmd->un.cont64[i].tus.f.bdeSize =
2756 ((struct lpfc_dmabufext *)mp[i])->size;
2757 cmd->ulpBdeCount = ++i;
2759 if ((--num_bde > 0) && (i < 2))
2762 cmd->ulpCommand = CMD_QUE_XRI_BUF64_CX;
2766 cmd->ulpClass = CLASS3;
2767 cmd->ulpContext = rxxri;
2769 iocb_stat = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq,
2771 if (iocb_stat == IOCB_ERROR) {
2772 diag_cmd_data_free(phba,
2773 (struct lpfc_dmabufext *)mp[0]);
2775 diag_cmd_data_free(phba,
2776 (struct lpfc_dmabufext *)mp[1]);
2777 dmp = list_entry(next, struct lpfc_dmabuf, list);
2779 goto err_post_rxbufs_exit;
2782 lpfc_sli_ringpostbuf_put(phba, pring, mp[0]);
2784 lpfc_sli_ringpostbuf_put(phba, pring, mp[1]);
2788 /* The iocb was freed by lpfc_sli_issue_iocb */
2789 cmdiocbq = lpfc_sli_get_iocbq(phba);
2791 dmp = list_entry(next, struct lpfc_dmabuf, list);
2793 goto err_post_rxbufs_exit;
2796 cmd = &cmdiocbq->iocb;
2801 err_post_rxbufs_exit:
2805 lpfc_mbuf_free(phba, rxbmp->virt, rxbmp->phys);
2810 lpfc_sli_release_iocbq(phba, cmdiocbq);
2815 * lpfc_bsg_diag_loopback_run - run loopback on a port by issue ct cmd to itself
2816 * @job: LPFC_BSG_VENDOR_DIAG_TEST fc_bsg_job
2818 * This function receives a user data buffer to be transmitted and received on
2819 * the same port, the link must be up and in loopback mode prior
2821 * 1. A kernel buffer is allocated to copy the user data into.
2822 * 2. The port registers with "itself".
2823 * 3. The transmit and receive exchange ids are obtained.
2824 * 4. The receive exchange id is posted.
2825 * 5. A new els loopback event is created.
2826 * 6. The command and response iocbs are allocated.
2827 * 7. The cmd iocb FsType is set to elx loopback and the CmdRsp to looppback.
2829 * This function is meant to be called n times while the port is in loopback
2830 * so it is the apps responsibility to issue a reset to take the port out
2834 lpfc_bsg_diag_loopback_run(struct fc_bsg_job *job)
2836 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
2837 struct lpfc_hba *phba = vport->phba;
2838 struct diag_mode_test *diag_mode;
2839 struct lpfc_bsg_event *evt;
2840 struct event_data *evdat;
2841 struct lpfc_sli *psli = &phba->sli;
2844 size_t segment_len = 0, segment_offset = 0, current_offset = 0;
2846 struct lpfc_iocbq *cmdiocbq, *rspiocbq = NULL;
2847 IOCB_t *cmd, *rsp = NULL;
2848 struct lpfc_sli_ct_request *ctreq;
2849 struct lpfc_dmabuf *txbmp;
2850 struct ulp_bde64 *txbpl = NULL;
2851 struct lpfc_dmabufext *txbuffer = NULL;
2852 struct list_head head;
2853 struct lpfc_dmabuf *curr;
2854 uint16_t txxri = 0, rxxri;
2856 uint8_t *ptr = NULL, *rx_databuf = NULL;
2860 unsigned long flags;
2861 void *dataout = NULL;
2864 /* in case no data is returned return just the return code */
2865 job->reply->reply_payload_rcv_len = 0;
2867 if (job->request_len <
2868 sizeof(struct fc_bsg_request) + sizeof(struct diag_mode_test)) {
2869 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
2870 "2739 Received DIAG TEST request below minimum "
2873 goto loopback_test_exit;
2876 if (job->request_payload.payload_len !=
2877 job->reply_payload.payload_len) {
2879 goto loopback_test_exit;
2881 diag_mode = (struct diag_mode_test *)
2882 job->request->rqst_data.h_vendor.vendor_cmd;
2884 if ((phba->link_state == LPFC_HBA_ERROR) ||
2885 (psli->sli_flag & LPFC_BLOCK_MGMT_IO) ||
2886 (!(psli->sli_flag & LPFC_SLI_ACTIVE))) {
2888 goto loopback_test_exit;
2891 if (!lpfc_is_link_up(phba) || !(phba->link_flag & LS_LOOPBACK_MODE)) {
2893 goto loopback_test_exit;
2896 size = job->request_payload.payload_len;
2897 full_size = size + ELX_LOOPBACK_HEADER_SZ; /* plus the header */
2899 if ((size == 0) || (size > 80 * BUF_SZ_4K)) {
2901 goto loopback_test_exit;
2904 if (full_size >= BUF_SZ_4K) {
2906 * Allocate memory for ioctl data. If buffer is bigger than 64k,
2907 * then we allocate 64k and re-use that buffer over and over to
2908 * xfer the whole block. This is because Linux kernel has a
2909 * problem allocating more than 120k of kernel space memory. Saw
2910 * problem with GET_FCPTARGETMAPPING...
2912 if (size <= (64 * 1024))
2913 total_mem = full_size;
2915 total_mem = 64 * 1024;
2917 /* Allocate memory for ioctl data */
2918 total_mem = BUF_SZ_4K;
2920 dataout = kmalloc(total_mem, GFP_KERNEL);
2921 if (dataout == NULL) {
2923 goto loopback_test_exit;
2927 ptr += ELX_LOOPBACK_HEADER_SZ;
2928 sg_copy_to_buffer(job->request_payload.sg_list,
2929 job->request_payload.sg_cnt,
2931 rc = lpfcdiag_loop_self_reg(phba, &rpi);
2933 goto loopback_test_exit;
2935 if (phba->sli_rev < LPFC_SLI_REV4) {
2936 rc = lpfcdiag_loop_get_xri(phba, rpi, &txxri, &rxxri);
2938 lpfcdiag_loop_self_unreg(phba, rpi);
2939 goto loopback_test_exit;
2942 rc = lpfcdiag_loop_post_rxbufs(phba, rxxri, full_size);
2944 lpfcdiag_loop_self_unreg(phba, rpi);
2945 goto loopback_test_exit;
2948 evt = lpfc_bsg_event_new(FC_REG_CT_EVENT, current->pid,
2949 SLI_CT_ELX_LOOPBACK);
2951 lpfcdiag_loop_self_unreg(phba, rpi);
2953 goto loopback_test_exit;
2956 spin_lock_irqsave(&phba->ct_ev_lock, flags);
2957 list_add(&evt->node, &phba->ct_ev_waiters);
2958 lpfc_bsg_event_ref(evt);
2959 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
2961 cmdiocbq = lpfc_sli_get_iocbq(phba);
2962 if (phba->sli_rev < LPFC_SLI_REV4)
2963 rspiocbq = lpfc_sli_get_iocbq(phba);
2964 txbmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
2967 txbmp->virt = lpfc_mbuf_alloc(phba, 0, &txbmp->phys);
2969 INIT_LIST_HEAD(&txbmp->list);
2970 txbpl = (struct ulp_bde64 *) txbmp->virt;
2971 txbuffer = diag_cmd_data_alloc(phba,
2972 txbpl, full_size, 0);
2976 if (!cmdiocbq || !txbmp || !txbpl || !txbuffer || !txbmp->virt) {
2978 goto err_loopback_test_exit;
2980 if ((phba->sli_rev < LPFC_SLI_REV4) && !rspiocbq) {
2982 goto err_loopback_test_exit;
2985 cmd = &cmdiocbq->iocb;
2986 if (phba->sli_rev < LPFC_SLI_REV4)
2987 rsp = &rspiocbq->iocb;
2989 INIT_LIST_HEAD(&head);
2990 list_add_tail(&head, &txbuffer->dma.list);
2991 list_for_each_entry(curr, &head, list) {
2992 segment_len = ((struct lpfc_dmabufext *)curr)->size;
2993 if (current_offset == 0) {
2995 memset(ctreq, 0, ELX_LOOPBACK_HEADER_SZ);
2996 ctreq->RevisionId.bits.Revision = SLI_CT_REVISION;
2997 ctreq->RevisionId.bits.InId = 0;
2998 ctreq->FsType = SLI_CT_ELX_LOOPBACK;
2999 ctreq->FsSubType = 0;
3000 ctreq->CommandResponse.bits.CmdRsp = ELX_LOOPBACK_DATA;
3001 ctreq->CommandResponse.bits.Size = size;
3002 segment_offset = ELX_LOOPBACK_HEADER_SZ;
3006 BUG_ON(segment_offset >= segment_len);
3007 memcpy(curr->virt + segment_offset,
3008 ptr + current_offset,
3009 segment_len - segment_offset);
3011 current_offset += segment_len - segment_offset;
3012 BUG_ON(current_offset > size);
3016 /* Build the XMIT_SEQUENCE iocb */
3017 num_bde = (uint32_t)txbuffer->flag;
3019 cmd->un.xseq64.bdl.addrHigh = putPaddrHigh(txbmp->phys);
3020 cmd->un.xseq64.bdl.addrLow = putPaddrLow(txbmp->phys);
3021 cmd->un.xseq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
3022 cmd->un.xseq64.bdl.bdeSize = (num_bde * sizeof(struct ulp_bde64));
3024 cmd->un.xseq64.w5.hcsw.Fctl = (LS | LA);
3025 cmd->un.xseq64.w5.hcsw.Dfctl = 0;
3026 cmd->un.xseq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CTL;
3027 cmd->un.xseq64.w5.hcsw.Type = FC_TYPE_CT;
3029 cmd->ulpCommand = CMD_XMIT_SEQUENCE64_CX;
3030 cmd->ulpBdeCount = 1;
3032 cmd->ulpClass = CLASS3;
3034 if (phba->sli_rev < LPFC_SLI_REV4) {
3035 cmd->ulpContext = txxri;
3037 cmd->un.xseq64.bdl.ulpIoTag32 = 0;
3038 cmd->un.ulpWord[3] = phba->sli4_hba.rpi_ids[rpi];
3039 cmdiocbq->context3 = txbmp;
3040 cmdiocbq->sli4_xritag = NO_XRI;
3041 cmd->unsli3.rcvsli3.ox_id = 0xffff;
3043 cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
3044 cmdiocbq->vport = phba->pport;
3045 iocb_stat = lpfc_sli_issue_iocb_wait(phba, LPFC_ELS_RING, cmdiocbq,
3046 rspiocbq, (phba->fc_ratov * 2) +
3049 if ((iocb_stat != IOCB_SUCCESS) || ((phba->sli_rev < LPFC_SLI_REV4) &&
3050 (rsp->ulpStatus != IOCB_SUCCESS))) {
3051 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3052 "3126 Failed loopback test issue iocb: "
3053 "iocb_stat:x%x\n", iocb_stat);
3055 goto err_loopback_test_exit;
3059 time_left = wait_event_interruptible_timeout(
3060 evt->wq, !list_empty(&evt->events_to_see),
3061 ((phba->fc_ratov * 2) + LPFC_DRVR_TIMEOUT) * HZ);
3063 if (list_empty(&evt->events_to_see)) {
3064 rc = (time_left) ? -EINTR : -ETIMEDOUT;
3065 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3066 "3125 Not receiving unsolicited event, "
3069 spin_lock_irqsave(&phba->ct_ev_lock, flags);
3070 list_move(evt->events_to_see.prev, &evt->events_to_get);
3071 evdat = list_entry(evt->events_to_get.prev,
3072 typeof(*evdat), node);
3073 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3074 rx_databuf = evdat->data;
3075 if (evdat->len != full_size) {
3076 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3077 "1603 Loopback test did not receive expected "
3078 "data length. actual length 0x%x expected "
3080 evdat->len, full_size);
3082 } else if (rx_databuf == NULL)
3086 /* skip over elx loopback header */
3087 rx_databuf += ELX_LOOPBACK_HEADER_SZ;
3088 job->reply->reply_payload_rcv_len =
3089 sg_copy_from_buffer(job->reply_payload.sg_list,
3090 job->reply_payload.sg_cnt,
3092 job->reply->reply_payload_rcv_len = size;
3096 err_loopback_test_exit:
3097 lpfcdiag_loop_self_unreg(phba, rpi);
3099 spin_lock_irqsave(&phba->ct_ev_lock, flags);
3100 lpfc_bsg_event_unref(evt); /* release ref */
3101 lpfc_bsg_event_unref(evt); /* delete */
3102 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3104 if (cmdiocbq != NULL)
3105 lpfc_sli_release_iocbq(phba, cmdiocbq);
3107 if (rspiocbq != NULL)
3108 lpfc_sli_release_iocbq(phba, rspiocbq);
3110 if (txbmp != NULL) {
3111 if (txbpl != NULL) {
3112 if (txbuffer != NULL)
3113 diag_cmd_data_free(phba, txbuffer);
3114 lpfc_mbuf_free(phba, txbmp->virt, txbmp->phys);
3121 /* make error code available to userspace */
3122 job->reply->result = rc;
3123 job->dd_data = NULL;
3124 /* complete the job back to userspace if no error */
3125 if (rc == IOCB_SUCCESS)
3131 * lpfc_bsg_get_dfc_rev - process a GET_DFC_REV bsg vendor command
3132 * @job: GET_DFC_REV fc_bsg_job
3135 lpfc_bsg_get_dfc_rev(struct fc_bsg_job *job)
3137 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
3138 struct lpfc_hba *phba = vport->phba;
3139 struct get_mgmt_rev *event_req;
3140 struct get_mgmt_rev_reply *event_reply;
3143 if (job->request_len <
3144 sizeof(struct fc_bsg_request) + sizeof(struct get_mgmt_rev)) {
3145 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
3146 "2740 Received GET_DFC_REV request below "
3152 event_req = (struct get_mgmt_rev *)
3153 job->request->rqst_data.h_vendor.vendor_cmd;
3155 event_reply = (struct get_mgmt_rev_reply *)
3156 job->reply->reply_data.vendor_reply.vendor_rsp;
3158 if (job->reply_len <
3159 sizeof(struct fc_bsg_request) + sizeof(struct get_mgmt_rev_reply)) {
3160 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
3161 "2741 Received GET_DFC_REV reply below "
3167 event_reply->info.a_Major = MANAGEMENT_MAJOR_REV;
3168 event_reply->info.a_Minor = MANAGEMENT_MINOR_REV;
3170 job->reply->result = rc;
3177 * lpfc_bsg_issue_mbox_cmpl - lpfc_bsg_issue_mbox mbox completion handler
3178 * @phba: Pointer to HBA context object.
3179 * @pmboxq: Pointer to mailbox command.
3181 * This is completion handler function for mailbox commands issued from
3182 * lpfc_bsg_issue_mbox function. This function is called by the
3183 * mailbox event handler function with no lock held. This function
3184 * will wake up thread waiting on the wait queue pointed by context1
3188 lpfc_bsg_issue_mbox_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
3190 struct bsg_job_data *dd_data;
3191 struct fc_bsg_job *job;
3193 unsigned long flags;
3194 uint8_t *pmb, *pmb_buf;
3196 spin_lock_irqsave(&phba->ct_ev_lock, flags);
3197 dd_data = pmboxq->context1;
3198 /* job already timed out? */
3200 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3205 * The outgoing buffer is readily referred from the dma buffer,
3206 * just need to get header part from mailboxq structure.
3208 pmb = (uint8_t *)&pmboxq->u.mb;
3209 pmb_buf = (uint8_t *)dd_data->context_un.mbox.mb;
3210 memcpy(pmb_buf, pmb, sizeof(MAILBOX_t));
3212 job = dd_data->context_un.mbox.set_job;
3214 size = job->reply_payload.payload_len;
3215 job->reply->reply_payload_rcv_len =
3216 sg_copy_from_buffer(job->reply_payload.sg_list,
3217 job->reply_payload.sg_cnt,
3219 /* need to hold the lock until we set job->dd_data to NULL
3220 * to hold off the timeout handler returning to the mid-layer
3221 * while we are still processing the job.
3223 job->dd_data = NULL;
3224 dd_data->context_un.mbox.set_job = NULL;
3225 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3227 dd_data->context_un.mbox.set_job = NULL;
3228 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3231 mempool_free(dd_data->context_un.mbox.pmboxq, phba->mbox_mem_pool);
3232 lpfc_bsg_dma_page_free(phba, dd_data->context_un.mbox.dmabuffers);
3236 job->reply->result = 0;
3243 * lpfc_bsg_check_cmd_access - test for a supported mailbox command
3244 * @phba: Pointer to HBA context object.
3245 * @mb: Pointer to a mailbox object.
3246 * @vport: Pointer to a vport object.
3248 * Some commands require the port to be offline, some may not be called from
3251 static int lpfc_bsg_check_cmd_access(struct lpfc_hba *phba,
3252 MAILBOX_t *mb, struct lpfc_vport *vport)
3254 /* return negative error values for bsg job */
3255 switch (mb->mbxCommand) {
3259 case MBX_CONFIG_LINK:
3260 case MBX_CONFIG_RING:
3261 case MBX_RESET_RING:
3262 case MBX_UNREG_LOGIN:
3264 case MBX_DUMP_CONTEXT:
3268 if (!(vport->fc_flag & FC_OFFLINE_MODE)) {
3269 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
3270 "2743 Command 0x%x is illegal in on-line "
3276 case MBX_WRITE_VPARMS:
3279 case MBX_READ_CONFIG:
3280 case MBX_READ_RCONFIG:
3281 case MBX_READ_STATUS:
3284 case MBX_READ_LNK_STAT:
3285 case MBX_DUMP_MEMORY:
3287 case MBX_UPDATE_CFG:
3288 case MBX_KILL_BOARD:
3290 case MBX_LOAD_EXP_ROM:
3292 case MBX_DEL_LD_ENTRY:
3295 case MBX_SLI4_CONFIG:
3296 case MBX_READ_EVENT_LOG:
3297 case MBX_READ_EVENT_LOG_STATUS:
3298 case MBX_WRITE_EVENT_LOG:
3299 case MBX_PORT_CAPABILITIES:
3300 case MBX_PORT_IOV_CONTROL:
3301 case MBX_RUN_BIU_DIAG64:
3303 case MBX_SET_VARIABLE:
3304 lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
3305 "1226 mbox: set_variable 0x%x, 0x%x\n",
3307 mb->un.varWords[1]);
3308 if ((mb->un.varWords[0] == SETVAR_MLOMNT)
3309 && (mb->un.varWords[1] == 1)) {
3310 phba->wait_4_mlo_maint_flg = 1;
3311 } else if (mb->un.varWords[0] == SETVAR_MLORST) {
3312 spin_lock_irq(&phba->hbalock);
3313 phba->link_flag &= ~LS_LOOPBACK_MODE;
3314 spin_unlock_irq(&phba->hbalock);
3315 phba->fc_topology = LPFC_TOPOLOGY_PT_PT;
3318 case MBX_READ_SPARM64:
3319 case MBX_READ_TOPOLOGY:
3321 case MBX_REG_LOGIN64:
3322 case MBX_CONFIG_PORT:
3323 case MBX_RUN_BIU_DIAG:
3325 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
3326 "2742 Unknown Command 0x%x\n",
3335 * lpfc_bsg_mbox_ext_session_reset - clean up context of multi-buffer mbox session
3336 * @phba: Pointer to HBA context object.
3338 * This routine cleans up and resets BSG handling of multi-buffer mbox
3342 lpfc_bsg_mbox_ext_session_reset(struct lpfc_hba *phba)
3344 if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_IDLE)
3347 /* free all memory, including dma buffers */
3348 lpfc_bsg_dma_page_list_free(phba,
3349 &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
3350 lpfc_bsg_dma_page_free(phba, phba->mbox_ext_buf_ctx.mbx_dmabuf);
3351 /* multi-buffer write mailbox command pass-through complete */
3352 memset((char *)&phba->mbox_ext_buf_ctx, 0,
3353 sizeof(struct lpfc_mbox_ext_buf_ctx));
3354 INIT_LIST_HEAD(&phba->mbox_ext_buf_ctx.ext_dmabuf_list);
3360 * lpfc_bsg_issue_mbox_ext_handle_job - job handler for multi-buffer mbox cmpl
3361 * @phba: Pointer to HBA context object.
3362 * @pmboxq: Pointer to mailbox command.
3364 * This routine handles the BSG job for mailbox command completions with
3365 * multiple external buffers.
3367 static struct fc_bsg_job *
3368 lpfc_bsg_issue_mbox_ext_handle_job(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
3370 struct bsg_job_data *dd_data;
3371 struct fc_bsg_job *job;
3372 uint8_t *pmb, *pmb_buf;
3373 unsigned long flags;
3376 struct lpfc_dmabuf *dmabuf;
3377 struct lpfc_sli_config_mbox *sli_cfg_mbx;
3380 spin_lock_irqsave(&phba->ct_ev_lock, flags);
3381 dd_data = pmboxq->context1;
3382 /* has the job already timed out? */
3384 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3390 * The outgoing buffer is readily referred from the dma buffer,
3391 * just need to get header part from mailboxq structure.
3393 pmb = (uint8_t *)&pmboxq->u.mb;
3394 pmb_buf = (uint8_t *)dd_data->context_un.mbox.mb;
3395 /* Copy the byte swapped response mailbox back to the user */
3396 memcpy(pmb_buf, pmb, sizeof(MAILBOX_t));
3397 /* if there is any non-embedded extended data copy that too */
3398 dmabuf = phba->mbox_ext_buf_ctx.mbx_dmabuf;
3399 sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
3400 if (!bsg_bf_get(lpfc_mbox_hdr_emb,
3401 &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) {
3402 pmbx = (uint8_t *)dmabuf->virt;
3403 /* byte swap the extended data following the mailbox command */
3404 lpfc_sli_pcimem_bcopy(&pmbx[sizeof(MAILBOX_t)],
3405 &pmbx[sizeof(MAILBOX_t)],
3406 sli_cfg_mbx->un.sli_config_emb0_subsys.mse[0].buf_len);
3409 job = dd_data->context_un.mbox.set_job;
3411 size = job->reply_payload.payload_len;
3412 job->reply->reply_payload_rcv_len =
3413 sg_copy_from_buffer(job->reply_payload.sg_list,
3414 job->reply_payload.sg_cnt,
3416 /* result for successful */
3417 job->reply->result = 0;
3418 job->dd_data = NULL;
3419 /* need to hold the lock util we set job->dd_data to NULL
3420 * to hold off the timeout handler from midlayer to take
3423 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3424 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3425 "2937 SLI_CONFIG ext-buffer maibox command "
3426 "(x%x/x%x) complete bsg job done, bsize:%d\n",
3427 phba->mbox_ext_buf_ctx.nembType,
3428 phba->mbox_ext_buf_ctx.mboxType, size);
3429 lpfc_idiag_mbxacc_dump_bsg_mbox(phba,
3430 phba->mbox_ext_buf_ctx.nembType,
3431 phba->mbox_ext_buf_ctx.mboxType,
3432 dma_ebuf, sta_pos_addr,
3433 phba->mbox_ext_buf_ctx.mbx_dmabuf, 0);
3435 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
3439 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3440 "2938 SLI_CONFIG ext-buffer maibox "
3441 "command (x%x/x%x) failure, rc:x%x\n",
3442 phba->mbox_ext_buf_ctx.nembType,
3443 phba->mbox_ext_buf_ctx.mboxType, rc);
3445 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_DONE;
3452 * lpfc_bsg_issue_read_mbox_ext_cmpl - compl handler for multi-buffer read mbox
3453 * @phba: Pointer to HBA context object.
3454 * @pmboxq: Pointer to mailbox command.
3456 * This is completion handler function for mailbox read commands with multiple
3460 lpfc_bsg_issue_read_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
3462 struct fc_bsg_job *job;
3464 /* handle the BSG job with mailbox command */
3465 if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_ABTS)
3466 pmboxq->u.mb.mbxStatus = MBXERR_ERROR;
3468 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3469 "2939 SLI_CONFIG ext-buffer rd maibox command "
3470 "complete, ctxState:x%x, mbxStatus:x%x\n",
3471 phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus);
3473 job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq);
3475 if (pmboxq->u.mb.mbxStatus || phba->mbox_ext_buf_ctx.numBuf == 1)
3476 lpfc_bsg_mbox_ext_session_reset(phba);
3478 /* free base driver mailbox structure memory */
3479 mempool_free(pmboxq, phba->mbox_mem_pool);
3481 /* complete the bsg job if we have it */
3489 * lpfc_bsg_issue_write_mbox_ext_cmpl - cmpl handler for multi-buffer write mbox
3490 * @phba: Pointer to HBA context object.
3491 * @pmboxq: Pointer to mailbox command.
3493 * This is completion handler function for mailbox write commands with multiple
3497 lpfc_bsg_issue_write_mbox_ext_cmpl(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmboxq)
3499 struct fc_bsg_job *job;
3501 /* handle the BSG job with the mailbox command */
3502 if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_ABTS)
3503 pmboxq->u.mb.mbxStatus = MBXERR_ERROR;
3505 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3506 "2940 SLI_CONFIG ext-buffer wr maibox command "
3507 "complete, ctxState:x%x, mbxStatus:x%x\n",
3508 phba->mbox_ext_buf_ctx.state, pmboxq->u.mb.mbxStatus);
3510 job = lpfc_bsg_issue_mbox_ext_handle_job(phba, pmboxq);
3512 /* free all memory, including dma buffers */
3513 mempool_free(pmboxq, phba->mbox_mem_pool);
3514 lpfc_bsg_mbox_ext_session_reset(phba);
3516 /* complete the bsg job if we have it */
3524 lpfc_bsg_sli_cfg_dma_desc_setup(struct lpfc_hba *phba, enum nemb_type nemb_tp,
3525 uint32_t index, struct lpfc_dmabuf *mbx_dmabuf,
3526 struct lpfc_dmabuf *ext_dmabuf)
3528 struct lpfc_sli_config_mbox *sli_cfg_mbx;
3530 /* pointer to the start of mailbox command */
3531 sli_cfg_mbx = (struct lpfc_sli_config_mbox *)mbx_dmabuf->virt;
3533 if (nemb_tp == nemb_mse) {
3535 sli_cfg_mbx->un.sli_config_emb0_subsys.
3537 putPaddrHigh(mbx_dmabuf->phys +
3539 sli_cfg_mbx->un.sli_config_emb0_subsys.
3541 putPaddrLow(mbx_dmabuf->phys +
3543 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3544 "2943 SLI_CONFIG(mse)[%d], "
3545 "bufLen:%d, addrHi:x%x, addrLo:x%x\n",
3547 sli_cfg_mbx->un.sli_config_emb0_subsys.
3549 sli_cfg_mbx->un.sli_config_emb0_subsys.
3551 sli_cfg_mbx->un.sli_config_emb0_subsys.
3554 sli_cfg_mbx->un.sli_config_emb0_subsys.
3556 putPaddrHigh(ext_dmabuf->phys);
3557 sli_cfg_mbx->un.sli_config_emb0_subsys.
3559 putPaddrLow(ext_dmabuf->phys);
3560 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3561 "2944 SLI_CONFIG(mse)[%d], "
3562 "bufLen:%d, addrHi:x%x, addrLo:x%x\n",
3564 sli_cfg_mbx->un.sli_config_emb0_subsys.
3566 sli_cfg_mbx->un.sli_config_emb0_subsys.
3568 sli_cfg_mbx->un.sli_config_emb0_subsys.
3573 sli_cfg_mbx->un.sli_config_emb1_subsys.
3575 putPaddrHigh(mbx_dmabuf->phys +
3577 sli_cfg_mbx->un.sli_config_emb1_subsys.
3579 putPaddrLow(mbx_dmabuf->phys +
3581 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3582 "3007 SLI_CONFIG(hbd)[%d], "
3583 "bufLen:%d, addrHi:x%x, addrLo:x%x\n",
3585 bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
3587 sli_config_emb1_subsys.hbd[index]),
3588 sli_cfg_mbx->un.sli_config_emb1_subsys.
3590 sli_cfg_mbx->un.sli_config_emb1_subsys.
3594 sli_cfg_mbx->un.sli_config_emb1_subsys.
3596 putPaddrHigh(ext_dmabuf->phys);
3597 sli_cfg_mbx->un.sli_config_emb1_subsys.
3599 putPaddrLow(ext_dmabuf->phys);
3600 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3601 "3008 SLI_CONFIG(hbd)[%d], "
3602 "bufLen:%d, addrHi:x%x, addrLo:x%x\n",
3604 bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
3606 sli_config_emb1_subsys.hbd[index]),
3607 sli_cfg_mbx->un.sli_config_emb1_subsys.
3609 sli_cfg_mbx->un.sli_config_emb1_subsys.
3617 * lpfc_bsg_sli_cfg_mse_read_cmd_ext - sli_config non-embedded mailbox cmd read
3618 * @phba: Pointer to HBA context object.
3619 * @mb: Pointer to a BSG mailbox object.
3620 * @nemb_tp: Enumerate of non-embedded mailbox command type.
3621 * @dmabuff: Pointer to a DMA buffer descriptor.
3623 * This routine performs the SLI_CONFIG (0x9B) read mailbox command operation
3624 * with non-embedded external buffers.
3627 lpfc_bsg_sli_cfg_read_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
3628 enum nemb_type nemb_tp,
3629 struct lpfc_dmabuf *dmabuf)
3631 struct lpfc_sli_config_mbox *sli_cfg_mbx;
3632 struct dfc_mbox_req *mbox_req;
3633 struct lpfc_dmabuf *curr_dmabuf, *next_dmabuf;
3634 uint32_t ext_buf_cnt, ext_buf_index;
3635 struct lpfc_dmabuf *ext_dmabuf = NULL;
3636 struct bsg_job_data *dd_data = NULL;
3637 LPFC_MBOXQ_t *pmboxq = NULL;
3643 (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
3645 /* pointer to the start of mailbox command */
3646 sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
3648 if (nemb_tp == nemb_mse) {
3649 ext_buf_cnt = bsg_bf_get(lpfc_mbox_hdr_mse_cnt,
3650 &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr);
3651 if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_MSE) {
3652 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3653 "2945 Handled SLI_CONFIG(mse) rd, "
3654 "ext_buf_cnt(%d) out of range(%d)\n",
3656 LPFC_MBX_SLI_CONFIG_MAX_MSE);
3660 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3661 "2941 Handled SLI_CONFIG(mse) rd, "
3662 "ext_buf_cnt:%d\n", ext_buf_cnt);
3664 /* sanity check on interface type for support */
3665 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
3666 LPFC_SLI_INTF_IF_TYPE_2) {
3670 /* nemb_tp == nemb_hbd */
3671 ext_buf_cnt = sli_cfg_mbx->un.sli_config_emb1_subsys.hbd_count;
3672 if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_HBD) {
3673 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3674 "2946 Handled SLI_CONFIG(hbd) rd, "
3675 "ext_buf_cnt(%d) out of range(%d)\n",
3677 LPFC_MBX_SLI_CONFIG_MAX_HBD);
3681 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3682 "2942 Handled SLI_CONFIG(hbd) rd, "
3683 "ext_buf_cnt:%d\n", ext_buf_cnt);
3686 /* before dma descriptor setup */
3687 lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_rd, dma_mbox,
3688 sta_pre_addr, dmabuf, ext_buf_cnt);
3690 /* reject non-embedded mailbox command with none external buffer */
3691 if (ext_buf_cnt == 0) {
3694 } else if (ext_buf_cnt > 1) {
3695 /* additional external read buffers */
3696 for (i = 1; i < ext_buf_cnt; i++) {
3697 ext_dmabuf = lpfc_bsg_dma_page_alloc(phba);
3702 list_add_tail(&ext_dmabuf->list,
3703 &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
3707 /* bsg tracking structure */
3708 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
3714 /* mailbox command structure for base driver */
3715 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3720 memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
3722 /* for the first external buffer */
3723 lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, 0, dmabuf, dmabuf);
3725 /* for the rest of external buffer descriptors if any */
3726 if (ext_buf_cnt > 1) {
3728 list_for_each_entry_safe(curr_dmabuf, next_dmabuf,
3729 &phba->mbox_ext_buf_ctx.ext_dmabuf_list, list) {
3730 lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp,
3731 ext_buf_index, dmabuf,
3737 /* after dma descriptor setup */
3738 lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_rd, dma_mbox,
3739 sta_pos_addr, dmabuf, ext_buf_cnt);
3741 /* construct base driver mbox command */
3742 pmb = &pmboxq->u.mb;
3743 pmbx = (uint8_t *)dmabuf->virt;
3744 memcpy(pmb, pmbx, sizeof(*pmb));
3745 pmb->mbxOwner = OWN_HOST;
3746 pmboxq->vport = phba->pport;
3748 /* multi-buffer handling context */
3749 phba->mbox_ext_buf_ctx.nembType = nemb_tp;
3750 phba->mbox_ext_buf_ctx.mboxType = mbox_rd;
3751 phba->mbox_ext_buf_ctx.numBuf = ext_buf_cnt;
3752 phba->mbox_ext_buf_ctx.mbxTag = mbox_req->extMboxTag;
3753 phba->mbox_ext_buf_ctx.seqNum = mbox_req->extSeqNum;
3754 phba->mbox_ext_buf_ctx.mbx_dmabuf = dmabuf;
3756 /* callback for multi-buffer read mailbox command */
3757 pmboxq->mbox_cmpl = lpfc_bsg_issue_read_mbox_ext_cmpl;
3759 /* context fields to callback function */
3760 pmboxq->context1 = dd_data;
3761 dd_data->type = TYPE_MBOX;
3762 dd_data->context_un.mbox.pmboxq = pmboxq;
3763 dd_data->context_un.mbox.mb = (MAILBOX_t *)pmbx;
3764 dd_data->context_un.mbox.set_job = job;
3765 job->dd_data = dd_data;
3768 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
3771 * Non-embedded mailbox subcommand data gets byte swapped here because
3772 * the lower level driver code only does the first 64 mailbox words.
3774 if ((!bsg_bf_get(lpfc_mbox_hdr_emb,
3775 &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) &&
3776 (nemb_tp == nemb_mse))
3777 lpfc_sli_pcimem_bcopy(&pmbx[sizeof(MAILBOX_t)],
3778 &pmbx[sizeof(MAILBOX_t)],
3779 sli_cfg_mbx->un.sli_config_emb0_subsys.
3782 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
3783 if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
3784 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3785 "2947 Issued SLI_CONFIG ext-buffer "
3786 "maibox command, rc:x%x\n", rc);
3787 return SLI_CONFIG_HANDLED;
3789 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3790 "2948 Failed to issue SLI_CONFIG ext-buffer "
3791 "maibox command, rc:x%x\n", rc);
3796 mempool_free(pmboxq, phba->mbox_mem_pool);
3797 lpfc_bsg_dma_page_list_free(phba,
3798 &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
3800 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_IDLE;
3805 * lpfc_bsg_sli_cfg_write_cmd_ext - sli_config non-embedded mailbox cmd write
3806 * @phba: Pointer to HBA context object.
3807 * @job: Pointer to the fc_bsg_job carrying the pass-through request.
3808 * @dmabuf: Pointer to the DMA buffer holding the SLI_CONFIG mailbox command.
3810 * This routine performs SLI_CONFIG (0x9B) write mailbox command operation with
3811 * non-embedded external buffers (@nemb_tp selects MSE vs HBD descriptors).
 *
 * Returns SLI_CONFIG_HANDLED when the command was accepted (either issued to
 * the port for the single-buffer case, or parked waiting for further external
 * buffers from user space); otherwise an error path is taken.
 * NOTE(review): several error/exit lines appear elided in this view — confirm
 * against the full source before relying on the exact cleanup ordering.
3814 lpfc_bsg_sli_cfg_write_cmd_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
3815 enum nemb_type nemb_tp,
3816 struct lpfc_dmabuf *dmabuf)
3818 struct dfc_mbox_req *mbox_req;
3819 struct lpfc_sli_config_mbox *sli_cfg_mbx;
3820 uint32_t ext_buf_cnt;
3821 struct bsg_job_data *dd_data = NULL;
3822 LPFC_MBOXQ_t *pmboxq = NULL;
3825 int rc = SLI_CONFIG_NOT_HANDLED, i;
3828 (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
3830 /* pointer to the start of mailbox command */
3831 sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
 /* Determine the external buffer count from the command header and
  * range-check it against the per-type maximum. */
3833 if (nemb_tp == nemb_mse) {
3834 ext_buf_cnt = bsg_bf_get(lpfc_mbox_hdr_mse_cnt,
3835 &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr);
3836 if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_MSE) {
3837 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3838 "2953 Failed SLI_CONFIG(mse) wr, "
3839 "ext_buf_cnt(%d) out of range(%d)\n",
3841 LPFC_MBX_SLI_CONFIG_MAX_MSE);
3844 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3845 "2949 Handled SLI_CONFIG(mse) wr, "
3846 "ext_buf_cnt:%d\n", ext_buf_cnt);
3848 /* sanity check on interface type for support */
3849 if (bf_get(lpfc_sli_intf_if_type, &phba->sli4_hba.sli_intf) !=
3850 LPFC_SLI_INTF_IF_TYPE_2)
3852 /* nemb_tp == nemb_hbd */
3853 ext_buf_cnt = sli_cfg_mbx->un.sli_config_emb1_subsys.hbd_count;
3854 if (ext_buf_cnt > LPFC_MBX_SLI_CONFIG_MAX_HBD) {
3855 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3856 "2954 Failed SLI_CONFIG(hbd) wr, "
3857 "ext_buf_cnt(%d) out of range(%d)\n",
3859 LPFC_MBX_SLI_CONFIG_MAX_HBD);
3862 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3863 "2950 Handled SLI_CONFIG(hbd) wr, "
3864 "ext_buf_cnt:%d\n", ext_buf_cnt);
3867 /* before dma buffer descriptor setup */
3868 lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_wr, dma_mbox,
3869 sta_pre_addr, dmabuf, ext_buf_cnt);
3871 if (ext_buf_cnt == 0)
3874 /* for the first external buffer */
3875 lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, 0, dmabuf, dmabuf);
3877 /* after dma descriptor setup */
3878 lpfc_idiag_mbxacc_dump_bsg_mbox(phba, nemb_tp, mbox_wr, dma_mbox,
3879 sta_pos_addr, dmabuf, ext_buf_cnt);
3881 /* log for looking forward */
3882 for (i = 1; i < ext_buf_cnt; i++) {
3883 if (nemb_tp == nemb_mse)
3884 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3885 "2951 SLI_CONFIG(mse), buf[%d]-length:%d\n",
3886 i, sli_cfg_mbx->un.sli_config_emb0_subsys.
3889 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3890 "2952 SLI_CONFIG(hbd), buf[%d]-length:%d\n",
3891 i, bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
3892 &sli_cfg_mbx->un.sli_config_emb1_subsys.
3896 /* multi-buffer handling context */
3897 phba->mbox_ext_buf_ctx.nembType = nemb_tp;
3898 phba->mbox_ext_buf_ctx.mboxType = mbox_wr;
3899 phba->mbox_ext_buf_ctx.numBuf = ext_buf_cnt;
3900 phba->mbox_ext_buf_ctx.mbxTag = mbox_req->extMboxTag;
3901 phba->mbox_ext_buf_ctx.seqNum = mbox_req->extSeqNum;
3902 phba->mbox_ext_buf_ctx.mbx_dmabuf = dmabuf;
 /* Single external buffer: the whole command is already present, so
  * build the base-driver mailbox and issue it to the port right away.
  * Multi-buffer: fall through and wait for the remaining external
  * buffers to arrive via subsequent BSG requests. */
3904 if (ext_buf_cnt == 1) {
3905 /* bsg tracking structure */
3906 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
3912 /* mailbox command structure for base driver */
3913 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
3918 memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
3919 pmb = &pmboxq->u.mb;
3920 mbx = (uint8_t *)dmabuf->virt;
3921 memcpy(pmb, mbx, sizeof(*pmb));
3922 pmb->mbxOwner = OWN_HOST;
3923 pmboxq->vport = phba->pport;
3925 /* callback for multi-buffer write mailbox command */
3926 pmboxq->mbox_cmpl = lpfc_bsg_issue_write_mbox_ext_cmpl;
3928 /* context fields to callback function */
3929 pmboxq->context1 = dd_data;
3930 dd_data->type = TYPE_MBOX;
3931 dd_data->context_un.mbox.pmboxq = pmboxq;
3932 dd_data->context_un.mbox.mb = (MAILBOX_t *)mbx;
3933 dd_data->context_un.mbox.set_job = job;
3934 job->dd_data = dd_data;
3937 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
3939 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
3940 if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
3941 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
3942 "2955 Issued SLI_CONFIG ext-buffer "
3943 "maibox command, rc:x%x\n", rc);
3944 return SLI_CONFIG_HANDLED;
3946 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
3947 "2956 Failed to issue SLI_CONFIG ext-buffer "
3948 "maibox command, rc:x%x\n", rc);
3953 /* wait for additional external buffers */
3954 job->reply->result = 0;
3956 return SLI_CONFIG_HANDLED;
3960 mempool_free(pmboxq, phba->mbox_mem_pool);
3967 * lpfc_bsg_handle_sli_cfg_mbox - handle sli-cfg mailbox cmd with ext buffer
3968 * @phba: Pointer to HBA context object.
3969 * @job: Pointer to the fc_bsg_job carrying the pass-through request.
3970 * @dmabuf: Pointer to the DMA buffer holding the SLI_CONFIG mailbox command.
3972 * This routine handles SLI_CONFIG (0x9B) mailbox command with non-embedded
3973 * external buffers, including both 0x9B with non-embedded MSEs and 0x9B
3974 * with embedded subsystem 0x1 and opcodes with external HBDs.
 *
 * Dispatches on the (subsystem, opcode) pair from the SLI_CONFIG header to
 * the read or write ext-buffer handler; unrecognized combinations are either
 * rejected or left as SLI_CONFIG_NOT_HANDLED for normal mailbox processing.
3977 lpfc_bsg_handle_sli_cfg_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
3978 struct lpfc_dmabuf *dmabuf)
3980 struct lpfc_sli_config_mbox *sli_cfg_mbx;
3983 int rc = SLI_CONFIG_NOT_HANDLED;
3985 /* state change on new multi-buffer pass-through mailbox command */
3986 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_HOST;
3988 sli_cfg_mbx = (struct lpfc_sli_config_mbox *)dmabuf->virt;
 /* emb bit clear => emb0 (MSE) layout; emb bit set => emb1 (HBD) layout */
3990 if (!bsg_bf_get(lpfc_mbox_hdr_emb,
3991 &sli_cfg_mbx->un.sli_config_emb0_subsys.sli_config_hdr)) {
3992 subsys = bsg_bf_get(lpfc_emb0_subcmnd_subsys,
3993 &sli_cfg_mbx->un.sli_config_emb0_subsys);
3994 opcode = bsg_bf_get(lpfc_emb0_subcmnd_opcode,
3995 &sli_cfg_mbx->un.sli_config_emb0_subsys);
3996 if (subsys == SLI_CONFIG_SUBSYS_FCOE) {
3998 case FCOE_OPCODE_READ_FCF:
3999 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4000 "2957 Handled SLI_CONFIG "
4001 "subsys_fcoe, opcode:x%x\n",
4003 rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job,
4006 case FCOE_OPCODE_ADD_FCF:
4007 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4008 "2958 Handled SLI_CONFIG "
4009 "subsys_fcoe, opcode:x%x\n",
4011 rc = lpfc_bsg_sli_cfg_write_cmd_ext(phba, job,
4015 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4016 "2959 Reject SLI_CONFIG "
4017 "subsys_fcoe, opcode:x%x\n",
4022 } else if (subsys == SLI_CONFIG_SUBSYS_COMN) {
4024 case COMN_OPCODE_GET_CNTL_ADDL_ATTRIBUTES:
4025 case COMN_OPCODE_GET_CNTL_ATTRIBUTES:
4026 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4027 "3106 Handled SLI_CONFIG "
4028 "subsys_comn, opcode:x%x\n",
4030 rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job,
4034 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4035 "3107 Reject SLI_CONFIG "
4036 "subsys_comn, opcode:x%x\n",
4042 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4043 "2977 Reject SLI_CONFIG "
4044 "subsys:x%d, opcode:x%x\n",
4049 subsys = bsg_bf_get(lpfc_emb1_subcmnd_subsys,
4050 &sli_cfg_mbx->un.sli_config_emb1_subsys);
4051 opcode = bsg_bf_get(lpfc_emb1_subcmnd_opcode,
4052 &sli_cfg_mbx->un.sli_config_emb1_subsys);
4053 if (subsys == SLI_CONFIG_SUBSYS_COMN) {
4055 case COMN_OPCODE_READ_OBJECT:
4056 case COMN_OPCODE_READ_OBJECT_LIST:
4057 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4058 "2960 Handled SLI_CONFIG "
4059 "subsys_comn, opcode:x%x\n",
4061 rc = lpfc_bsg_sli_cfg_read_cmd_ext(phba, job,
4064 case COMN_OPCODE_WRITE_OBJECT:
4065 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4066 "2961 Handled SLI_CONFIG "
4067 "subsys_comn, opcode:x%x\n",
4069 rc = lpfc_bsg_sli_cfg_write_cmd_ext(phba, job,
4073 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4074 "2962 Not handled SLI_CONFIG "
4075 "subsys_comn, opcode:x%x\n",
4077 rc = SLI_CONFIG_NOT_HANDLED;
4081 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4082 "2978 Not handled SLI_CONFIG "
4083 "subsys:x%d, opcode:x%x\n",
4085 rc = SLI_CONFIG_NOT_HANDLED;
4089 /* state reset on not handled new multi-buffer mailbox command */
4090 if (rc != SLI_CONFIG_HANDLED)
4091 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_IDLE;
4097 * lpfc_bsg_mbox_ext_abort - request to abort mbox command with ext buffers
4098 * @phba: Pointer to HBA context object.
4100 * This routine is for requesting to abort a pass-through mailbox command with
4101 * multiple external buffers due to error condition.
 *
 * If the command is outstanding at the port, flag the session for abort;
 * otherwise reset the multi-buffer session state immediately.
 * NOTE(review): an else-branch line appears elided between the two
 * statements in this view — confirm against the full source.
4104 lpfc_bsg_mbox_ext_abort(struct lpfc_hba *phba)
4106 if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_PORT)
4107 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_ABTS;
4109 lpfc_bsg_mbox_ext_session_reset(phba);
4114 * lpfc_bsg_read_ebuf_get - get the next mailbox read external buffer
4115 * @phba: Pointer to HBA context object.
4116 * @job: Pointer to the fc_bsg_job requesting the next buffer.
4118 * This routine extracts the next mailbox read external buffer back to
4119 * user space through BSG.
 *
 * Pops the next DMA buffer from the session's ext_dmabuf_list, copies its
 * contents into the job's reply payload, and resets the session when the
 * last buffer of the sequence has been delivered.
4122 lpfc_bsg_read_ebuf_get(struct lpfc_hba *phba, struct fc_bsg_job *job)
4124 struct lpfc_sli_config_mbox *sli_cfg_mbx;
4125 struct lpfc_dmabuf *dmabuf;
4130 index = phba->mbox_ext_buf_ctx.seqNum;
4131 phba->mbox_ext_buf_ctx.seqNum++;
4133 sli_cfg_mbx = (struct lpfc_sli_config_mbox *)
4134 phba->mbox_ext_buf_ctx.mbx_dmabuf->virt;
 /* size of this buffer comes from the MSE/HBD descriptor at 'index' */
4136 if (phba->mbox_ext_buf_ctx.nembType == nemb_mse) {
4137 size = bsg_bf_get(lpfc_mbox_sli_config_mse_len,
4138 &sli_cfg_mbx->un.sli_config_emb0_subsys.mse[index]);
4139 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4140 "2963 SLI_CONFIG (mse) ext-buffer rd get "
4141 "buffer[%d], size:%d\n", index, size);
4143 size = bsg_bf_get(lpfc_mbox_sli_config_ecmn_hbd_len,
4144 &sli_cfg_mbx->un.sli_config_emb1_subsys.hbd[index]);
4145 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4146 "2964 SLI_CONFIG (hbd) ext-buffer rd get "
4147 "buffer[%d], size:%d\n", index, size);
4149 if (list_empty(&phba->mbox_ext_buf_ctx.ext_dmabuf_list))
4151 dmabuf = list_first_entry(&phba->mbox_ext_buf_ctx.ext_dmabuf_list,
4152 struct lpfc_dmabuf, list);
4153 list_del_init(&dmabuf->list);
4155 /* after dma buffer descriptor setup */
4156 lpfc_idiag_mbxacc_dump_bsg_mbox(phba, phba->mbox_ext_buf_ctx.nembType,
4157 mbox_rd, dma_ebuf, sta_pos_addr,
4160 pbuf = (uint8_t *)dmabuf->virt;
4161 job->reply->reply_payload_rcv_len =
4162 sg_copy_from_buffer(job->reply_payload.sg_list,
4163 job->reply_payload.sg_cnt,
4166 lpfc_bsg_dma_page_free(phba, dmabuf);
 /* last buffer delivered => tear down the multi-buffer session */
4168 if (phba->mbox_ext_buf_ctx.seqNum == phba->mbox_ext_buf_ctx.numBuf) {
4169 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4170 "2965 SLI_CONFIG (hbd) ext-buffer rd mbox "
4171 "command session done\n");
4172 lpfc_bsg_mbox_ext_session_reset(phba);
4175 job->reply->result = 0;
4178 return SLI_CONFIG_HANDLED;
4182 * lpfc_bsg_write_ebuf_set - set the next mailbox write external buffer
4183 * @phba: Pointer to HBA context object.
4184 * @dmabuf: Pointer to a DMA buffer descriptor.
4186 * This routine sets up the next mailbox write external buffer obtained
4187 * from user space through BSG.
 *
 * Copies the user payload into @dmabuf, links it into the session's
 * external buffer list, and — once the final buffer has arrived — builds
 * and issues the accumulated mailbox command to the port.
4190 lpfc_bsg_write_ebuf_set(struct lpfc_hba *phba, struct fc_bsg_job *job,
4191 struct lpfc_dmabuf *dmabuf)
4193 struct lpfc_sli_config_mbox *sli_cfg_mbx;
4194 struct bsg_job_data *dd_data = NULL;
4195 LPFC_MBOXQ_t *pmboxq = NULL;
4197 enum nemb_type nemb_tp;
4203 index = phba->mbox_ext_buf_ctx.seqNum;
4204 phba->mbox_ext_buf_ctx.seqNum++;
4205 nemb_tp = phba->mbox_ext_buf_ctx.nembType;
4207 sli_cfg_mbx = (struct lpfc_sli_config_mbox *)
4208 phba->mbox_ext_buf_ctx.mbx_dmabuf->virt;
4210 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
4216 pbuf = (uint8_t *)dmabuf->virt;
4217 size = job->request_payload.payload_len;
4218 sg_copy_to_buffer(job->request_payload.sg_list,
4219 job->request_payload.sg_cnt,
4222 if (phba->mbox_ext_buf_ctx.nembType == nemb_mse) {
4223 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4224 "2966 SLI_CONFIG (mse) ext-buffer wr set "
4225 "buffer[%d], size:%d\n",
4226 phba->mbox_ext_buf_ctx.seqNum, size);
4229 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4230 "2967 SLI_CONFIG (hbd) ext-buffer wr set "
4231 "buffer[%d], size:%d\n",
4232 phba->mbox_ext_buf_ctx.seqNum, size);
4236 /* set up external buffer descriptor and add to external buffer list */
4237 lpfc_bsg_sli_cfg_dma_desc_setup(phba, nemb_tp, index,
4238 phba->mbox_ext_buf_ctx.mbx_dmabuf,
4240 list_add_tail(&dmabuf->list, &phba->mbox_ext_buf_ctx.ext_dmabuf_list);
4242 /* after write dma buffer */
4243 lpfc_idiag_mbxacc_dump_bsg_mbox(phba, phba->mbox_ext_buf_ctx.nembType,
4244 mbox_wr, dma_ebuf, sta_pos_addr,
 /* all external buffers received => issue the mailbox to the port now */
4247 if (phba->mbox_ext_buf_ctx.seqNum == phba->mbox_ext_buf_ctx.numBuf) {
4248 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4249 "2968 SLI_CONFIG ext-buffer wr all %d "
4250 "ebuffers received\n",
4251 phba->mbox_ext_buf_ctx.numBuf);
4252 /* mailbox command structure for base driver */
4253 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4258 memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
4259 pbuf = (uint8_t *)phba->mbox_ext_buf_ctx.mbx_dmabuf->virt;
4260 pmb = &pmboxq->u.mb;
4261 memcpy(pmb, pbuf, sizeof(*pmb));
4262 pmb->mbxOwner = OWN_HOST;
4263 pmboxq->vport = phba->pport;
4265 /* callback for multi-buffer write mailbox command */
4266 pmboxq->mbox_cmpl = lpfc_bsg_issue_write_mbox_ext_cmpl;
4268 /* context fields to callback function */
4269 pmboxq->context1 = dd_data;
4270 dd_data->type = TYPE_MBOX;
4271 dd_data->context_un.mbox.pmboxq = pmboxq;
4272 dd_data->context_un.mbox.mb = (MAILBOX_t *)pbuf;
4273 dd_data->context_un.mbox.set_job = job;
4274 job->dd_data = dd_data;
4277 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_PORT;
4279 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
4280 if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY)) {
4281 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4282 "2969 Issued SLI_CONFIG ext-buffer "
4283 "maibox command, rc:x%x\n", rc);
4284 return SLI_CONFIG_HANDLED;
4286 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4287 "2970 Failed to issue SLI_CONFIG ext-buffer "
4288 "maibox command, rc:x%x\n", rc);
4293 /* wait for additional external buffers */
4294 job->reply->result = 0;
4296 return SLI_CONFIG_HANDLED;
4299 lpfc_bsg_dma_page_free(phba, dmabuf);
4306 * lpfc_bsg_handle_sli_cfg_ebuf - handle ext buffer with sli-cfg mailbox cmd
4307 * @phba: Pointer to HBA context object.
4308 * @job: Pointer to the fc_bsg_job carrying the external buffer.
4309 * @dmabuf: Pointer to a DMA buffer descriptor.
4311 * This routine handles the external buffer with SLI_CONFIG (0x9B) mailbox
4312 * command with multiple non-embedded external buffers.
 *
 * Validates the session state for the direction in progress and dispatches
 * to the read-get or write-set handler; a state mismatch aborts the session.
4315 lpfc_bsg_handle_sli_cfg_ebuf(struct lpfc_hba *phba, struct fc_bsg_job *job,
4316 struct lpfc_dmabuf *dmabuf)
4320 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4321 "2971 SLI_CONFIG buffer (type:x%x)\n",
4322 phba->mbox_ext_buf_ctx.mboxType);
4324 if (phba->mbox_ext_buf_ctx.mboxType == mbox_rd) {
 /* reads must have completed at the port before buffers are fetched */
4325 if (phba->mbox_ext_buf_ctx.state != LPFC_BSG_MBOX_DONE) {
4326 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4327 "2972 SLI_CONFIG rd buffer state "
4329 phba->mbox_ext_buf_ctx.state);
4330 lpfc_bsg_mbox_ext_abort(phba);
4333 rc = lpfc_bsg_read_ebuf_get(phba, job);
4334 if (rc == SLI_CONFIG_HANDLED)
4335 lpfc_bsg_dma_page_free(phba, dmabuf);
4336 } else { /* phba->mbox_ext_buf_ctx.mboxType == mbox_wr */
 /* writes must still be collecting buffers on the host side */
4337 if (phba->mbox_ext_buf_ctx.state != LPFC_BSG_MBOX_HOST) {
4338 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4339 "2973 SLI_CONFIG wr buffer state "
4341 phba->mbox_ext_buf_ctx.state);
4342 lpfc_bsg_mbox_ext_abort(phba);
4345 rc = lpfc_bsg_write_ebuf_set(phba, job, dmabuf);
4351 * lpfc_bsg_handle_sli_cfg_ext - handle sli-cfg mailbox with external buffer
4352 * @phba: Pointer to HBA context object.
4353 * @job: Pointer to the fc_bsg_job carrying the pass-through request.
4354 * @dmabuf: Pointer to a DMA buffer descriptor.
4356 * This routine checks and handles non-embedded multi-buffer SLI_CONFIG
4357 * (0x9B) mailbox commands and external buffers.
 *
 * Entry point for the multi-buffer state machine: a (tag 0, seq 0) request
 * is an ordinary single-buffer command; seq 1 on an idle session starts a
 * new multi-buffer session; later sequence numbers must match the session's
 * tag and expected seq+1, otherwise the pipe is considered broken and the
 * session is reset.
4360 lpfc_bsg_handle_sli_cfg_ext(struct lpfc_hba *phba, struct fc_bsg_job *job,
4361 struct lpfc_dmabuf *dmabuf)
4363 struct dfc_mbox_req *mbox_req;
4364 int rc = SLI_CONFIG_NOT_HANDLED;
4367 (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
4369 /* mbox command with/without single external buffer */
4370 if (mbox_req->extMboxTag == 0 && mbox_req->extSeqNum == 0)
4373 /* mbox command and first external buffer */
4374 if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_IDLE) {
4375 if (mbox_req->extSeqNum == 1) {
4376 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4377 "2974 SLI_CONFIG mailbox: tag:%d, "
4378 "seq:%d\n", mbox_req->extMboxTag,
4379 mbox_req->extSeqNum);
4380 rc = lpfc_bsg_handle_sli_cfg_mbox(phba, job, dmabuf);
4383 goto sli_cfg_ext_error;
4387 * handle additional external buffers
4390 /* check broken pipe conditions */
4391 if (mbox_req->extMboxTag != phba->mbox_ext_buf_ctx.mbxTag)
4392 goto sli_cfg_ext_error;
4393 if (mbox_req->extSeqNum > phba->mbox_ext_buf_ctx.numBuf)
4394 goto sli_cfg_ext_error;
4395 if (mbox_req->extSeqNum != phba->mbox_ext_buf_ctx.seqNum + 1)
4396 goto sli_cfg_ext_error;
4398 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4399 "2975 SLI_CONFIG mailbox external buffer: "
4400 "extSta:x%x, tag:%d, seq:%d\n",
4401 phba->mbox_ext_buf_ctx.state, mbox_req->extMboxTag,
4402 mbox_req->extSeqNum);
4403 rc = lpfc_bsg_handle_sli_cfg_ebuf(phba, job, dmabuf);
4407 /* all other cases, broken pipe */
4408 lpfc_printf_log(phba, KERN_ERR, LOG_LIBDFC,
4409 "2976 SLI_CONFIG mailbox broken pipe: "
4410 "ctxSta:x%x, ctxNumBuf:%d "
4411 "ctxTag:%d, ctxSeq:%d, tag:%d, seq:%d\n",
4412 phba->mbox_ext_buf_ctx.state,
4413 phba->mbox_ext_buf_ctx.numBuf,
4414 phba->mbox_ext_buf_ctx.mbxTag,
4415 phba->mbox_ext_buf_ctx.seqNum,
4416 mbox_req->extMboxTag, mbox_req->extSeqNum);
4418 lpfc_bsg_mbox_ext_session_reset(phba);
4424 * lpfc_bsg_issue_mbox - issues a mailbox command on behalf of an app
4425 * @phba: Pointer to HBA context object.
4426 * @job: Pointer to the fc_bsg_job carrying the mailbox request.
4427 * @vport: Pointer to a vport object.
4429 * Allocate a tracking object, mailbox command memory, get a mailbox
4430 * from the mailbox pool, copy the caller mailbox command.
4432 * If offline and the sli is active we need to poll for the command (port is
4433 * being reset) and complete the job, otherwise issue the mailbox command and
4434 * let our completion handler finish the command.
 *
 * NOTE(review): numerous error-path lines (returns, gotos, closing braces)
 * appear elided from this view — the inline comments below describe only
 * what the visible statements establish.
4437 lpfc_bsg_issue_mbox(struct lpfc_hba *phba, struct fc_bsg_job *job,
4438 struct lpfc_vport *vport)
4440 LPFC_MBOXQ_t *pmboxq = NULL; /* internal mailbox queue */
4441 MAILBOX_t *pmb; /* shortcut to the pmboxq mailbox */
4442 /* a 4k buffer to hold the mb and extended data from/to the bsg */
4443 uint8_t *pmbx = NULL;
4444 struct bsg_job_data *dd_data = NULL; /* bsg data tracking structure */
4445 struct lpfc_dmabuf *dmabuf = NULL;
4446 struct dfc_mbox_req *mbox_req;
4447 struct READ_EVENT_LOG_VAR *rdEventLog;
4448 uint32_t transmit_length, receive_length, mode;
4449 struct lpfc_mbx_sli4_config *sli4_config;
4450 struct lpfc_mbx_nembed_cmd *nembed_sge;
4451 struct mbox_header *header;
4452 struct ulp_bde64 *bde;
4453 uint8_t *ext = NULL;
4459 /* in case no data is transferred */
4460 job->reply->reply_payload_rcv_len = 0;
4462 /* sanity check to protect driver */
4463 if (job->reply_payload.payload_len > BSG_MBOX_SIZE ||
4464 job->request_payload.payload_len > BSG_MBOX_SIZE) {
4470 * Don't allow mailbox commands to be sent when blocked or when in
4471 * the middle of discovery
4473 if (phba->sli.sli_flag & LPFC_BLOCK_MGMT_IO) {
4479 (struct dfc_mbox_req *)job->request->rqst_data.h_vendor.vendor_cmd;
4481 /* check if requested extended data lengths are valid */
4482 if ((mbox_req->inExtWLen > BSG_MBOX_SIZE/sizeof(uint32_t)) ||
4483 (mbox_req->outExtWLen > BSG_MBOX_SIZE/sizeof(uint32_t))) {
4488 dmabuf = lpfc_bsg_dma_page_alloc(phba);
4489 if (!dmabuf || !dmabuf->virt) {
4494 /* Get the mailbox command or external buffer from BSG */
4495 pmbx = (uint8_t *)dmabuf->virt;
4496 size = job->request_payload.payload_len;
4497 sg_copy_to_buffer(job->request_payload.sg_list,
4498 job->request_payload.sg_cnt, pmbx, size);
4500 /* Handle possible SLI_CONFIG with non-embedded payloads */
4501 if (phba->sli_rev == LPFC_SLI_REV4) {
4502 rc = lpfc_bsg_handle_sli_cfg_ext(phba, job, dmabuf);
4503 if (rc == SLI_CONFIG_HANDLED)
4507 /* SLI_CONFIG_NOT_HANDLED for other mailbox commands */
4510 rc = lpfc_bsg_check_cmd_access(phba, (MAILBOX_t *)pmbx, vport);
4512 goto job_done; /* must be negative */
4514 /* allocate our bsg tracking structure */
4515 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
4517 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
4518 "2727 Failed allocation of dd_data\n");
4523 pmboxq = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL);
4528 memset(pmboxq, 0, sizeof(LPFC_MBOXQ_t));
4530 pmb = &pmboxq->u.mb;
4531 memcpy(pmb, pmbx, sizeof(*pmb));
4532 pmb->mbxOwner = OWN_HOST;
4533 pmboxq->vport = vport;
4535 /* If HBA encountered an error attention, allow only DUMP
4536 * or RESTART mailbox commands until the HBA is restarted.
4538 if (phba->pport->stopped &&
4539 pmb->mbxCommand != MBX_DUMP_MEMORY &&
4540 pmb->mbxCommand != MBX_RESTART &&
4541 pmb->mbxCommand != MBX_WRITE_VPARMS &&
4542 pmb->mbxCommand != MBX_WRITE_WWN)
4543 lpfc_printf_log(phba, KERN_WARNING, LOG_MBOX,
4544 "2797 mbox: Issued mailbox cmd "
4545 "0x%x while in stopped state.\n",
4548 /* extended mailbox commands will need an extended buffer */
4549 if (mbox_req->inExtWLen || mbox_req->outExtWLen) {
4551 ext = from + sizeof(MAILBOX_t);
4552 pmboxq->context2 = ext;
4553 pmboxq->in_ext_byte_len =
4554 mbox_req->inExtWLen * sizeof(uint32_t);
4555 pmboxq->out_ext_byte_len =
4556 mbox_req->outExtWLen * sizeof(uint32_t);
4557 pmboxq->mbox_offset_word = mbox_req->mbOffset;
4560 /* biu diag will need a kernel buffer to transfer the data
4561 * allocate our own buffer and setup the mailbox command to
4564 if (pmb->mbxCommand == MBX_RUN_BIU_DIAG64) {
4565 transmit_length = pmb->un.varWords[1];
4566 receive_length = pmb->un.varWords[4];
4567 /* transmit length cannot be greater than receive length or
4568 * mailbox extension size
4570 if ((transmit_length > receive_length) ||
4571 (transmit_length > BSG_MBOX_SIZE - sizeof(MAILBOX_t))) {
 /* point xmit/rcv BDEs at the DMA page just past the mailbox header */
4575 pmb->un.varBIUdiag.un.s2.xmit_bde64.addrHigh =
4576 putPaddrHigh(dmabuf->phys + sizeof(MAILBOX_t));
4577 pmb->un.varBIUdiag.un.s2.xmit_bde64.addrLow =
4578 putPaddrLow(dmabuf->phys + sizeof(MAILBOX_t));
4580 pmb->un.varBIUdiag.un.s2.rcv_bde64.addrHigh =
4581 putPaddrHigh(dmabuf->phys + sizeof(MAILBOX_t)
4582 + pmb->un.varBIUdiag.un.s2.xmit_bde64.tus.f.bdeSize);
4583 pmb->un.varBIUdiag.un.s2.rcv_bde64.addrLow =
4584 putPaddrLow(dmabuf->phys + sizeof(MAILBOX_t)
4585 + pmb->un.varBIUdiag.un.s2.xmit_bde64.tus.f.bdeSize);
4586 } else if (pmb->mbxCommand == MBX_READ_EVENT_LOG) {
4587 rdEventLog = &pmb->un.varRdEventLog;
4588 receive_length = rdEventLog->rcv_bde64.tus.f.bdeSize;
4589 mode = bf_get(lpfc_event_log, rdEventLog);
4591 /* receive length cannot be greater than mailbox
4594 if (receive_length > BSG_MBOX_SIZE - sizeof(MAILBOX_t)) {
4599 /* mode zero uses a bde like biu diags command */
4601 pmb->un.varWords[3] = putPaddrLow(dmabuf->phys
4602 + sizeof(MAILBOX_t));
4603 pmb->un.varWords[4] = putPaddrHigh(dmabuf->phys
4604 + sizeof(MAILBOX_t));
4606 } else if (phba->sli_rev == LPFC_SLI_REV4) {
4607 /* Let type 4 (well known data) through because the data is
4608 * returned in varwords[4-8]
4609 * otherwise check the receive length and fetch the buffer addr
4611 if ((pmb->mbxCommand == MBX_DUMP_MEMORY) &&
4612 (pmb->un.varDmp.type != DMP_WELL_KNOWN)) {
4613 /* rebuild the command for sli4 using our own buffers
4614 * like we do for biu diags
4616 receive_length = pmb->un.varWords[2];
4617 /* receive length cannot be greater than mailbox
4620 if (receive_length == 0) {
4624 pmb->un.varWords[3] = putPaddrLow(dmabuf->phys
4625 + sizeof(MAILBOX_t));
4626 pmb->un.varWords[4] = putPaddrHigh(dmabuf->phys
4627 + sizeof(MAILBOX_t));
4628 } else if ((pmb->mbxCommand == MBX_UPDATE_CFG) &&
4629 pmb->un.varUpdateCfg.co) {
4630 bde = (struct ulp_bde64 *)&pmb->un.varWords[4];
4632 /* bde size cannot be greater than mailbox ext size */
4633 if (bde->tus.f.bdeSize >
4634 BSG_MBOX_SIZE - sizeof(MAILBOX_t)) {
4638 bde->addrHigh = putPaddrHigh(dmabuf->phys
4639 + sizeof(MAILBOX_t));
4640 bde->addrLow = putPaddrLow(dmabuf->phys
4641 + sizeof(MAILBOX_t));
4642 } else if (pmb->mbxCommand == MBX_SLI4_CONFIG) {
4643 /* Handling non-embedded SLI_CONFIG mailbox command */
4644 sli4_config = &pmboxq->u.mqe.un.sli4_config;
4645 if (!bf_get(lpfc_mbox_hdr_emb,
4646 &sli4_config->header.cfg_mhdr)) {
4647 /* rebuild the command for sli4 using our
4648 * own buffers like we do for biu diags
4650 header = (struct mbox_header *)
4651 &pmb->un.varWords[0];
4652 nembed_sge = (struct lpfc_mbx_nembed_cmd *)
4653 &pmb->un.varWords[0];
4654 receive_length = nembed_sge->sge[0].length;
4656 /* receive length cannot be greater than
4657 * mailbox extension size
4659 if ((receive_length == 0) ||
4661 BSG_MBOX_SIZE - sizeof(MAILBOX_t))) {
4666 nembed_sge->sge[0].pa_hi =
4667 putPaddrHigh(dmabuf->phys
4668 + sizeof(MAILBOX_t));
4669 nembed_sge->sge[0].pa_lo =
4670 putPaddrLow(dmabuf->phys
4671 + sizeof(MAILBOX_t));
4676 dd_data->context_un.mbox.dmabuffers = dmabuf;
4678 /* setup wake call as IOCB callback */
4679 pmboxq->mbox_cmpl = lpfc_bsg_issue_mbox_cmpl;
4681 /* setup context field to pass wait_queue pointer to wake function */
4682 pmboxq->context1 = dd_data;
4683 dd_data->type = TYPE_MBOX;
4684 dd_data->context_un.mbox.pmboxq = pmboxq;
4685 dd_data->context_un.mbox.mb = (MAILBOX_t *)pmbx;
4686 dd_data->context_un.mbox.set_job = job;
4687 dd_data->context_un.mbox.ext = ext;
4688 dd_data->context_un.mbox.mbOffset = mbox_req->mbOffset;
4689 dd_data->context_un.mbox.inExtWLen = mbox_req->inExtWLen;
4690 dd_data->context_un.mbox.outExtWLen = mbox_req->outExtWLen;
4691 job->dd_data = dd_data;
 /* offline or SLI inactive: poll synchronously and finish inline;
  * otherwise issue asynchronously and let the completion handler
  * finish the job */
4693 if ((vport->fc_flag & FC_OFFLINE_MODE) ||
4694 (!(phba->sli.sli_flag & LPFC_SLI_ACTIVE))) {
4695 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_POLL);
4696 if (rc != MBX_SUCCESS) {
4697 rc = (rc == MBX_TIMEOUT) ? -ETIME : -ENODEV;
4701 /* job finished, copy the data */
4702 memcpy(pmbx, pmb, sizeof(*pmb));
4703 job->reply->reply_payload_rcv_len =
4704 sg_copy_from_buffer(job->reply_payload.sg_list,
4705 job->reply_payload.sg_cnt,
4707 /* not waiting mbox already done */
4712 rc = lpfc_sli_issue_mbox(phba, pmboxq, MBX_NOWAIT);
4713 if ((rc == MBX_SUCCESS) || (rc == MBX_BUSY))
4714 return 1; /* job started */
4717 /* common exit for error or job completed inline */
4719 mempool_free(pmboxq, phba->mbox_mem_pool);
4720 lpfc_bsg_dma_page_free(phba, dmabuf);
4728 * lpfc_bsg_mbox_cmd - process an fc bsg LPFC_BSG_VENDOR_MBOX command
4729 * @job: MBOX fc_bsg_job for LPFC_BSG_VENDOR_MBOX.
 *
 * Entry point for the vendor mailbox BSG command. Tolerates the older,
 * shorter dfc_mbox_req layout by zeroing the ext-buffer fields it lacks,
 * then hands the request to lpfc_bsg_issue_mbox(); a return of 1 from that
 * routine means the job was started and will complete asynchronously.
4732 lpfc_bsg_mbox_cmd(struct fc_bsg_job *job)
4734 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
4735 struct lpfc_hba *phba = vport->phba;
4736 struct dfc_mbox_req *mbox_req;
4739 /* mix-and-match backward compatibility */
4740 job->reply->reply_payload_rcv_len = 0;
4741 if (job->request_len <
4742 sizeof(struct fc_bsg_request) + sizeof(struct dfc_mbox_req)) {
4743 lpfc_printf_log(phba, KERN_INFO, LOG_LIBDFC,
4744 "2737 Mix-and-match backward compability "
4745 "between MBOX_REQ old size:%d and "
4746 "new request size:%d\n",
4747 (int)(job->request_len -
4748 sizeof(struct fc_bsg_request)),
4749 (int)sizeof(struct dfc_mbox_req));
4750 mbox_req = (struct dfc_mbox_req *)
4751 job->request->rqst_data.h_vendor.vendor_cmd;
 /* old layout has no ext-buffer fields; treat as single-buffer */
4752 mbox_req->extMboxTag = 0;
4753 mbox_req->extSeqNum = 0;
4756 rc = lpfc_bsg_issue_mbox(phba, job, vport);
4760 job->reply->result = 0;
4761 job->dd_data = NULL;
4764 /* job submitted, will complete later*/
4765 rc = 0; /* return zero, no error */
4767 /* some error occurred */
4768 job->reply->result = rc;
4769 job->dd_data = NULL;
4776 * lpfc_bsg_menlo_cmd_cmp - lpfc_menlo_cmd completion handler
4777 * @phba: Pointer to HBA context object.
4778 * @cmdiocbq: Pointer to command iocb.
4779 * @rspiocbq: Pointer to response iocb.
4781 * This function is the completion handler for iocbs issued using
4782 * lpfc_menlo_cmd function. This function is called by the
4783 * ring event handler function without any lock held. This function
4784 * can be called from both worker thread context and interrupt
4785 * context. This function also can be called from another thread which
4786 * cleans up the SLI layer objects.
4787 * This function copies the contents of the response iocb to the
4788 * response iocb memory object provided by the caller of
4789 * lpfc_sli_issue_iocb_wait and then wakes up the thread which
4790 * sleeps for the iocb completion.
4793 lpfc_bsg_menlo_cmd_cmp(struct lpfc_hba *phba,
4794 struct lpfc_iocbq *cmdiocbq,
4795 struct lpfc_iocbq *rspiocbq)
4797 struct bsg_job_data *dd_data;
4798 struct fc_bsg_job *job;
4800 struct lpfc_dmabuf *bmp;
4801 struct lpfc_bsg_menlo *menlo;
4802 unsigned long flags;
4803 struct menlo_response *menlo_resp;
4806 spin_lock_irqsave(&phba->ct_ev_lock, flags);
4807 dd_data = cmdiocbq->context1;
4809 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
4813 menlo = &dd_data->context_un.menlo;
4814 job = menlo->set_job;
4815 job->dd_data = NULL; /* so timeout handler does not reply */
 /* mark the command awake and mirror the response iocb for any waiter */
4817 spin_lock(&phba->hbalock);
4818 cmdiocbq->iocb_flag |= LPFC_IO_WAKE;
4819 if (cmdiocbq->context2 && rspiocbq)
4820 memcpy(&((struct lpfc_iocbq *)cmdiocbq->context2)->iocb,
4821 &rspiocbq->iocb, sizeof(IOCB_t));
4822 spin_unlock(&phba->hbalock);
4825 rspiocbq = menlo->rspiocbq;
4826 rsp = &rspiocbq->iocb;
4828 pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
4829 job->request_payload.sg_cnt, DMA_TO_DEVICE);
4830 pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
4831 job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
4833 /* always return the xri, this would be used in the case
4834 * of a menlo download to allow the data to be sent as a continuation
4837 menlo_resp = (struct menlo_response *)
4838 job->reply->reply_data.vendor_reply.vendor_rsp;
4839 menlo_resp->xri = rsp->ulpContext;
4840 if (rsp->ulpStatus) {
4841 if (rsp->ulpStatus == IOSTAT_LOCAL_REJECT) {
4842 switch (rsp->un.ulpWord[4] & IOERR_PARAM_MASK) {
4843 case IOERR_SEQUENCE_TIMEOUT:
4846 case IOERR_INVALID_RPI:
4856 job->reply->reply_payload_rcv_len =
4857 rsp->un.genreq64.bdl.bdeSize;
4859 lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
4860 lpfc_sli_release_iocbq(phba, rspiocbq);
4861 lpfc_sli_release_iocbq(phba, cmdiocbq);
4864 /* make error code available to userspace */
4865 job->reply->result = rc;
4866 /* complete the job back to userspace */
4868 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
4873 * lpfc_menlo_cmd - send an ioctl for menlo hardware
4874 * @job: fc_bsg_job to handle
4876 * This function issues a gen request 64 CR ioctl for all menlo cmd requests,
4877 * all the command completions will return the xri for the command.
4878 * For menlo data requests a gen request 64 CX is used to continue the exchange
4879 * supplied in the menlo request header xri field.
4882 lpfc_menlo_cmd(struct fc_bsg_job *job)
4884 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
4885 struct lpfc_hba *phba = vport->phba;
4886 struct lpfc_iocbq *cmdiocbq, *rspiocbq;
4889 struct menlo_command *menlo_cmd;
4890 struct menlo_response *menlo_resp;
4891 struct lpfc_dmabuf *bmp = NULL;
4894 struct scatterlist *sgel = NULL;
4897 struct bsg_job_data *dd_data;
4898 struct ulp_bde64 *bpl = NULL;
4900 /* in case no data is returned return just the return code */
4901 job->reply->reply_payload_rcv_len = 0;
/* Validate that the bsg request and reply buffers are big enough to
 * carry a menlo_command / menlo_response before touching them.
 */
4903 if (job->request_len <
4904 sizeof(struct fc_bsg_request) +
4905 sizeof(struct menlo_command)) {
4906 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
4907 "2784 Received MENLO_CMD request below "
4913 if (job->reply_len <
4914 sizeof(struct fc_bsg_request) + sizeof(struct menlo_response)) {
4915 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
4916 "2785 Received MENLO_CMD reply below "
4922 if (!(phba->menlo_flag & HBA_MENLO_SUPPORT)) {
4923 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
4924 "2786 Adapter does not support menlo "
4930 menlo_cmd = (struct menlo_command *)
4931 job->request->rqst_data.h_vendor.vendor_cmd;
4933 menlo_resp = (struct menlo_response *)
4934 job->reply->reply_data.vendor_reply.vendor_rsp;
4936 /* allocate our bsg tracking structure */
4937 dd_data = kmalloc(sizeof(struct bsg_job_data), GFP_KERNEL);
4939 lpfc_printf_log(phba, KERN_WARNING, LOG_LIBDFC,
4940 "2787 Failed allocation of dd_data\n");
4945 bmp = kmalloc(sizeof(struct lpfc_dmabuf), GFP_KERNEL);
4951 cmdiocbq = lpfc_sli_get_iocbq(phba);
4957 rspiocbq = lpfc_sli_get_iocbq(phba);
4963 rsp = &rspiocbq->iocb;
4965 bmp->virt = lpfc_mbuf_alloc(phba, 0, &bmp->phys);
4971 INIT_LIST_HEAD(&bmp->list);
/* Build the buffer pointer list: request BDEs first, reply BDEs after */
4972 bpl = (struct ulp_bde64 *) bmp->virt;
4973 request_nseg = pci_map_sg(phba->pcidev, job->request_payload.sg_list,
4974 job->request_payload.sg_cnt, DMA_TO_DEVICE);
4975 for_each_sg(job->request_payload.sg_list, sgel, request_nseg, numbde) {
4976 busaddr = sg_dma_address(sgel);
4977 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64;
4978 bpl->tus.f.bdeSize = sg_dma_len(sgel);
4979 bpl->tus.w = cpu_to_le32(bpl->tus.w);
4980 bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
4981 bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
4985 reply_nseg = pci_map_sg(phba->pcidev, job->reply_payload.sg_list,
4986 job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
4987 for_each_sg(job->reply_payload.sg_list, sgel, reply_nseg, numbde) {
4988 busaddr = sg_dma_address(sgel);
4989 bpl->tus.f.bdeFlags = BUFF_TYPE_BDE_64I;
4990 bpl->tus.f.bdeSize = sg_dma_len(sgel);
4991 bpl->tus.w = cpu_to_le32(bpl->tus.w);
4992 bpl->addrLow = cpu_to_le32(putPaddrLow(busaddr));
4993 bpl->addrHigh = cpu_to_le32(putPaddrHigh(busaddr));
/* Fill in the GEN_REQUEST64 IOCB fields common to CR and CX forms */
4997 cmd = &cmdiocbq->iocb;
4998 cmd->un.genreq64.bdl.ulpIoTag32 = 0;
4999 cmd->un.genreq64.bdl.addrHigh = putPaddrHigh(bmp->phys);
5000 cmd->un.genreq64.bdl.addrLow = putPaddrLow(bmp->phys);
5001 cmd->un.genreq64.bdl.bdeFlags = BUFF_TYPE_BLP_64;
5002 cmd->un.genreq64.bdl.bdeSize =
5003 (request_nseg + reply_nseg) * sizeof(struct ulp_bde64);
5004 cmd->un.genreq64.w5.hcsw.Fctl = (SI | LA);
5005 cmd->un.genreq64.w5.hcsw.Dfctl = 0;
5006 cmd->un.genreq64.w5.hcsw.Rctl = FC_RCTL_DD_UNSOL_CMD;
5007 cmd->un.genreq64.w5.hcsw.Type = MENLO_TRANSPORT_TYPE; /* 0xfe */
5008 cmd->ulpBdeCount = 1;
5009 cmd->ulpClass = CLASS3;
5010 cmd->ulpOwner = OWN_CHIP;
5011 cmd->ulpLe = 1; /* Limited Edition */
5012 cmdiocbq->iocb_flag |= LPFC_IO_LIBDFC;
5013 cmdiocbq->vport = phba->pport;
5014 /* We want the firmware to timeout before we do */
5015 cmd->ulpTimeout = MENLO_TIMEOUT - 5;
/* context1 = bsg tracking struct, context2 = response iocbq consumed by
 * lpfc_bsg_menlo_cmd_cmp, context3 = BPL dma buffer.  (A duplicated
 * "context2 = rspiocbq" assignment was removed here.)
 */
5016 cmdiocbq->context3 = bmp;
5017 cmdiocbq->context2 = rspiocbq;
5018 cmdiocbq->iocb_cmpl = lpfc_bsg_menlo_cmd_cmp;
5019 cmdiocbq->context1 = dd_data;
5021 if (menlo_cmd->cmd == LPFC_BSG_VENDOR_MENLO_CMD) {
5022 cmd->ulpCommand = CMD_GEN_REQUEST64_CR;
5023 cmd->ulpPU = MENLO_PU; /* 3 */
5024 cmd->un.ulpWord[4] = MENLO_DID; /* 0x0000FC0E */
5025 cmd->ulpContext = MENLO_CONTEXT; /* 0 */
/* MENLO_DATA continues an existing exchange: CX form, xri from request */
5027 cmd->ulpCommand = CMD_GEN_REQUEST64_CX;
5029 cmd->un.ulpWord[4] = 0;
5030 cmd->ulpContext = menlo_cmd->xri;
5033 dd_data->type = TYPE_MENLO;
5034 dd_data->context_un.menlo.cmdiocbq = cmdiocbq;
5035 dd_data->context_un.menlo.rspiocbq = rspiocbq;
5036 dd_data->context_un.menlo.set_job = job;
5037 dd_data->context_un.menlo.bmp = bmp;
5039 rc = lpfc_sli_issue_iocb(phba, LPFC_ELS_RING, cmdiocbq,
5041 if (rc == IOCB_SUCCESS)
5042 return 0; /* done for now */
5044 /* iocb failed so cleanup */
5045 pci_unmap_sg(phba->pcidev, job->request_payload.sg_list,
5046 job->request_payload.sg_cnt, DMA_TO_DEVICE);
5047 pci_unmap_sg(phba->pcidev, job->reply_payload.sg_list,
5048 job->reply_payload.sg_cnt, DMA_FROM_DEVICE);
5050 lpfc_mbuf_free(phba, bmp->virt, bmp->phys);
5053 lpfc_sli_release_iocbq(phba, rspiocbq);
5055 lpfc_sli_release_iocbq(phba, cmdiocbq);
5061 /* make error code available to userspace */
5062 job->reply->result = rc;
5063 job->dd_data = NULL;
5068 * lpfc_bsg_hst_vendor - process a vendor-specific fc_bsg_job
5069 * @job: fc_bsg_job to handle
5072 lpfc_bsg_hst_vendor(struct fc_bsg_job *job)
/* The vendor sub-command is the first word of the vendor command area */
5074 int command = job->request->rqst_data.h_vendor.vendor_cmd[0];
/* Dispatch each LPFC_BSG_VENDOR_* sub-command to its handler */
5078 case LPFC_BSG_VENDOR_SET_CT_EVENT:
5079 rc = lpfc_bsg_hba_set_event(job);
5081 case LPFC_BSG_VENDOR_GET_CT_EVENT:
5082 rc = lpfc_bsg_hba_get_event(job);
5084 case LPFC_BSG_VENDOR_SEND_MGMT_RESP:
5085 rc = lpfc_bsg_send_mgmt_rsp(job);
5087 case LPFC_BSG_VENDOR_DIAG_MODE:
5088 rc = lpfc_bsg_diag_loopback_mode(job);
5090 case LPFC_BSG_VENDOR_DIAG_MODE_END:
5091 rc = lpfc_sli4_bsg_diag_mode_end(job);
5093 case LPFC_BSG_VENDOR_DIAG_RUN_LOOPBACK:
5094 rc = lpfc_bsg_diag_loopback_run(job);
5096 case LPFC_BSG_VENDOR_LINK_DIAG_TEST:
5097 rc = lpfc_sli4_bsg_link_diag_test(job);
5099 case LPFC_BSG_VENDOR_GET_MGMT_REV:
5100 rc = lpfc_bsg_get_dfc_rev(job);
5102 case LPFC_BSG_VENDOR_MBOX:
5103 rc = lpfc_bsg_mbox_cmd(job);
/* MENLO_CMD and MENLO_DATA intentionally share one handler */
5105 case LPFC_BSG_VENDOR_MENLO_CMD:
5106 case LPFC_BSG_VENDOR_MENLO_DATA:
5107 rc = lpfc_menlo_cmd(job);
/* Unrecognized sub-command: report the error code to userspace */
5111 job->reply->reply_payload_rcv_len = 0;
5112 /* make error code available to userspace */
5113 job->reply->result = rc;
5121 * lpfc_bsg_request - handle a bsg request from the FC transport
5122 * @job: fc_bsg_job to handle
5125 lpfc_bsg_request(struct fc_bsg_job *job)
/* Route the job by its FC transport message code */
5130 msgcode = job->request->msgcode;
5132 case FC_BSG_HST_VENDOR:
5133 rc = lpfc_bsg_hst_vendor(job);
5135 case FC_BSG_RPT_ELS:
5136 rc = lpfc_bsg_rport_els(job);
5139 rc = lpfc_bsg_send_mgmt_cmd(job);
/* Unsupported message code: hand the error back to userspace */
5143 job->reply->reply_payload_rcv_len = 0;
5144 /* make error code available to userspace */
5145 job->reply->result = rc;
5153 * lpfc_bsg_timeout - handle timeout of a bsg request from the FC transport
5154 * @job: fc_bsg_job that has timed out
5156 * This function just aborts the job's IOCB. The aborted IOCB will return to
5157 * the waiting function which will handle passing the error back to userspace
5160 lpfc_bsg_timeout(struct fc_bsg_job *job)
5162 struct lpfc_vport *vport = (struct lpfc_vport *)job->shost->hostdata;
5163 struct lpfc_hba *phba = vport->phba;
5164 struct lpfc_iocbq *cmdiocb;
5165 struct lpfc_bsg_event *evt;
5166 struct lpfc_bsg_iocb *iocb;
5167 struct lpfc_bsg_mbox *mbox;
5168 struct lpfc_bsg_menlo *menlo;
5169 struct lpfc_sli_ring *pring = &phba->sli.ring[LPFC_ELS_RING];
5170 struct bsg_job_data *dd_data;
5171 unsigned long flags;
5173 spin_lock_irqsave(&phba->ct_ev_lock, flags);
5174 dd_data = (struct bsg_job_data *)job->dd_data;
5175 /* timeout and completion crossed paths if no dd_data */
5177 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
/* Each dd_data->type has a distinct teardown path below */
5181 switch (dd_data->type) {
5183 iocb = &dd_data->context_un.iocb;
5184 cmdiocb = iocb->cmdiocbq;
5185 /* hint to completion handler that the job timed out */
5186 job->reply->result = -EAGAIN;
/* Drop ct_ev_lock before taking hbalock (lock ordering); the abort
 * drives the IOCB back through its completion handler.
 */
5187 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
5188 /* this will call our completion handler */
5189 spin_lock_irq(&phba->hbalock);
5190 lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
5191 spin_unlock_irq(&phba->hbalock);
5194 evt = dd_data->context_un.evt;
5195 /* this event has no job anymore */
5196 evt->set_job = NULL;
5197 job->dd_data = NULL;
5198 job->reply->reply_payload_rcv_len = 0;
5199 /* Return -EAGAIN which is our way of signallying the
5202 job->reply->result = -EAGAIN;
5203 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
5207 mbox = &dd_data->context_un.mbox;
5208 /* this mbox has no job anymore */
5209 mbox->set_job = NULL;
5210 job->dd_data = NULL;
5211 job->reply->reply_payload_rcv_len = 0;
5212 job->reply->result = -EAGAIN;
5213 /* the mbox completion handler can now be run */
5214 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
/* If an extended-buffer mbox session is active on the port side,
 * flag it as aborted so its state machine can unwind.
 */
5216 if (phba->mbox_ext_buf_ctx.state == LPFC_BSG_MBOX_PORT)
5217 phba->mbox_ext_buf_ctx.state = LPFC_BSG_MBOX_ABTS;
5220 menlo = &dd_data->context_un.menlo;
5221 cmdiocb = menlo->cmdiocbq;
5222 /* hint to completion handler that the job timed out */
5223 job->reply->result = -EAGAIN;
5224 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
5225 /* this will call our completion handler */
5226 spin_lock_irq(&phba->hbalock);
5227 lpfc_sli_issue_abort_iotag(phba, pring, cmdiocb);
5228 spin_unlock_irq(&phba->hbalock);
5231 spin_unlock_irqrestore(&phba->ct_ev_lock, flags);
5235 /* scsi transport fc fc_bsg_job_timeout expects a zero return code,
5236 * otherwise an error message will be displayed on the console
5237 * so always return success (zero)