Pileus Git - ~andy/linux/commitdiff
Merge branch 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target...
authorLinus Torvalds <torvalds@linux-foundation.org>
Fri, 31 Jan 2014 23:31:23 +0000 (15:31 -0800)
committerLinus Torvalds <torvalds@linux-foundation.org>
Fri, 31 Jan 2014 23:31:23 +0000 (15:31 -0800)
Pull SCSI target updates from Nicholas Bellinger:
 "The highlights this round include:

  - add support for SCSI Referrals (Hannes)
  - add support for T10 DIF into target core (nab + mkp)
  - add support for T10 DIF emulation in FILEIO + RAMDISK backends (Sagi + nab)
  - add support for T10 DIF -> bio_integrity passthrough in IBLOCK backend (nab)
  - prep changes to iser-target for >= v3.15 T10 DIF support (Sagi)
  - add support for qla2xxx N_Port ID Virtualization - NPIV (Saurav + Quinn)
  - allow percpu_ida_alloc() to receive task state bitmask (Kent)
  - fix >= v3.12 iscsi-target session reset hung task regression (nab)
  - fix >= v3.13 percpu_ref se_lun->lun_ref_active race (nab)
  - fix a long-standing network portal creation race (Andy)"

* 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/nab/target-pending: (51 commits)
  target: Fix percpu_ref_put race in transport_lun_remove_cmd
  target/iscsi: Fix network portal creation race
  target: Report bad sector in sense data for DIF errors
  iscsi-target: Convert gfp_t parameter to task state bitmask
  iscsi-target: Fix connection reset hang with percpu_ida_alloc
  percpu_ida: Make percpu_ida_alloc + callers accept task state bitmask
  iscsi-target: Pre-allocate more tags to avoid ack starvation
  qla2xxx: Configure NPIV fc_vport via tcm_qla2xxx_npiv_make_lport
  qla2xxx: Enhancements to enable NPIV support for QLOGIC ISPs with TCM/LIO.
  qla2xxx: Fix scsi_host leak on qlt_lport_register callback failure
  IB/isert: pass scatterlist instead of cmd to fast_reg_mr routine
  IB/isert: Move fastreg descriptor creation to a function
  IB/isert: Avoid frwr notation, user fastreg
  IB/isert: seperate connection protection domains and dma MRs
  tcm_loop: Enable DIF/DIX modes in SCSI host LLD
  target/rd: Add DIF protection into rd_execute_rw
  target/rd: Add support for protection SGL setup + release
  target/rd: Refactor rd_build_device_space + rd_release_device_space
  target/file: Add DIF protection support to fd_execute_rw
  target/file: Add DIF protection init/format support
  ...

12 files changed:
1  2 
drivers/infiniband/ulp/isert/ib_isert.c
drivers/scsi/qla2xxx/qla_attr.c
drivers/scsi/qla2xxx/qla_def.h
drivers/scsi/qla2xxx/qla_target.c
drivers/target/iscsi/iscsi_target.c
drivers/target/target_core_device.c
drivers/target/target_core_file.c
drivers/target/target_core_file.h
drivers/target/target_core_iblock.c
drivers/target/target_core_tpg.c
drivers/vhost/scsi.c
include/target/target_core_base.h

index 9804fca6bf0605a074c13f470e4bf6cf8413bd66,63bcf69e9fe240705064169d22b1b6d3a73c376a..2b161be3c1a346e3a7203a7f88a624dac4df1cee
@@@ -47,10 -47,10 +47,10 @@@ static in
  isert_map_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
               struct isert_rdma_wr *wr);
  static void
- isert_unreg_rdma_frwr(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
+ isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn);
  static int
- isert_reg_rdma_frwr(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
-                   struct isert_rdma_wr *wr);
+ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+              struct isert_rdma_wr *wr);
  
  static void
  isert_qp_event_callback(struct ib_event *e, void *context)
@@@ -207,9 -207,7 +207,9 @@@ isert_free_rx_descriptors(struct isert_
        isert_conn->conn_rx_descs = NULL;
  }
  
 +static void isert_cq_tx_work(struct work_struct *);
  static void isert_cq_tx_callback(struct ib_cq *, void *);
 +static void isert_cq_rx_work(struct work_struct *);
  static void isert_cq_rx_callback(struct ib_cq *, void *);
  
  static int
@@@ -227,11 -225,11 +227,11 @@@ isert_create_device_ib_res(struct isert
  
        /* asign function handlers */
        if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
-               device->use_frwr = 1;
-               device->reg_rdma_mem = isert_reg_rdma_frwr;
-               device->unreg_rdma_mem = isert_unreg_rdma_frwr;
+               device->use_fastreg = 1;
+               device->reg_rdma_mem = isert_reg_rdma;
+               device->unreg_rdma_mem = isert_unreg_rdma;
        } else {
-               device->use_frwr = 0;
+               device->use_fastreg = 0;
                device->reg_rdma_mem = isert_map_rdma;
                device->unreg_rdma_mem = isert_unmap_cmd;
        }
        device->cqs_used = min_t(int, num_online_cpus(),
                                 device->ib_device->num_comp_vectors);
        device->cqs_used = min(ISERT_MAX_CQ, device->cqs_used);
-       pr_debug("Using %d CQs, device %s supports %d vectors support FRWR %d\n",
+       pr_debug("Using %d CQs, device %s supports %d vectors support "
+                "Fast registration %d\n",
                 device->cqs_used, device->ib_device->name,
-                device->ib_device->num_comp_vectors, device->use_frwr);
+                device->ib_device->num_comp_vectors, device->use_fastreg);
        device->cq_desc = kzalloc(sizeof(struct isert_cq_desc) *
                                device->cqs_used, GFP_KERNEL);
        if (!device->cq_desc) {
        }
        cq_desc = device->cq_desc;
  
-       device->dev_pd = ib_alloc_pd(ib_dev);
-       if (IS_ERR(device->dev_pd)) {
-               ret = PTR_ERR(device->dev_pd);
-               pr_err("ib_alloc_pd failed for dev_pd: %d\n", ret);
-               goto out_cq_desc;
-       }
        for (i = 0; i < device->cqs_used; i++) {
                cq_desc[i].device = device;
                cq_desc[i].cq_index = i;
  
 +              INIT_WORK(&cq_desc[i].cq_rx_work, isert_cq_rx_work);
                device->dev_rx_cq[i] = ib_create_cq(device->ib_device,
                                                isert_cq_rx_callback,
                                                isert_cq_event_callback,
                                                (void *)&cq_desc[i],
                                                ISER_MAX_RX_CQ_LEN, i);
 -              if (IS_ERR(device->dev_rx_cq[i]))
 +              if (IS_ERR(device->dev_rx_cq[i])) {
 +                      ret = PTR_ERR(device->dev_rx_cq[i]);
 +                      device->dev_rx_cq[i] = NULL;
                        goto out_cq;
 +              }
  
 +              INIT_WORK(&cq_desc[i].cq_tx_work, isert_cq_tx_work);
                device->dev_tx_cq[i] = ib_create_cq(device->ib_device,
                                                isert_cq_tx_callback,
                                                isert_cq_event_callback,
                                                (void *)&cq_desc[i],
                                                ISER_MAX_TX_CQ_LEN, i);
 -              if (IS_ERR(device->dev_tx_cq[i]))
 +              if (IS_ERR(device->dev_tx_cq[i])) {
 +                      ret = PTR_ERR(device->dev_tx_cq[i]);
 +                      device->dev_tx_cq[i] = NULL;
                        goto out_cq;
 +              }
  
 -              if (ib_req_notify_cq(device->dev_rx_cq[i], IB_CQ_NEXT_COMP))
 +              ret = ib_req_notify_cq(device->dev_rx_cq[i], IB_CQ_NEXT_COMP);
 +              if (ret)
                        goto out_cq;
  
 -              if (ib_req_notify_cq(device->dev_tx_cq[i], IB_CQ_NEXT_COMP))
 +              ret = ib_req_notify_cq(device->dev_tx_cq[i], IB_CQ_NEXT_COMP);
 +              if (ret)
                        goto out_cq;
        }
  
-       device->dev_mr = ib_get_dma_mr(device->dev_pd, IB_ACCESS_LOCAL_WRITE);
-       if (IS_ERR(device->dev_mr)) {
-               ret = PTR_ERR(device->dev_mr);
-               pr_err("ib_get_dma_mr failed for dev_mr: %d\n", ret);
-               goto out_cq;
-       }
        return 0;
  
  out_cq:
                        ib_destroy_cq(device->dev_tx_cq[j]);
                }
        }
-       ib_dealloc_pd(device->dev_pd);
- out_cq_desc:
        kfree(device->cq_desc);
  
        return ret;
@@@ -341,8 -313,6 +325,6 @@@ isert_free_device_ib_res(struct isert_d
                device->dev_tx_cq[i] = NULL;
        }
  
-       ib_dereg_mr(device->dev_mr);
-       ib_dealloc_pd(device->dev_pd);
        kfree(device->cq_desc);
  }
  
@@@ -398,18 -368,18 +380,18 @@@ isert_device_find_by_ib_dev(struct rdma
  }
  
  static void
- isert_conn_free_frwr_pool(struct isert_conn *isert_conn)
+ isert_conn_free_fastreg_pool(struct isert_conn *isert_conn)
  {
        struct fast_reg_descriptor *fr_desc, *tmp;
        int i = 0;
  
-       if (list_empty(&isert_conn->conn_frwr_pool))
+       if (list_empty(&isert_conn->conn_fr_pool))
                return;
  
-       pr_debug("Freeing conn %p frwr pool", isert_conn);
+       pr_debug("Freeing conn %p fastreg pool", isert_conn);
  
        list_for_each_entry_safe(fr_desc, tmp,
-                                &isert_conn->conn_frwr_pool, list) {
+                                &isert_conn->conn_fr_pool, list) {
                list_del(&fr_desc->list);
                ib_free_fast_reg_page_list(fr_desc->data_frpl);
                ib_dereg_mr(fr_desc->data_mr);
                ++i;
        }
  
-       if (i < isert_conn->conn_frwr_pool_size)
+       if (i < isert_conn->conn_fr_pool_size)
                pr_warn("Pool still has %d regions registered\n",
-                       isert_conn->conn_frwr_pool_size - i);
+                       isert_conn->conn_fr_pool_size - i);
+ }
+ static int
+ isert_create_fr_desc(struct ib_device *ib_device, struct ib_pd *pd,
+                    struct fast_reg_descriptor *fr_desc)
+ {
+       fr_desc->data_frpl = ib_alloc_fast_reg_page_list(ib_device,
+                                                        ISCSI_ISER_SG_TABLESIZE);
+       if (IS_ERR(fr_desc->data_frpl)) {
+               pr_err("Failed to allocate data frpl err=%ld\n",
+                      PTR_ERR(fr_desc->data_frpl));
+               return PTR_ERR(fr_desc->data_frpl);
+       }
+       fr_desc->data_mr = ib_alloc_fast_reg_mr(pd, ISCSI_ISER_SG_TABLESIZE);
+       if (IS_ERR(fr_desc->data_mr)) {
+               pr_err("Failed to allocate data frmr err=%ld\n",
+                      PTR_ERR(fr_desc->data_mr));
+               ib_free_fast_reg_page_list(fr_desc->data_frpl);
+               return PTR_ERR(fr_desc->data_mr);
+       }
+       pr_debug("Create fr_desc %p page_list %p\n",
+                fr_desc, fr_desc->data_frpl->page_list);
+       fr_desc->valid = true;
+       return 0;
  }
  
  static int
- isert_conn_create_frwr_pool(struct isert_conn *isert_conn)
+ isert_conn_create_fastreg_pool(struct isert_conn *isert_conn)
  {
        struct fast_reg_descriptor *fr_desc;
        struct isert_device *device = isert_conn->conn_device;
        int i, ret;
  
-       INIT_LIST_HEAD(&isert_conn->conn_frwr_pool);
-       isert_conn->conn_frwr_pool_size = 0;
+       INIT_LIST_HEAD(&isert_conn->conn_fr_pool);
+       isert_conn->conn_fr_pool_size = 0;
        for (i = 0; i < ISCSI_DEF_XMIT_CMDS_MAX; i++) {
                fr_desc = kzalloc(sizeof(*fr_desc), GFP_KERNEL);
                if (!fr_desc) {
                        goto err;
                }
  
-               fr_desc->data_frpl =
-                       ib_alloc_fast_reg_page_list(device->ib_device,
-                                                   ISCSI_ISER_SG_TABLESIZE);
-               if (IS_ERR(fr_desc->data_frpl)) {
-                       pr_err("Failed to allocate fr_pg_list err=%ld\n",
-                              PTR_ERR(fr_desc->data_frpl));
-                       ret = PTR_ERR(fr_desc->data_frpl);
-                       goto err;
-               }
-               fr_desc->data_mr = ib_alloc_fast_reg_mr(device->dev_pd,
-                                       ISCSI_ISER_SG_TABLESIZE);
-               if (IS_ERR(fr_desc->data_mr)) {
-                       pr_err("Failed to allocate frmr err=%ld\n",
-                              PTR_ERR(fr_desc->data_mr));
-                       ret = PTR_ERR(fr_desc->data_mr);
-                       ib_free_fast_reg_page_list(fr_desc->data_frpl);
+               ret = isert_create_fr_desc(device->ib_device,
+                                          isert_conn->conn_pd, fr_desc);
+               if (ret) {
+                       pr_err("Failed to create fastreg descriptor err=%d\n",
+                              ret);
                        goto err;
                }
-               pr_debug("Create fr_desc %p page_list %p\n",
-                        fr_desc, fr_desc->data_frpl->page_list);
  
-               fr_desc->valid = true;
-               list_add_tail(&fr_desc->list, &isert_conn->conn_frwr_pool);
-               isert_conn->conn_frwr_pool_size++;
+               list_add_tail(&fr_desc->list, &isert_conn->conn_fr_pool);
+               isert_conn->conn_fr_pool_size++;
        }
  
-       pr_debug("Creating conn %p frwr pool size=%d",
-                isert_conn, isert_conn->conn_frwr_pool_size);
+       pr_debug("Creating conn %p fastreg pool size=%d",
+                isert_conn, isert_conn->conn_fr_pool_size);
  
        return 0;
  
  err:
-       isert_conn_free_frwr_pool(isert_conn);
+       isert_conn_free_fastreg_pool(isert_conn);
        return ret;
  }
  
@@@ -558,14 -540,29 +552,29 @@@ isert_connect_request(struct rdma_cm_i
        }
  
        isert_conn->conn_device = device;
-       isert_conn->conn_pd = device->dev_pd;
-       isert_conn->conn_mr = device->dev_mr;
+       isert_conn->conn_pd = ib_alloc_pd(isert_conn->conn_device->ib_device);
+       if (IS_ERR(isert_conn->conn_pd)) {
+               ret = PTR_ERR(isert_conn->conn_pd);
+               pr_err("ib_alloc_pd failed for conn %p: ret=%d\n",
+                      isert_conn, ret);
+               goto out_pd;
+       }
  
-       if (device->use_frwr) {
-               ret = isert_conn_create_frwr_pool(isert_conn);
+       isert_conn->conn_mr = ib_get_dma_mr(isert_conn->conn_pd,
+                                          IB_ACCESS_LOCAL_WRITE);
+       if (IS_ERR(isert_conn->conn_mr)) {
+               ret = PTR_ERR(isert_conn->conn_mr);
+               pr_err("ib_get_dma_mr failed for conn %p: ret=%d\n",
+                      isert_conn, ret);
+               goto out_mr;
+       }
+       if (device->use_fastreg) {
+               ret = isert_conn_create_fastreg_pool(isert_conn);
                if (ret) {
-                       pr_err("Conn: %p failed to create frwr_pool\n", isert_conn);
-                       goto out_frwr;
+                       pr_err("Conn: %p failed to create fastreg pool\n",
+                              isert_conn);
+                       goto out_fastreg;
                }
        }
  
        return 0;
  
  out_conn_dev:
-       if (device->use_frwr)
-               isert_conn_free_frwr_pool(isert_conn);
- out_frwr:
+       if (device->use_fastreg)
+               isert_conn_free_fastreg_pool(isert_conn);
+ out_fastreg:
+       ib_dereg_mr(isert_conn->conn_mr);
+ out_mr:
+       ib_dealloc_pd(isert_conn->conn_pd);
+ out_pd:
        isert_device_try_release(device);
  out_rsp_dma_map:
        ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
@@@ -608,8 -609,8 +621,8 @@@ isert_connect_release(struct isert_con
  
        pr_debug("Entering isert_connect_release(): >>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>\n");
  
-       if (device && device->use_frwr)
-               isert_conn_free_frwr_pool(isert_conn);
+       if (device && device->use_fastreg)
+               isert_conn_free_fastreg_pool(isert_conn);
  
        if (isert_conn->conn_qp) {
                cq_index = ((struct isert_cq_desc *)
        isert_free_rx_descriptors(isert_conn);
        rdma_destroy_id(isert_conn->conn_cm_id);
  
+       ib_dereg_mr(isert_conn->conn_mr);
+       ib_dealloc_pd(isert_conn->conn_pd);
        if (isert_conn->login_buf) {
                ib_dma_unmap_single(ib_dev, isert_conn->login_rsp_dma,
                                    ISER_RX_LOGIN_SIZE, DMA_TO_DEVICE);
@@@ -1024,13 -1028,13 +1040,13 @@@ isert_rx_login_req(struct iser_rx_desc 
  }
  
  static struct iscsi_cmd
- *isert_allocate_cmd(struct iscsi_conn *conn, gfp_t gfp)
+ *isert_allocate_cmd(struct iscsi_conn *conn)
  {
        struct isert_conn *isert_conn = (struct isert_conn *)conn->context;
        struct isert_cmd *isert_cmd;
        struct iscsi_cmd *cmd;
  
-       cmd = iscsit_allocate_cmd(conn, gfp);
+       cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
        if (!cmd) {
                pr_err("Unable to allocate iscsi_cmd + isert_cmd\n");
                return NULL;
@@@ -1219,7 -1223,7 +1235,7 @@@ isert_rx_opcode(struct isert_conn *iser
  
        switch (opcode) {
        case ISCSI_OP_SCSI_CMD:
-               cmd = isert_allocate_cmd(conn, GFP_KERNEL);
+               cmd = isert_allocate_cmd(conn);
                if (!cmd)
                        break;
  
                                        rx_desc, (unsigned char *)hdr);
                break;
        case ISCSI_OP_NOOP_OUT:
-               cmd = isert_allocate_cmd(conn, GFP_KERNEL);
+               cmd = isert_allocate_cmd(conn);
                if (!cmd)
                        break;
  
                                                (unsigned char *)hdr);
                break;
        case ISCSI_OP_SCSI_TMFUNC:
-               cmd = isert_allocate_cmd(conn, GFP_KERNEL);
+               cmd = isert_allocate_cmd(conn);
                if (!cmd)
                        break;
  
                                                (unsigned char *)hdr);
                break;
        case ISCSI_OP_LOGOUT:
-               cmd = isert_allocate_cmd(conn, GFP_KERNEL);
+               cmd = isert_allocate_cmd(conn);
                if (!cmd)
                        break;
  
                                                    HZ);
                break;
        case ISCSI_OP_TEXT:
-               cmd = isert_allocate_cmd(conn, GFP_KERNEL);
+               cmd = isert_allocate_cmd(conn);
                if (!cmd)
                        break;
  
@@@ -1404,25 -1408,25 +1420,25 @@@ isert_unmap_cmd(struct isert_cmd *isert
  }
  
  static void
- isert_unreg_rdma_frwr(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
+ isert_unreg_rdma(struct isert_cmd *isert_cmd, struct isert_conn *isert_conn)
  {
        struct isert_rdma_wr *wr = &isert_cmd->rdma_wr;
        struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
        LIST_HEAD(unmap_list);
  
-       pr_debug("unreg_frwr_cmd: %p\n", isert_cmd);
+       pr_debug("unreg_fastreg_cmd: %p\n", isert_cmd);
  
        if (wr->fr_desc) {
-               pr_debug("unreg_frwr_cmd: %p free fr_desc %p\n",
+               pr_debug("unreg_fastreg_cmd: %p free fr_desc %p\n",
                         isert_cmd, wr->fr_desc);
                spin_lock_bh(&isert_conn->conn_lock);
-               list_add_tail(&wr->fr_desc->list, &isert_conn->conn_frwr_pool);
+               list_add_tail(&wr->fr_desc->list, &isert_conn->conn_fr_pool);
                spin_unlock_bh(&isert_conn->conn_lock);
                wr->fr_desc = NULL;
        }
  
        if (wr->sge) {
-               pr_debug("unreg_frwr_cmd: %p unmap_sg op\n", isert_cmd);
+               pr_debug("unreg_fastreg_cmd: %p unmap_sg op\n", isert_cmd);
                ib_dma_unmap_sg(ib_dev, wr->sge, wr->num_sge,
                                (wr->iser_ib_op == ISER_IB_RDMA_WRITE) ?
                                DMA_TO_DEVICE : DMA_FROM_DEVICE);
@@@ -1736,6 -1740,7 +1752,6 @@@ isert_cq_tx_callback(struct ib_cq *cq, 
  {
        struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context;
  
 -      INIT_WORK(&cq_desc->cq_tx_work, isert_cq_tx_work);
        queue_work(isert_comp_wq, &cq_desc->cq_tx_work);
  }
  
@@@ -1779,6 -1784,7 +1795,6 @@@ isert_cq_rx_callback(struct ib_cq *cq, 
  {
        struct isert_cq_desc *cq_desc = (struct isert_cq_desc *)context;
  
 -      INIT_WORK(&cq_desc->cq_rx_work, isert_cq_rx_work);
        queue_work(isert_rx_wq, &cq_desc->cq_rx_work);
  }
  
@@@ -2163,26 -2169,22 +2179,22 @@@ isert_map_fr_pagelist(struct ib_device 
  
  static int
  isert_fast_reg_mr(struct fast_reg_descriptor *fr_desc,
-                 struct isert_cmd *isert_cmd, struct isert_conn *isert_conn,
-                 struct ib_sge *ib_sge, u32 offset, unsigned int data_len)
+                 struct isert_conn *isert_conn, struct scatterlist *sg_start,
+                 struct ib_sge *ib_sge, u32 sg_nents, u32 offset,
+                 unsigned int data_len)
  {
-       struct iscsi_cmd *cmd = isert_cmd->iscsi_cmd;
        struct ib_device *ib_dev = isert_conn->conn_cm_id->device;
-       struct scatterlist *sg_start;
-       u32 sg_off, page_off;
        struct ib_send_wr fr_wr, inv_wr;
        struct ib_send_wr *bad_wr, *wr = NULL;
+       int ret, pagelist_len;
+       u32 page_off;
        u8 key;
-       int ret, sg_nents, pagelist_len;
  
-       sg_off = offset / PAGE_SIZE;
-       sg_start = &cmd->se_cmd.t_data_sg[sg_off];
-       sg_nents = min_t(unsigned int, cmd->se_cmd.t_data_nents - sg_off,
-                        ISCSI_ISER_SG_TABLESIZE);
+       sg_nents = min_t(unsigned int, sg_nents, ISCSI_ISER_SG_TABLESIZE);
        page_off = offset % PAGE_SIZE;
  
-       pr_debug("Cmd: %p use fr_desc %p sg_nents %d sg_off %d offset %u\n",
-                isert_cmd, fr_desc, sg_nents, sg_off, offset);
+       pr_debug("Use fr_desc %p sg_nents %d offset %u\n",
+                fr_desc, sg_nents, offset);
  
        pagelist_len = isert_map_fr_pagelist(ib_dev, sg_start, sg_nents,
                                             &fr_desc->data_frpl->page_list[0]);
  }
  
  static int
- isert_reg_rdma_frwr(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
-                   struct isert_rdma_wr *wr)
+ isert_reg_rdma(struct iscsi_conn *conn, struct iscsi_cmd *cmd,
+              struct isert_rdma_wr *wr)
  {
        struct se_cmd *se_cmd = &cmd->se_cmd;
        struct isert_cmd *isert_cmd = iscsit_priv_cmd(cmd);
        if (wr->iser_ib_op == ISER_IB_RDMA_WRITE) {
                data_left = se_cmd->data_length;
        } else {
-               sg_off = cmd->write_data_done / PAGE_SIZE;
-               data_left = se_cmd->data_length - cmd->write_data_done;
                offset = cmd->write_data_done;
+               sg_off = offset / PAGE_SIZE;
+               data_left = se_cmd->data_length - cmd->write_data_done;
                isert_cmd->tx_desc.isert_cmd = isert_cmd;
        }
  
                wr->fr_desc = NULL;
        } else {
                spin_lock_irqsave(&isert_conn->conn_lock, flags);
-               fr_desc = list_first_entry(&isert_conn->conn_frwr_pool,
+               fr_desc = list_first_entry(&isert_conn->conn_fr_pool,
                                           struct fast_reg_descriptor, list);
                list_del(&fr_desc->list);
                spin_unlock_irqrestore(&isert_conn->conn_lock, flags);
                wr->fr_desc = fr_desc;
  
-               ret = isert_fast_reg_mr(fr_desc, isert_cmd, isert_conn,
-                                 ib_sge, offset, data_len);
+               ret = isert_fast_reg_mr(fr_desc, isert_conn, sg_start,
+                                       ib_sge, sg_nents, offset, data_len);
                if (ret) {
-                       list_add_tail(&fr_desc->list, &isert_conn->conn_frwr_pool);
+                       list_add_tail(&fr_desc->list, &isert_conn->conn_fr_pool);
                        goto unmap_sg;
                }
        }
index 570c7fcc0c4df5dff1c805754757c598e5cb311d,9c412b54bbd3fa669512be1d13c866a1fb7d055b..4a0d7c92181f974730b7964a029822ef4a565297
@@@ -862,7 -862,7 +862,7 @@@ qla2x00_alloc_sysfs_attr(scsi_qla_host_
  }
  
  void
 -qla2x00_free_sysfs_attr(scsi_qla_host_t *vha)
 +qla2x00_free_sysfs_attr(scsi_qla_host_t *vha, bool stop_beacon)
  {
        struct Scsi_Host *host = vha->host;
        struct sysfs_entry *iter;
                    iter->attr);
        }
  
 -      if (ha->beacon_blink_led == 1)
 +      if (stop_beacon && ha->beacon_blink_led == 1)
                ha->isp_ops->beacon_off(vha);
  }
  
@@@ -890,7 -890,7 +890,7 @@@ static ssize_
  qla2x00_drvr_version_show(struct device *dev,
                          struct device_attribute *attr, char *buf)
  {
 -      return snprintf(buf, PAGE_SIZE, "%s\n", qla2x00_version_str);
 +      return scnprintf(buf, PAGE_SIZE, "%s\n", qla2x00_version_str);
  }
  
  static ssize_t
@@@ -901,7 -901,7 +901,7 @@@ qla2x00_fw_version_show(struct device *
        struct qla_hw_data *ha = vha->hw;
        char fw_str[128];
  
 -      return snprintf(buf, PAGE_SIZE, "%s\n",
 +      return scnprintf(buf, PAGE_SIZE, "%s\n",
            ha->isp_ops->fw_version_str(vha, fw_str));
  }
  
@@@ -914,15 -914,15 +914,15 @@@ qla2x00_serial_num_show(struct device *
        uint32_t sn;
  
        if (IS_QLAFX00(vha->hw)) {
 -              return snprintf(buf, PAGE_SIZE, "%s\n",
 +              return scnprintf(buf, PAGE_SIZE, "%s\n",
                    vha->hw->mr.serial_num);
        } else if (IS_FWI2_CAPABLE(ha)) {
 -              qla2xxx_get_vpd_field(vha, "SN", buf, PAGE_SIZE);
 -              return snprintf(buf, PAGE_SIZE, "%s\n", buf);
 +              qla2xxx_get_vpd_field(vha, "SN", buf, PAGE_SIZE - 1);
 +              return strlen(strcat(buf, "\n"));
        }
  
        sn = ((ha->serial0 & 0x1f) << 16) | (ha->serial2 << 8) | ha->serial1;
 -      return snprintf(buf, PAGE_SIZE, "%c%05d\n", 'A' + sn / 100000,
 +      return scnprintf(buf, PAGE_SIZE, "%c%05d\n", 'A' + sn / 100000,
            sn % 100000);
  }
  
@@@ -931,7 -931,7 +931,7 @@@ qla2x00_isp_name_show(struct device *de
                      char *buf)
  {
        scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
 -      return snprintf(buf, PAGE_SIZE, "ISP%04X\n", vha->hw->pdev->device);
 +      return scnprintf(buf, PAGE_SIZE, "ISP%04X\n", vha->hw->pdev->device);
  }
  
  static ssize_t
@@@ -942,10 -942,10 +942,10 @@@ qla2x00_isp_id_show(struct device *dev
        struct qla_hw_data *ha = vha->hw;
  
        if (IS_QLAFX00(vha->hw))
 -              return snprintf(buf, PAGE_SIZE, "%s\n",
 +              return scnprintf(buf, PAGE_SIZE, "%s\n",
                    vha->hw->mr.hw_version);
  
 -      return snprintf(buf, PAGE_SIZE, "%04x %04x %04x %04x\n",
 +      return scnprintf(buf, PAGE_SIZE, "%04x %04x %04x %04x\n",
            ha->product_id[0], ha->product_id[1], ha->product_id[2],
            ha->product_id[3]);
  }
@@@ -956,7 -956,11 +956,7 @@@ qla2x00_model_name_show(struct device *
  {
        scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
  
 -      if (IS_QLAFX00(vha->hw))
 -              return snprintf(buf, PAGE_SIZE, "%s\n",
 -                  vha->hw->mr.product_name);
 -
 -      return snprintf(buf, PAGE_SIZE, "%s\n", vha->hw->model_number);
 +      return scnprintf(buf, PAGE_SIZE, "%s\n", vha->hw->model_number);
  }
  
  static ssize_t
@@@ -964,7 -968,7 +964,7 @@@ qla2x00_model_desc_show(struct device *
                        char *buf)
  {
        scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
 -      return snprintf(buf, PAGE_SIZE, "%s\n",
 +      return scnprintf(buf, PAGE_SIZE, "%s\n",
            vha->hw->model_desc ? vha->hw->model_desc : "");
  }
  
@@@ -975,7 -979,7 +975,7 @@@ qla2x00_pci_info_show(struct device *de
        scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
        char pci_info[30];
  
 -      return snprintf(buf, PAGE_SIZE, "%s\n",
 +      return scnprintf(buf, PAGE_SIZE, "%s\n",
            vha->hw->isp_ops->pci_info_str(vha, pci_info));
  }
  
@@@ -990,29 -994,29 +990,29 @@@ qla2x00_link_state_show(struct device *
        if (atomic_read(&vha->loop_state) == LOOP_DOWN ||
            atomic_read(&vha->loop_state) == LOOP_DEAD ||
            vha->device_flags & DFLG_NO_CABLE)
 -              len = snprintf(buf, PAGE_SIZE, "Link Down\n");
 +              len = scnprintf(buf, PAGE_SIZE, "Link Down\n");
        else if (atomic_read(&vha->loop_state) != LOOP_READY ||
            qla2x00_reset_active(vha))
 -              len = snprintf(buf, PAGE_SIZE, "Unknown Link State\n");
 +              len = scnprintf(buf, PAGE_SIZE, "Unknown Link State\n");
        else {
 -              len = snprintf(buf, PAGE_SIZE, "Link Up - ");
 +              len = scnprintf(buf, PAGE_SIZE, "Link Up - ");
  
                switch (ha->current_topology) {
                case ISP_CFG_NL:
 -                      len += snprintf(buf + len, PAGE_SIZE-len, "Loop\n");
 +                      len += scnprintf(buf + len, PAGE_SIZE-len, "Loop\n");
                        break;
                case ISP_CFG_FL:
 -                      len += snprintf(buf + len, PAGE_SIZE-len, "FL_Port\n");
 +                      len += scnprintf(buf + len, PAGE_SIZE-len, "FL_Port\n");
                        break;
                case ISP_CFG_N:
 -                      len += snprintf(buf + len, PAGE_SIZE-len,
 +                      len += scnprintf(buf + len, PAGE_SIZE-len,
                            "N_Port to N_Port\n");
                        break;
                case ISP_CFG_F:
 -                      len += snprintf(buf + len, PAGE_SIZE-len, "F_Port\n");
 +                      len += scnprintf(buf + len, PAGE_SIZE-len, "F_Port\n");
                        break;
                default:
 -                      len += snprintf(buf + len, PAGE_SIZE-len, "Loop\n");
 +                      len += scnprintf(buf + len, PAGE_SIZE-len, "Loop\n");
                        break;
                }
        }
@@@ -1028,10 -1032,10 +1028,10 @@@ qla2x00_zio_show(struct device *dev, st
  
        switch (vha->hw->zio_mode) {
        case QLA_ZIO_MODE_6:
 -              len += snprintf(buf + len, PAGE_SIZE-len, "Mode 6\n");
 +              len += scnprintf(buf + len, PAGE_SIZE-len, "Mode 6\n");
                break;
        case QLA_ZIO_DISABLED:
 -              len += snprintf(buf + len, PAGE_SIZE-len, "Disabled\n");
 +              len += scnprintf(buf + len, PAGE_SIZE-len, "Disabled\n");
                break;
        }
        return len;
@@@ -1071,7 -1075,7 +1071,7 @@@ qla2x00_zio_timer_show(struct device *d
  {
        scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
  
 -      return snprintf(buf, PAGE_SIZE, "%d us\n", vha->hw->zio_timer * 100);
 +      return scnprintf(buf, PAGE_SIZE, "%d us\n", vha->hw->zio_timer * 100);
  }
  
  static ssize_t
@@@ -1101,9 -1105,9 +1101,9 @@@ qla2x00_beacon_show(struct device *dev
        int len = 0;
  
        if (vha->hw->beacon_blink_led)
 -              len += snprintf(buf + len, PAGE_SIZE-len, "Enabled\n");
 +              len += scnprintf(buf + len, PAGE_SIZE-len, "Enabled\n");
        else
 -              len += snprintf(buf + len, PAGE_SIZE-len, "Disabled\n");
 +              len += scnprintf(buf + len, PAGE_SIZE-len, "Disabled\n");
        return len;
  }
  
@@@ -1145,7 -1149,7 +1145,7 @@@ qla2x00_optrom_bios_version_show(struc
  {
        scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
        struct qla_hw_data *ha = vha->hw;
 -      return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->bios_revision[1],
 +      return scnprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->bios_revision[1],
            ha->bios_revision[0]);
  }
  
@@@ -1155,7 -1159,7 +1155,7 @@@ qla2x00_optrom_efi_version_show(struct 
  {
        scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
        struct qla_hw_data *ha = vha->hw;
 -      return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->efi_revision[1],
 +      return scnprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->efi_revision[1],
            ha->efi_revision[0]);
  }
  
@@@ -1165,7 -1169,7 +1165,7 @@@ qla2x00_optrom_fcode_version_show(struc
  {
        scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
        struct qla_hw_data *ha = vha->hw;
 -      return snprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->fcode_revision[1],
 +      return scnprintf(buf, PAGE_SIZE, "%d.%02d\n", ha->fcode_revision[1],
            ha->fcode_revision[0]);
  }
  
@@@ -1175,7 -1179,7 +1175,7 @@@ qla2x00_optrom_fw_version_show(struct d
  {
        scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
        struct qla_hw_data *ha = vha->hw;
 -      return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d %d\n",
 +      return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d %d\n",
            ha->fw_revision[0], ha->fw_revision[1], ha->fw_revision[2],
            ha->fw_revision[3]);
  }
@@@ -1188,9 -1192,9 +1188,9 @@@ qla2x00_optrom_gold_fw_version_show(str
        struct qla_hw_data *ha = vha->hw;
  
        if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha))
 -              return snprintf(buf, PAGE_SIZE, "\n");
 +              return scnprintf(buf, PAGE_SIZE, "\n");
  
 -      return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%d)\n",
 +      return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%d)\n",
            ha->gold_fw_version[0], ha->gold_fw_version[1],
            ha->gold_fw_version[2], ha->gold_fw_version[3]);
  }
@@@ -1200,7 -1204,7 +1200,7 @@@ qla2x00_total_isp_aborts_show(struct de
                              struct device_attribute *attr, char *buf)
  {
        scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
 -      return snprintf(buf, PAGE_SIZE, "%d\n",
 +      return scnprintf(buf, PAGE_SIZE, "%d\n",
            vha->qla_stats.total_isp_aborts);
  }
  
@@@ -1214,16 -1218,16 +1214,16 @@@ qla24xx_84xx_fw_version_show(struct dev
        struct qla_hw_data *ha = vha->hw;
  
        if (!IS_QLA84XX(ha))
 -              return snprintf(buf, PAGE_SIZE, "\n");
 +              return scnprintf(buf, PAGE_SIZE, "\n");
  
        if (ha->cs84xx->op_fw_version == 0)
                rval = qla84xx_verify_chip(vha, status);
  
        if ((rval == QLA_SUCCESS) && (status[0] == 0))
 -              return snprintf(buf, PAGE_SIZE, "%u\n",
 +              return scnprintf(buf, PAGE_SIZE, "%u\n",
                        (uint32_t)ha->cs84xx->op_fw_version);
  
 -      return snprintf(buf, PAGE_SIZE, "\n");
 +      return scnprintf(buf, PAGE_SIZE, "\n");
  }
  
  static ssize_t
@@@ -1234,9 -1238,9 +1234,9 @@@ qla2x00_mpi_version_show(struct device 
        struct qla_hw_data *ha = vha->hw;
  
        if (!IS_QLA81XX(ha) && !IS_QLA8031(ha) && !IS_QLA8044(ha))
 -              return snprintf(buf, PAGE_SIZE, "\n");
 +              return scnprintf(buf, PAGE_SIZE, "\n");
  
 -      return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%x)\n",
 +      return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d (%x)\n",
            ha->mpi_version[0], ha->mpi_version[1], ha->mpi_version[2],
            ha->mpi_capabilities);
  }
@@@ -1249,9 -1253,9 +1249,9 @@@ qla2x00_phy_version_show(struct device 
        struct qla_hw_data *ha = vha->hw;
  
        if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
 -              return snprintf(buf, PAGE_SIZE, "\n");
 +              return scnprintf(buf, PAGE_SIZE, "\n");
  
 -      return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n",
 +      return scnprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n",
            ha->phy_version[0], ha->phy_version[1], ha->phy_version[2]);
  }
  
@@@ -1262,7 -1266,7 +1262,7 @@@ qla2x00_flash_block_size_show(struct de
        scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
        struct qla_hw_data *ha = vha->hw;
  
 -      return snprintf(buf, PAGE_SIZE, "0x%x\n", ha->fdt_block_size);
 +      return scnprintf(buf, PAGE_SIZE, "0x%x\n", ha->fdt_block_size);
  }
  
  static ssize_t
@@@ -1272,9 -1276,9 +1272,9 @@@ qla2x00_vlan_id_show(struct device *dev
        scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
  
        if (!IS_CNA_CAPABLE(vha->hw))
 -              return snprintf(buf, PAGE_SIZE, "\n");
 +              return scnprintf(buf, PAGE_SIZE, "\n");
  
 -      return snprintf(buf, PAGE_SIZE, "%d\n", vha->fcoe_vlan_id);
 +      return scnprintf(buf, PAGE_SIZE, "%d\n", vha->fcoe_vlan_id);
  }
  
  static ssize_t
@@@ -1284,9 -1288,9 +1284,9 @@@ qla2x00_vn_port_mac_address_show(struc
        scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
  
        if (!IS_CNA_CAPABLE(vha->hw))
 -              return snprintf(buf, PAGE_SIZE, "\n");
 +              return scnprintf(buf, PAGE_SIZE, "\n");
  
 -      return snprintf(buf, PAGE_SIZE, "%pMR\n", vha->fcoe_vn_port_mac);
 +      return scnprintf(buf, PAGE_SIZE, "%pMR\n", vha->fcoe_vn_port_mac);
  }
  
  static ssize_t
@@@ -1295,7 -1299,7 +1295,7 @@@ qla2x00_fabric_param_show(struct devic
  {
        scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
  
 -      return snprintf(buf, PAGE_SIZE, "%d\n", vha->hw->switch_cap);
 +      return scnprintf(buf, PAGE_SIZE, "%d\n", vha->hw->switch_cap);
  }
  
  static ssize_t
@@@ -1316,10 -1320,10 +1316,10 @@@ qla2x00_thermal_temp_show(struct devic
        }
  
        if (qla2x00_get_thermal_temp(vha, &temp) == QLA_SUCCESS)
 -              return snprintf(buf, PAGE_SIZE, "%d\n", temp);
 +              return scnprintf(buf, PAGE_SIZE, "%d\n", temp);
  
  done:
 -      return snprintf(buf, PAGE_SIZE, "\n");
 +      return scnprintf(buf, PAGE_SIZE, "\n");
  }
  
  static ssize_t
@@@ -1333,7 -1337,7 +1333,7 @@@ qla2x00_fw_state_show(struct device *de
  
        if (IS_QLAFX00(vha->hw)) {
                pstate = qlafx00_fw_state_show(dev, attr, buf);
 -              return snprintf(buf, PAGE_SIZE, "0x%x\n", pstate);
 +              return scnprintf(buf, PAGE_SIZE, "0x%x\n", pstate);
        }
  
        if (qla2x00_reset_active(vha))
        if (rval != QLA_SUCCESS)
                memset(state, -1, sizeof(state));
  
 -      return snprintf(buf, PAGE_SIZE, "0x%x 0x%x 0x%x 0x%x 0x%x\n", state[0],
 +      return scnprintf(buf, PAGE_SIZE, "0x%x 0x%x 0x%x 0x%x 0x%x\n", state[0],
            state[1], state[2], state[3], state[4]);
  }
  
@@@ -1355,9 -1359,9 +1355,9 @@@ qla2x00_diag_requests_show(struct devic
        scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
  
        if (!IS_BIDI_CAPABLE(vha->hw))
 -              return snprintf(buf, PAGE_SIZE, "\n");
 +              return scnprintf(buf, PAGE_SIZE, "\n");
  
 -      return snprintf(buf, PAGE_SIZE, "%llu\n", vha->bidi_stats.io_count);
 +      return scnprintf(buf, PAGE_SIZE, "%llu\n", vha->bidi_stats.io_count);
  }
  
  static ssize_t
@@@ -1367,9 -1371,9 +1367,9 @@@ qla2x00_diag_megabytes_show(struct devi
        scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
  
        if (!IS_BIDI_CAPABLE(vha->hw))
 -              return snprintf(buf, PAGE_SIZE, "\n");
 +              return scnprintf(buf, PAGE_SIZE, "\n");
  
 -      return snprintf(buf, PAGE_SIZE, "%llu\n",
 +      return scnprintf(buf, PAGE_SIZE, "%llu\n",
            vha->bidi_stats.transfer_bytes >> 20);
  }
  
@@@ -1388,7 -1392,7 +1388,7 @@@ qla2x00_fw_dump_size_show(struct devic
        else
                size = ha->fw_dump_len;
  
 -      return snprintf(buf, PAGE_SIZE, "%d\n", size);
 +      return scnprintf(buf, PAGE_SIZE, "%d\n", size);
  }
  
  static DEVICE_ATTR(driver_version, S_IRUGO, qla2x00_drvr_version_show, NULL);
@@@ -1990,6 -1994,8 +1990,8 @@@ qla24xx_vport_delete(struct fc_vport *f
  
        vha->flags.delete_progress = 1;
  
+       qlt_remove_target(ha, vha);
        fc_remove_host(vha->host);
  
        scsi_remove_host(vha->host);
index 41d6491d7bd9ec9e376ef93c710774b793df6889,e7be8a5c31b93472a3b5e7b738ba9ce0337e0b79..e1fe95ef23e11353aed91a8da9dd1d1f8ef2a9c3
@@@ -862,6 -862,7 +862,6 @@@ struct mbx_cmd_32 
   */
  #define MBC_LOAD_RAM                  1       /* Load RAM. */
  #define MBC_EXECUTE_FIRMWARE          2       /* Execute firmware. */
 -#define MBC_WRITE_RAM_WORD            4       /* Write RAM word. */
  #define MBC_READ_RAM_WORD             5       /* Read RAM word. */
  #define MBC_MAILBOX_REGISTER_TEST     6       /* Wrap incoming mailboxes */
  #define MBC_VERIFY_CHECKSUM           7       /* Verify checksum. */
  /*
   * ISP24xx mailbox commands
   */
 +#define MBC_WRITE_SERDES              0x3     /* Write serdes word. */
 +#define MBC_READ_SERDES                       0x4     /* Read serdes word. */
  #define MBC_SERDES_PARAMS             0x10    /* Serdes Tx Parameters. */
  #define MBC_GET_IOCB_STATUS           0x12    /* Get IOCB status command. */
  #define MBC_PORT_PARAMS                       0x1A    /* Port iDMA Parameters. */
@@@ -2735,6 -2734,7 +2735,6 @@@ struct req_que 
        srb_t **outstanding_cmds;
        uint32_t current_outstanding_cmd;
        uint16_t num_outstanding_cmds;
 -#define       MAX_Q_DEPTH             32
        int max_q_depth;
  
        dma_addr_t  dma_fx00;
@@@ -2750,6 -2750,13 +2750,13 @@@ struct qlfc_fw 
        uint32_t len;
  };
  
+ struct scsi_qlt_host {
+       void *target_lport_ptr;
+       struct mutex tgt_mutex;
+       struct mutex tgt_host_action_mutex;
+       struct qla_tgt *qla_tgt;
+ };
  struct qlt_hw_data {
        /* Protected by hw lock */
        uint32_t enable_class_2:1;
        uint32_t __iomem *atio_q_in;
        uint32_t __iomem *atio_q_out;
  
-       void *target_lport_ptr;
        struct qla_tgt_func_tmpl *tgt_ops;
-       struct qla_tgt *qla_tgt;
        struct qla_tgt_cmd *cmds[DEFAULT_OUTSTANDING_COMMANDS];
        uint16_t current_handle;
  
        struct qla_tgt_vp_map *tgt_vp_map;
-       struct mutex tgt_mutex;
-       struct mutex tgt_host_action_mutex;
  
        int saved_set;
        uint16_t saved_exchange_count;
@@@ -3302,7 -3305,12 +3305,7 @@@ struct qla_hw_data 
        struct work_struct nic_core_reset;
        struct work_struct idc_state_handler;
        struct work_struct nic_core_unrecoverable;
 -
 -#define HOST_QUEUE_RAMPDOWN_INTERVAL           (60 * HZ)
 -#define HOST_QUEUE_RAMPUP_INTERVAL             (30 * HZ)
 -      unsigned long   host_last_rampdown_time;
 -      unsigned long   host_last_rampup_time;
 -      int             cfg_lun_q_depth;
 +      struct work_struct board_disable;
  
        struct mr_data_fx00 mr;
  
@@@ -3367,11 -3375,12 +3370,11 @@@ typedef struct scsi_qla_host 
  #define MPI_RESET_NEEDED      19      /* Initiate MPI FW reset */
  #define ISP_QUIESCE_NEEDED    20      /* Driver need some quiescence */
  #define SCR_PENDING           21      /* SCR in target mode */
 -#define HOST_RAMP_DOWN_QUEUE_DEPTH     22
 -#define HOST_RAMP_UP_QUEUE_DEPTH       23
 -#define PORT_UPDATE_NEEDED    24
 -#define FX00_RESET_RECOVERY   25
 -#define FX00_TARGET_SCAN      26
 -#define FX00_CRITEMP_RECOVERY 27
 +#define PORT_UPDATE_NEEDED    22
 +#define FX00_RESET_RECOVERY   23
 +#define FX00_TARGET_SCAN      24
 +#define FX00_CRITEMP_RECOVERY 25
 +#define FX00_HOST_INFO_RESEND 26
  
        uint32_t        device_flags;
  #define SWITCH_FOUND          BIT_0
  #define VP_ERR_FAB_LOGOUT     4
  #define VP_ERR_ADAP_NORESOURCES       5
        struct qla_hw_data *hw;
+       struct scsi_qlt_host vha_tgt;
        struct req_que *req;
        int             fw_heartbeat_counter;
        int             seconds_since_last_heartbeat;
index 38a1257e76e1ec432c6cc81893b54a27ee30081a,b596f8b2cfc07c270e1fca977418f14aba4b00e8..9e80d61e5a3aa0f62e0dbc6d9aeae8632d81935c
@@@ -471,7 -471,7 +471,7 @@@ static void qlt_schedule_sess_for_delet
                schedule_delayed_work(&tgt->sess_del_work, 0);
        else
                schedule_delayed_work(&tgt->sess_del_work,
 -                  jiffies - sess->expires);
 +                  sess->expires - jiffies);
  }
  
  /* ha->hardware_lock supposed to be held on entry */
@@@ -550,14 -550,13 +550,14 @@@ static void qlt_del_sess_work_fn(struc
        struct scsi_qla_host *vha = tgt->vha;
        struct qla_hw_data *ha = vha->hw;
        struct qla_tgt_sess *sess;
 -      unsigned long flags;
 +      unsigned long flags, elapsed;
  
        spin_lock_irqsave(&ha->hardware_lock, flags);
        while (!list_empty(&tgt->del_sess_list)) {
                sess = list_entry(tgt->del_sess_list.next, typeof(*sess),
                    del_list_entry);
 -              if (time_after_eq(jiffies, sess->expires)) {
 +              elapsed = jiffies;
 +              if (time_after_eq(elapsed, sess->expires)) {
                        qlt_undelete_sess(sess);
  
                        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004,
                        ha->tgt.tgt_ops->put_sess(sess);
                } else {
                        schedule_delayed_work(&tgt->sess_del_work,
 -                          jiffies - sess->expires);
 +                          sess->expires - elapsed);
                        break;
                }
        }
@@@ -590,7 -589,7 +590,7 @@@ static struct qla_tgt_sess *qlt_create_
  
        /* Check to avoid double sessions */
        spin_lock_irqsave(&ha->hardware_lock, flags);
-       list_for_each_entry(sess, &ha->tgt.qla_tgt->sess_list,
+       list_for_each_entry(sess, &vha->vha_tgt.qla_tgt->sess_list,
                                sess_list_entry) {
                if (!memcmp(sess->port_name, fcport->port_name, WWN_SIZE)) {
                        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf005,
  
                return NULL;
        }
-       sess->tgt = ha->tgt.qla_tgt;
+       sess->tgt = vha->vha_tgt.qla_tgt;
        sess->vha = vha;
        sess->s_id = fcport->d_id;
        sess->loop_id = fcport->loop_id;
  
        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006,
            "Adding sess %p to tgt %p via ->check_initiator_node_acl()\n",
-           sess, ha->tgt.qla_tgt);
+           sess, vha->vha_tgt.qla_tgt);
  
        be_sid[0] = sess->s_id.b.domain;
        be_sid[1] = sess->s_id.b.area;
        memcpy(sess->port_name, fcport->port_name, sizeof(sess->port_name));
  
        spin_lock_irqsave(&ha->hardware_lock, flags);
-       list_add_tail(&sess->sess_list_entry, &ha->tgt.qla_tgt->sess_list);
-       ha->tgt.qla_tgt->sess_count++;
+       list_add_tail(&sess->sess_list_entry, &vha->vha_tgt.qla_tgt->sess_list);
+       vha->vha_tgt.qla_tgt->sess_count++;
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
  
        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b,
  void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
  {
        struct qla_hw_data *ha = vha->hw;
-       struct qla_tgt *tgt = ha->tgt.qla_tgt;
+       struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
        struct qla_tgt_sess *sess;
        unsigned long flags;
  
        if (!tgt || (fcport->port_type != FCT_INITIATOR))
                return;
  
+       if (qla_ini_mode_enabled(vha))
+               return;
        spin_lock_irqsave(&ha->hardware_lock, flags);
        if (tgt->tgt_stop) {
                spin_unlock_irqrestore(&ha->hardware_lock, flags);
        if (!sess) {
                spin_unlock_irqrestore(&ha->hardware_lock, flags);
  
-               mutex_lock(&ha->tgt.tgt_mutex);
+               mutex_lock(&vha->vha_tgt.tgt_mutex);
                sess = qlt_create_sess(vha, fcport, false);
-               mutex_unlock(&ha->tgt.tgt_mutex);
+               mutex_unlock(&vha->vha_tgt.tgt_mutex);
  
                spin_lock_irqsave(&ha->hardware_lock, flags);
        } else {
  void qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport)
  {
        struct qla_hw_data *ha = vha->hw;
-       struct qla_tgt *tgt = ha->tgt.qla_tgt;
+       struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
        struct qla_tgt_sess *sess;
        unsigned long flags;
  
@@@ -806,12 -808,12 +809,12 @@@ void qlt_stop_phase1(struct qla_tgt *tg
         * Mutex needed to sync with qla_tgt_fc_port_[added,deleted].
         * Lock is needed, because we still can get an incoming packet.
         */
-       mutex_lock(&ha->tgt.tgt_mutex);
+       mutex_lock(&vha->vha_tgt.tgt_mutex);
        spin_lock_irqsave(&ha->hardware_lock, flags);
        tgt->tgt_stop = 1;
        qlt_clear_tgt_db(tgt, true);
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
-       mutex_unlock(&ha->tgt.tgt_mutex);
+       mutex_unlock(&vha->vha_tgt.tgt_mutex);
  
        flush_delayed_work(&tgt->sess_del_work);
  
@@@ -845,20 -847,21 +848,21 @@@ EXPORT_SYMBOL(qlt_stop_phase1)
  void qlt_stop_phase2(struct qla_tgt *tgt)
  {
        struct qla_hw_data *ha = tgt->ha;
+       scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
        unsigned long flags;
  
        if (tgt->tgt_stopped) {
-               ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf04f,
+               ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04f,
                    "Already in tgt->tgt_stopped state\n");
                dump_stack();
                return;
        }
  
-       ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00b,
+       ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00b,
            "Waiting for %d IRQ commands to complete (tgt %p)",
            tgt->irq_cmd_count, tgt);
  
-       mutex_lock(&ha->tgt.tgt_mutex);
+       mutex_lock(&vha->vha_tgt.tgt_mutex);
        spin_lock_irqsave(&ha->hardware_lock, flags);
        while (tgt->irq_cmd_count != 0) {
                spin_unlock_irqrestore(&ha->hardware_lock, flags);
        tgt->tgt_stop = 0;
        tgt->tgt_stopped = 1;
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
-       mutex_unlock(&ha->tgt.tgt_mutex);
+       mutex_unlock(&vha->vha_tgt.tgt_mutex);
  
-       ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00c, "Stop of tgt %p finished",
+       ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00c, "Stop of tgt %p finished",
            tgt);
  }
  EXPORT_SYMBOL(qlt_stop_phase2);
  /* Called from qlt_remove_target() -> qla2x00_remove_one() */
  static void qlt_release(struct qla_tgt *tgt)
  {
-       struct qla_hw_data *ha = tgt->ha;
+       scsi_qla_host_t *vha = tgt->vha;
  
-       if ((ha->tgt.qla_tgt != NULL) && !tgt->tgt_stopped)
+       if ((vha->vha_tgt.qla_tgt != NULL) && !tgt->tgt_stopped)
                qlt_stop_phase2(tgt);
  
-       ha->tgt.qla_tgt = NULL;
+       vha->vha_tgt.qla_tgt = NULL;
  
-       ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00d,
+       ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00d,
            "Release of tgt %p finished\n", tgt);
  
        kfree(tgt);
@@@ -949,8 -952,8 +953,8 @@@ static void qlt_send_notify_ack(struct 
                return;
        }
  
-       if (ha->tgt.qla_tgt != NULL)
-               ha->tgt.qla_tgt->notify_ack_expected++;
+       if (vha->vha_tgt.qla_tgt != NULL)
+               vha->vha_tgt.qla_tgt->notify_ack_expected++;
  
        pkt->entry_type = NOTIFY_ACK_TYPE;
        pkt->entry_count = 1;
@@@ -1054,7 -1057,7 +1058,7 @@@ static void qlt_24xx_send_abts_resp(str
                /* Other bytes are zero */
        }
  
-       ha->tgt.qla_tgt->abts_resp_expected++;
+       vha->vha_tgt.qla_tgt->abts_resp_expected++;
  
        qla2x00_start_iocbs(vha, vha->req);
  }
@@@ -1206,7 -1209,7 +1210,7 @@@ static void qlt_24xx_handle_abts(struc
                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf012,
                    "qla_target(%d): task abort for non-existant session\n",
                    vha->vp_idx);
-               rc = qlt_sched_sess_work(ha->tgt.qla_tgt,
+               rc = qlt_sched_sess_work(vha->vha_tgt.qla_tgt,
                    QLA_TGT_SESS_WORK_ABORT, abts, sizeof(*abts));
                if (rc != 0) {
                        qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED,
@@@ -2157,8 -2160,7 +2161,7 @@@ static int qlt_prepare_srr_ctio(struct 
        struct qla_tgt_cmd *cmd, void *ctio)
  {
        struct qla_tgt_srr_ctio *sc;
-       struct qla_hw_data *ha = vha->hw;
-       struct qla_tgt *tgt = ha->tgt.qla_tgt;
+       struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
        struct qla_tgt_srr_imm *imm;
  
        tgt->ctio_srr_id++;
@@@ -2474,7 -2476,7 +2477,7 @@@ static void qlt_do_work(struct work_str
        struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
        scsi_qla_host_t *vha = cmd->vha;
        struct qla_hw_data *ha = vha->hw;
-       struct qla_tgt *tgt = ha->tgt.qla_tgt;
+       struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
        struct qla_tgt_sess *sess = NULL;
        struct atio_from_isp *atio = &cmd->atio;
        unsigned char *cdb;
                        goto out_term;
                }
  
-               mutex_lock(&ha->tgt.tgt_mutex);
+               mutex_lock(&vha->vha_tgt.tgt_mutex);
                sess = qlt_make_local_sess(vha, s_id);
                /* sess has an extra creation ref. */
-               mutex_unlock(&ha->tgt.tgt_mutex);
+               mutex_unlock(&vha->vha_tgt.tgt_mutex);
  
                if (!sess)
                        goto out_term;
@@@ -2576,8 -2578,7 +2579,7 @@@ out_term
  static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
        struct atio_from_isp *atio)
  {
-       struct qla_hw_data *ha = vha->hw;
-       struct qla_tgt *tgt = ha->tgt.qla_tgt;
+       struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
        struct qla_tgt_cmd *cmd;
  
        if (unlikely(tgt->tgt_stop)) {
  
        memcpy(&cmd->atio, atio, sizeof(*atio));
        cmd->state = QLA_TGT_STATE_NEW;
-       cmd->tgt = ha->tgt.qla_tgt;
+       cmd->tgt = vha->vha_tgt.qla_tgt;
        cmd->vha = vha;
  
        INIT_WORK(&cmd->work, qlt_do_work);
@@@ -2723,7 -2724,7 +2725,7 @@@ static int qlt_handle_task_mgmt(struct 
        uint32_t lun, unpacked_lun;
        int lun_size, fn;
  
-       tgt = ha->tgt.qla_tgt;
+       tgt = vha->vha_tgt.qla_tgt;
  
        lun = a->u.isp24.fcp_cmnd.lun;
        lun_size = sizeof(a->u.isp24.fcp_cmnd.lun);
@@@ -2797,7 -2798,7 +2799,7 @@@ static int qlt_abort_task(struct scsi_q
                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf025,
                    "qla_target(%d): task abort for unexisting "
                    "session\n", vha->vp_idx);
-               return qlt_sched_sess_work(ha->tgt.qla_tgt,
+               return qlt_sched_sess_work(vha->vha_tgt.qla_tgt,
                    QLA_TGT_SESS_WORK_ABORT, iocb, sizeof(*iocb));
        }
  
  static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
        struct imm_ntfy_from_isp *iocb)
  {
-       struct qla_hw_data *ha = vha->hw;
        int res = 0;
  
        ql_dbg(ql_dbg_tgt_mgt, vha, 0xf026,
        case ELS_PDISC:
        case ELS_ADISC:
        {
-               struct qla_tgt *tgt = ha->tgt.qla_tgt;
+               struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
                if (tgt->link_reinit_iocb_pending) {
                        qlt_send_notify_ack(vha, &tgt->link_reinit_iocb,
                            0, 0, 0, 0, 0, 0);
@@@ -3202,8 -3202,7 +3203,7 @@@ static void qlt_prepare_srr_imm(struct 
        struct imm_ntfy_from_isp *iocb)
  {
        struct qla_tgt_srr_imm *imm;
-       struct qla_hw_data *ha = vha->hw;
-       struct qla_tgt *tgt = ha->tgt.qla_tgt;
+       struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
        struct qla_tgt_srr_ctio *sctio;
  
        tgt->imm_srr_id++;
@@@ -3313,7 -3312,7 +3313,7 @@@ static void qlt_handle_imm_notify(struc
  
        case IMM_NTFY_LIP_LINK_REINIT:
        {
-               struct qla_tgt *tgt = ha->tgt.qla_tgt;
+               struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf033,
                    "qla_target(%d): LINK REINIT (loop %#x, "
                    "subcode %x)\n", vha->vp_idx,
@@@ -3489,7 -3488,7 +3489,7 @@@ static void qlt_24xx_atio_pkt(struct sc
        struct atio_from_isp *atio)
  {
        struct qla_hw_data *ha = vha->hw;
-       struct qla_tgt *tgt = ha->tgt.qla_tgt;
+       struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
        int rc;
  
        if (unlikely(tgt == NULL)) {
  static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt)
  {
        struct qla_hw_data *ha = vha->hw;
-       struct qla_tgt *tgt = ha->tgt.qla_tgt;
+       struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
  
        if (unlikely(tgt == NULL)) {
                ql_dbg(ql_dbg_tgt, vha, 0xe05d,
@@@ -3794,7 -3793,7 +3794,7 @@@ void qlt_async_event(uint16_t code, str
        uint16_t *mailbox)
  {
        struct qla_hw_data *ha = vha->hw;
-       struct qla_tgt *tgt = ha->tgt.qla_tgt;
+       struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
        int login_code;
  
        ql_dbg(ql_dbg_tgt, vha, 0xe039,
@@@ -3924,14 -3923,14 +3924,14 @@@ static fc_port_t *qlt_get_port_database
  static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *vha,
        uint8_t *s_id)
  {
-       struct qla_hw_data *ha = vha->hw;
        struct qla_tgt_sess *sess = NULL;
        fc_port_t *fcport = NULL;
        int rc, global_resets;
        uint16_t loop_id = 0;
  
  retry:
-       global_resets = atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count);
+       global_resets =
+           atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count);
  
        rc = qla24xx_get_loop_id(vha, s_id, &loop_id);
        if (rc != 0) {
                return NULL;
  
        if (global_resets !=
-           atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count)) {
+           atomic_read(&vha->vha_tgt.qla_tgt->tgt_global_resets_count)) {
                ql_dbg(ql_dbg_tgt_mgt, vha, 0xf043,
                    "qla_target(%d): global reset during session discovery "
                    "(counter was %d, new %d), retrying", vha->vp_idx,
                    global_resets,
-                   atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count));
+                   atomic_read(&vha->vha_tgt.
+                       qla_tgt->tgt_global_resets_count));
                goto retry;
        }
  
@@@ -3998,10 -3998,10 +3999,10 @@@ static void qlt_abort_work(struct qla_t
        if (!sess) {
                spin_unlock_irqrestore(&ha->hardware_lock, flags);
  
-               mutex_lock(&ha->tgt.tgt_mutex);
+               mutex_lock(&vha->vha_tgt.tgt_mutex);
                sess = qlt_make_local_sess(vha, s_id);
                /* sess has got an extra creation ref */
-               mutex_unlock(&ha->tgt.tgt_mutex);
+               mutex_unlock(&vha->vha_tgt.tgt_mutex);
  
                spin_lock_irqsave(&ha->hardware_lock, flags);
                if (!sess)
@@@ -4052,10 -4052,10 +4053,10 @@@ static void qlt_tmr_work(struct qla_tg
        if (!sess) {
                spin_unlock_irqrestore(&ha->hardware_lock, flags);
  
-               mutex_lock(&ha->tgt.tgt_mutex);
+               mutex_lock(&vha->vha_tgt.tgt_mutex);
                sess = qlt_make_local_sess(vha, s_id);
                /* sess has got an extra creation ref */
-               mutex_unlock(&ha->tgt.tgt_mutex);
+               mutex_unlock(&vha->vha_tgt.tgt_mutex);
  
                spin_lock_irqsave(&ha->hardware_lock, flags);
                if (!sess)
@@@ -4141,9 -4141,9 +4142,9 @@@ int qlt_add_target(struct qla_hw_data *
        }
  
        ql_dbg(ql_dbg_tgt, base_vha, 0xe03b,
-           "Registering target for host %ld(%p)", base_vha->host_no, ha);
+           "Registering target for host %ld(%p).\n", base_vha->host_no, ha);
  
-       BUG_ON((ha->tgt.qla_tgt != NULL) || (ha->tgt.tgt_ops != NULL));
+       BUG_ON(base_vha->vha_tgt.qla_tgt != NULL);
  
        tgt = kzalloc(sizeof(struct qla_tgt), GFP_KERNEL);
        if (!tgt) {
        INIT_WORK(&tgt->srr_work, qlt_handle_srr_work);
        atomic_set(&tgt->tgt_global_resets_count, 0);
  
-       ha->tgt.qla_tgt = tgt;
+       base_vha->vha_tgt.qla_tgt = tgt;
  
        ql_dbg(ql_dbg_tgt, base_vha, 0xe067,
                "qla_target(%d): using 64 Bit PCI addressing",
  /* Must be called under tgt_host_action_mutex */
  int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha)
  {
-       if (!ha->tgt.qla_tgt)
+       if (!vha->vha_tgt.qla_tgt)
                return 0;
  
        mutex_lock(&qla_tgt_mutex);
-       list_del(&ha->tgt.qla_tgt->tgt_list_entry);
+       list_del(&vha->vha_tgt.qla_tgt->tgt_list_entry);
        mutex_unlock(&qla_tgt_mutex);
  
        ql_dbg(ql_dbg_tgt, vha, 0xe03c, "Unregistering target for host %ld(%p)",
            vha->host_no, ha);
-       qlt_release(ha->tgt.qla_tgt);
+       qlt_release(vha->vha_tgt.qla_tgt);
  
        return 0;
  }
@@@ -4235,8 -4235,9 +4236,9 @@@ static void qlt_lport_dump(struct scsi_
   * @callback:  lport initialization callback for tcm_qla2xxx code
   * @target_lport_ptr: pointer for tcm_qla2xxx specific lport data
   */
- int qlt_lport_register(struct qla_tgt_func_tmpl *qla_tgt_ops, u64 wwpn,
-       int (*callback)(struct scsi_qla_host *), void *target_lport_ptr)
+ int qlt_lport_register(void *target_lport_ptr, u64 phys_wwpn,
+                      u64 npiv_wwpn, u64 npiv_wwnn,
+                      int (*callback)(struct scsi_qla_host *, void *, u64, u64))
  {
        struct qla_tgt *tgt;
        struct scsi_qla_host *vha;
                if (!host)
                        continue;
  
-               if (ha->tgt.tgt_ops != NULL)
-                       continue;
                if (!(host->hostt->supported_mode & MODE_TARGET))
                        continue;
  
                spin_lock_irqsave(&ha->hardware_lock, flags);
-               if (host->active_mode & MODE_TARGET) {
+               if ((!npiv_wwpn || !npiv_wwnn) && host->active_mode & MODE_TARGET) {
                        pr_debug("MODE_TARGET already active on qla2xxx(%d)\n",
                            host->host_no);
                        spin_unlock_irqrestore(&ha->hardware_lock, flags);
                            " qla2xxx scsi_host\n");
                        continue;
                }
-               qlt_lport_dump(vha, wwpn, b);
+               qlt_lport_dump(vha, phys_wwpn, b);
  
                if (memcmp(vha->port_name, b, WWN_SIZE)) {
                        scsi_host_put(host);
                        continue;
                }
-               /*
-                * Setup passed parameters ahead of invoking callback
-                */
-               ha->tgt.tgt_ops = qla_tgt_ops;
-               ha->tgt.target_lport_ptr = target_lport_ptr;
-               rc = (*callback)(vha);
-               if (rc != 0) {
-                       ha->tgt.tgt_ops = NULL;
-                       ha->tgt.target_lport_ptr = NULL;
-                       scsi_host_put(host);
-               }
                mutex_unlock(&qla_tgt_mutex);
+               rc = (*callback)(vha, target_lport_ptr, npiv_wwpn, npiv_wwnn);
+               if (rc != 0)
+                       scsi_host_put(host);
                return rc;
        }
        mutex_unlock(&qla_tgt_mutex);
@@@ -4314,7 -4306,7 +4307,7 @@@ void qlt_lport_deregister(struct scsi_q
        /*
         * Clear the target_lport_ptr qla_target_template pointer in qla_hw_data
         */
-       ha->tgt.target_lport_ptr = NULL;
+       vha->vha_tgt.target_lport_ptr = NULL;
        ha->tgt.tgt_ops = NULL;
        /*
         * Release the Scsi_Host reference for the underlying qla2xxx host
@@@ -4376,8 -4368,9 +4369,9 @@@ voi
  qlt_enable_vha(struct scsi_qla_host *vha)
  {
        struct qla_hw_data *ha = vha->hw;
-       struct qla_tgt *tgt = ha->tgt.qla_tgt;
+       struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
        unsigned long flags;
+       scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev);
  
        if (!tgt) {
                ql_dbg(ql_dbg_tgt, vha, 0xe069,
        qlt_set_mode(vha);
        spin_unlock_irqrestore(&ha->hardware_lock, flags);
  
-       set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
-       qla2xxx_wake_dpc(vha);
-       qla2x00_wait_for_hba_online(vha);
+       if (vha->vp_idx) {
+               qla24xx_disable_vp(vha);
+               qla24xx_enable_vp(vha);
+       } else {
+               set_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags);
+               qla2xxx_wake_dpc(base_vha);
+               qla2x00_wait_for_hba_online(base_vha);
+       }
  }
  EXPORT_SYMBOL(qlt_enable_vha);
  
@@@ -4407,7 -4405,7 +4406,7 @@@ voi
  qlt_disable_vha(struct scsi_qla_host *vha)
  {
        struct qla_hw_data *ha = vha->hw;
-       struct qla_tgt *tgt = ha->tgt.qla_tgt;
+       struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
        unsigned long flags;
  
        if (!tgt) {
@@@ -4438,8 -4436,10 +4437,10 @@@ qlt_vport_create(struct scsi_qla_host *
        if (!qla_tgt_mode_enabled(vha))
                return;
  
-       mutex_init(&ha->tgt.tgt_mutex);
-       mutex_init(&ha->tgt.tgt_host_action_mutex);
+       vha->vha_tgt.qla_tgt = NULL;
+       mutex_init(&vha->vha_tgt.tgt_mutex);
+       mutex_init(&vha->vha_tgt.tgt_host_action_mutex);
  
        qlt_clear_mode(vha);
  
         * assigning the value appropriately.
         */
        ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
+       qlt_add_target(ha, vha);
  }
  
  void
@@@ -4768,8 -4770,8 +4771,8 @@@ qlt_probe_one_stage1(struct scsi_qla_ho
                ISP_ATIO_Q_OUT(base_vha) = &ha->iobase->isp24.atio_q_out;
        }
  
-       mutex_init(&ha->tgt.tgt_mutex);
-       mutex_init(&ha->tgt.tgt_host_action_mutex);
+       mutex_init(&base_vha->vha_tgt.tgt_mutex);
+       mutex_init(&base_vha->vha_tgt.tgt_host_action_mutex);
        qlt_clear_mode(base_vha);
  }
  
index 00867190413c78d1f3226348e5c68718077c0b9f,a99637e9e82017757239db64673986b71adcffa7..7f1a7ce4b771a791cdf636906fd5fcdcc16c9ba6
@@@ -52,7 -52,7 +52,7 @@@
  static LIST_HEAD(g_tiqn_list);
  static LIST_HEAD(g_np_list);
  static DEFINE_SPINLOCK(tiqn_lock);
- static DEFINE_SPINLOCK(np_lock);
+ static DEFINE_MUTEX(np_lock);
  
  static struct idr tiqn_idr;
  struct idr sess_idr;
@@@ -307,6 -307,9 +307,9 @@@ bool iscsit_check_np_match
        return false;
  }
  
+ /*
+  * Called with mutex np_lock held
+  */
  static struct iscsi_np *iscsit_get_np(
        struct __kernel_sockaddr_storage *sockaddr,
        int network_transport)
        struct iscsi_np *np;
        bool match;
  
-       spin_lock_bh(&np_lock);
        list_for_each_entry(np, &g_np_list, np_list) {
-               spin_lock(&np->np_thread_lock);
+               spin_lock_bh(&np->np_thread_lock);
                if (np->np_thread_state != ISCSI_NP_THREAD_ACTIVE) {
-                       spin_unlock(&np->np_thread_lock);
+                       spin_unlock_bh(&np->np_thread_lock);
                        continue;
                }
  
                         * while iscsi_tpg_add_network_portal() is called.
                         */
                        np->np_exports++;
-                       spin_unlock(&np->np_thread_lock);
-                       spin_unlock_bh(&np_lock);
+                       spin_unlock_bh(&np->np_thread_lock);
                        return np;
                }
-               spin_unlock(&np->np_thread_lock);
+               spin_unlock_bh(&np->np_thread_lock);
        }
-       spin_unlock_bh(&np_lock);
  
        return NULL;
  }
@@@ -350,16 -350,22 +350,22 @@@ struct iscsi_np *iscsit_add_np
        struct sockaddr_in6 *sock_in6;
        struct iscsi_np *np;
        int ret;
+       mutex_lock(&np_lock);
        /*
         * Locate the existing struct iscsi_np if already active..
         */
        np = iscsit_get_np(sockaddr, network_transport);
-       if (np)
+       if (np) {
+               mutex_unlock(&np_lock);
                return np;
+       }
  
        np = kzalloc(sizeof(struct iscsi_np), GFP_KERNEL);
        if (!np) {
                pr_err("Unable to allocate memory for struct iscsi_np\n");
+               mutex_unlock(&np_lock);
                return ERR_PTR(-ENOMEM);
        }
  
        ret = iscsi_target_setup_login_socket(np, sockaddr);
        if (ret != 0) {
                kfree(np);
+               mutex_unlock(&np_lock);
                return ERR_PTR(ret);
        }
  
                pr_err("Unable to create kthread: iscsi_np\n");
                ret = PTR_ERR(np->np_thread);
                kfree(np);
+               mutex_unlock(&np_lock);
                return ERR_PTR(ret);
        }
        /*
         * point because iscsi_np has not been added to g_np_list yet.
         */
        np->np_exports = 1;
+       np->np_thread_state = ISCSI_NP_THREAD_ACTIVE;
  
-       spin_lock_bh(&np_lock);
        list_add_tail(&np->np_list, &g_np_list);
-       spin_unlock_bh(&np_lock);
+       mutex_unlock(&np_lock);
  
        pr_debug("CORE[0] - Added Network Portal: %s:%hu on %s\n",
                np->np_ip, np->np_port, np->np_transport->name);
@@@ -465,14 -473,13 +473,14 @@@ int iscsit_del_np(struct iscsi_np *np
                 */
                send_sig(SIGINT, np->np_thread, 1);
                kthread_stop(np->np_thread);
 +              np->np_thread = NULL;
        }
  
        np->np_transport->iscsit_free_np(np);
  
-       spin_lock_bh(&np_lock);
+       mutex_lock(&np_lock);
        list_del(&np->np_list);
-       spin_unlock_bh(&np_lock);
+       mutex_unlock(&np_lock);
  
        pr_debug("CORE[0] - Removed Network Portal: %s:%hu on %s\n",
                np->np_ip, np->np_port, np->np_transport->name);
@@@ -622,7 -629,7 +630,7 @@@ static int iscsit_add_reject
  {
        struct iscsi_cmd *cmd;
  
-       cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
+       cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
        if (!cmd)
                return -1;
  
@@@ -824,22 -831,24 +832,22 @@@ int iscsit_setup_scsi_cmd(struct iscsi_
        if (((hdr->flags & ISCSI_FLAG_CMD_READ) ||
             (hdr->flags & ISCSI_FLAG_CMD_WRITE)) && !hdr->data_length) {
                /*
 -               * Vmware ESX v3.0 uses a modified Cisco Initiator (v3.4.2)
 -               * that adds support for RESERVE/RELEASE.  There is a bug
 -               * add with this new functionality that sets R/W bits when
 -               * neither CDB carries any READ or WRITE datapayloads.
 +               * From RFC-3720 Section 10.3.1:
 +               *
 +               * "Either or both of R and W MAY be 1 when either the
 +               *  Expected Data Transfer Length and/or Bidirectional Read
 +               *  Expected Data Transfer Length are 0"
 +               *
 +              * For this case, go ahead and clear the unnecessary bits
 +               * to avoid any confusion with ->data_direction.
                 */
 -              if ((hdr->cdb[0] == 0x16) || (hdr->cdb[0] == 0x17)) {
 -                      hdr->flags &= ~ISCSI_FLAG_CMD_READ;
 -                      hdr->flags &= ~ISCSI_FLAG_CMD_WRITE;
 -                      goto done;
 -              }
 +              hdr->flags &= ~ISCSI_FLAG_CMD_READ;
 +              hdr->flags &= ~ISCSI_FLAG_CMD_WRITE;
  
 -              pr_err("ISCSI_FLAG_CMD_READ or ISCSI_FLAG_CMD_WRITE"
 +              pr_warn("ISCSI_FLAG_CMD_READ or ISCSI_FLAG_CMD_WRITE"
                        " set when Expected Data Transfer Length is 0 for"
 -                      " CDB: 0x%02x. Bad iSCSI Initiator.\n", hdr->cdb[0]);
 -              return iscsit_add_reject_cmd(cmd,
 -                                           ISCSI_REASON_BOOKMARK_INVALID, buf);
 +                      " CDB: 0x%02x, Fixing up flags\n", hdr->cdb[0]);
        }
 -done:
  
        if (!(hdr->flags & ISCSI_FLAG_CMD_READ) &&
            !(hdr->flags & ISCSI_FLAG_CMD_WRITE) && (hdr->data_length != 0)) {
@@@ -2475,7 -2484,7 +2483,7 @@@ static void iscsit_build_conn_drop_asyn
        if (!conn_p)
                return;
  
-       cmd = iscsit_allocate_cmd(conn_p, GFP_ATOMIC);
+       cmd = iscsit_allocate_cmd(conn_p, TASK_RUNNING);
        if (!cmd) {
                iscsit_dec_conn_usage_count(conn_p);
                return;
@@@ -3951,7 -3960,7 +3959,7 @@@ static int iscsi_target_rx_opcode(struc
  
        switch (hdr->opcode & ISCSI_OPCODE_MASK) {
        case ISCSI_OP_SCSI_CMD:
-               cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
+               cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
                if (!cmd)
                        goto reject;
  
        case ISCSI_OP_NOOP_OUT:
                cmd = NULL;
                if (hdr->ttt == cpu_to_be32(0xFFFFFFFF)) {
-                       cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
+                       cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
                        if (!cmd)
                                goto reject;
                }
                ret = iscsit_handle_nop_out(conn, cmd, buf);
                break;
        case ISCSI_OP_SCSI_TMFUNC:
-               cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
+               cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
                if (!cmd)
                        goto reject;
  
                ret = iscsit_handle_task_mgt_cmd(conn, cmd, buf);
                break;
        case ISCSI_OP_TEXT:
-               cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
+               cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
                if (!cmd)
                        goto reject;
  
                ret = iscsit_handle_text_cmd(conn, cmd, buf);
                break;
        case ISCSI_OP_LOGOUT:
-               cmd = iscsit_allocate_cmd(conn, GFP_KERNEL);
+               cmd = iscsit_allocate_cmd(conn, TASK_INTERRUPTIBLE);
                if (!cmd)
                        goto reject;
  
index d06de84b069bb0c283495bdf09da4b3e9b96ba2f,883099e89020268f1c600fa0e1d81d1053227238..65001e1336702966108081443d5a44f39988d5af
@@@ -918,6 -918,90 +918,90 @@@ int se_dev_set_emulate_3pc(struct se_de
        return 0;
  }
  
+ int se_dev_set_pi_prot_type(struct se_device *dev, int flag)
+ {
+       int rc, old_prot = dev->dev_attrib.pi_prot_type;
+       if (flag != 0 && flag != 1 && flag != 2 && flag != 3) {
+               pr_err("Illegal value %d for pi_prot_type\n", flag);
+               return -EINVAL;
+       }
+       if (flag == 2) {
+               pr_err("DIF TYPE2 protection currently not supported\n");
+               return -ENOSYS;
+       }
+       if (dev->dev_attrib.hw_pi_prot_type) {
+               pr_warn("DIF protection enabled on underlying hardware,"
+                       " ignoring\n");
+               return 0;
+       }
+       if (!dev->transport->init_prot || !dev->transport->free_prot) {
+               pr_err("DIF protection not supported by backend: %s\n",
+                      dev->transport->name);
+               return -ENOSYS;
+       }
+       if (!(dev->dev_flags & DF_CONFIGURED)) {
+               pr_err("DIF protection requires device to be configured\n");
+               return -ENODEV;
+       }
+       if (dev->export_count) {
+               pr_err("dev[%p]: Unable to change SE Device PROT type while"
+                      " export_count is %d\n", dev, dev->export_count);
+               return -EINVAL;
+       }
+       dev->dev_attrib.pi_prot_type = flag;
+       if (flag && !old_prot) {
+               rc = dev->transport->init_prot(dev);
+               if (rc) {
+                       dev->dev_attrib.pi_prot_type = old_prot;
+                       return rc;
+               }
+       } else if (!flag && old_prot) {
+               dev->transport->free_prot(dev);
+       }
+       pr_debug("dev[%p]: SE Device Protection Type: %d\n", dev, flag);
+       return 0;
+ }
+ int se_dev_set_pi_prot_format(struct se_device *dev, int flag)
+ {
+       int rc;
+       if (!flag)
+               return 0;
+       if (flag != 1) {
+               pr_err("Illegal value %d for pi_prot_format\n", flag);
+               return -EINVAL;
+       }
+       if (!dev->transport->format_prot) {
+               pr_err("DIF protection format not supported by backend %s\n",
+                      dev->transport->name);
+               return -ENOSYS;
+       }
+       if (!(dev->dev_flags & DF_CONFIGURED)) {
+               pr_err("DIF protection format requires device to be configured\n");
+               return -ENODEV;
+       }
+       if (dev->export_count) {
+               pr_err("dev[%p]: Unable to format SE Device PROT type while"
+                      " export_count is %d\n", dev, dev->export_count);
+               return -EINVAL;
+       }
+       rc = dev->transport->format_prot(dev);
+       if (rc)
+               return rc;
+       pr_debug("dev[%p]: SE Device Protection Format complete\n", dev);
+       return 0;
+ }
  int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag)
  {
        if ((flag != 0) && (flag != 1)) {
@@@ -1106,34 -1190,29 +1190,34 @@@ int se_dev_set_block_size(struct se_dev
        dev->dev_attrib.block_size = block_size;
        pr_debug("dev[%p]: SE Device block_size changed to %u\n",
                        dev, block_size);
 +
 +      if (dev->dev_attrib.max_bytes_per_io)
 +              dev->dev_attrib.hw_max_sectors =
 +                      dev->dev_attrib.max_bytes_per_io / block_size;
 +
        return 0;
  }
  
  struct se_lun *core_dev_add_lun(
        struct se_portal_group *tpg,
        struct se_device *dev,
-       u32 lun)
+       u32 unpacked_lun)
  {
-       struct se_lun *lun_p;
+       struct se_lun *lun;
        int rc;
  
-       lun_p = core_tpg_pre_addlun(tpg, lun);
-       if (IS_ERR(lun_p))
-               return lun_p;
+       lun = core_tpg_alloc_lun(tpg, unpacked_lun);
+       if (IS_ERR(lun))
+               return lun;
  
-       rc = core_tpg_post_addlun(tpg, lun_p,
+       rc = core_tpg_add_lun(tpg, lun,
                                TRANSPORT_LUNFLAGS_READ_WRITE, dev);
        if (rc < 0)
                return ERR_PTR(rc);
  
        pr_debug("%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from"
                " CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
-               tpg->se_tpg_tfo->tpg_get_tag(tpg), lun_p->unpacked_lun,
+               tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
                tpg->se_tpg_tfo->get_fabric_name(), dev->se_hba->hba_id);
        /*
         * Update LUN maps for dynamically added initiators when
                spin_unlock_irq(&tpg->acl_node_lock);
        }
  
-       return lun_p;
+       return lun;
  }
  
  /*      core_dev_del_lun():
@@@ -1420,6 -1499,7 +1504,7 @@@ struct se_device *target_alloc_device(s
        dev->dev_link_magic = SE_DEV_LINK_MAGIC;
        dev->se_hba = hba;
        dev->transport = hba->transport;
+       dev->prot_length = sizeof(struct se_dif_v1_tuple);
  
        INIT_LIST_HEAD(&dev->dev_list);
        INIT_LIST_HEAD(&dev->dev_sep_list);
        spin_lock_init(&dev->t10_pr.aptpl_reg_lock);
        INIT_LIST_HEAD(&dev->t10_alua.tg_pt_gps_list);
        spin_lock_init(&dev->t10_alua.tg_pt_gps_lock);
+       INIT_LIST_HEAD(&dev->t10_alua.lba_map_list);
+       spin_lock_init(&dev->t10_alua.lba_map_lock);
  
        dev->t10_wwn.t10_dev = dev;
        dev->t10_alua.t10_dev = dev;
        dev->dev_attrib.emulate_tpws = DA_EMULATE_TPWS;
        dev->dev_attrib.emulate_caw = DA_EMULATE_CAW;
        dev->dev_attrib.emulate_3pc = DA_EMULATE_3PC;
+       dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE0_PROT;
        dev->dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
        dev->dev_attrib.is_nonrot = DA_IS_NONROT;
        dev->dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD;
@@@ -1588,9 -1671,13 +1676,13 @@@ void target_free_device(struct se_devic
        }
  
        core_alua_free_lu_gp_mem(dev);
+       core_alua_set_lba_map(dev, NULL, 0, 0);
        core_scsi3_free_all_registrations(dev);
        se_release_vpd_for_dev(dev);
  
+       if (dev->transport->free_prot)
+               dev->transport->free_prot(dev);
        dev->transport->free_device(dev);
  }
  
index 78241a53b555fc5600d0a6ffe7b8d8b4e15687d0,aaba7c588c4d4755200aabd27052fdb037aeeb44..cf991a91a8a9699f655fd325f654cb16ed3046d5
@@@ -66,8 -66,9 +66,8 @@@ static int fd_attach_hba(struct se_hba 
        pr_debug("CORE_HBA[%d] - TCM FILEIO HBA Driver %s on Generic"
                " Target Core Stack %s\n", hba->hba_id, FD_VERSION,
                TARGET_CORE_MOD_VERSION);
 -      pr_debug("CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic"
 -              " MaxSectors: %u\n",
 -              hba->hba_id, fd_host->fd_host_id, FD_MAX_SECTORS);
 +      pr_debug("CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic\n",
 +              hba->hba_id, fd_host->fd_host_id);
  
        return 0;
  }
@@@ -219,8 -220,7 +219,8 @@@ static int fd_configure_device(struct s
        }
  
        dev->dev_attrib.hw_block_size = fd_dev->fd_block_size;
 -      dev->dev_attrib.hw_max_sectors = FD_MAX_SECTORS;
 +      dev->dev_attrib.max_bytes_per_io = FD_MAX_BYTES;
 +      dev->dev_attrib.hw_max_sectors = FD_MAX_BYTES / fd_dev->fd_block_size;
        dev->dev_attrib.hw_queue_depth = FD_MAX_DEVICE_QUEUE_DEPTH;
  
        if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE) {
@@@ -257,6 -257,72 +257,72 @@@ static void fd_free_device(struct se_de
        kfree(fd_dev);
  }
  
+ static int fd_do_prot_rw(struct se_cmd *cmd, struct fd_prot *fd_prot,
+                        int is_write)
+ {
+       struct se_device *se_dev = cmd->se_dev;
+       struct fd_dev *dev = FD_DEV(se_dev);
+       struct file *prot_fd = dev->fd_prot_file;
+       struct scatterlist *sg;
+       loff_t pos = (cmd->t_task_lba * se_dev->prot_length);
+       unsigned char *buf;
+       u32 prot_size, len, size;
+       int rc, ret = 1, i;
+       prot_size = (cmd->data_length / se_dev->dev_attrib.block_size) *
+                    se_dev->prot_length;
+       if (!is_write) {
+               fd_prot->prot_buf = vzalloc(prot_size);
+               if (!fd_prot->prot_buf) {
+                       pr_err("Unable to allocate fd_prot->prot_buf\n");
+                       return -ENOMEM;
+               }
+               buf = fd_prot->prot_buf;
+               fd_prot->prot_sg_nents = cmd->t_prot_nents;
+               fd_prot->prot_sg = kzalloc(sizeof(struct scatterlist) *
+                                          fd_prot->prot_sg_nents, GFP_KERNEL);
+               if (!fd_prot->prot_sg) {
+                       pr_err("Unable to allocate fd_prot->prot_sg\n");
+                       vfree(fd_prot->prot_buf);
+                       return -ENOMEM;
+               }
+               size = prot_size;
+               for_each_sg(fd_prot->prot_sg, sg, fd_prot->prot_sg_nents, i) {
+                       len = min_t(u32, PAGE_SIZE, size);
+                       sg_set_buf(sg, buf, len);
+                       size -= len;
+                       buf += len;
+               }
+       }
+       if (is_write) {
+               rc = kernel_write(prot_fd, fd_prot->prot_buf, prot_size, pos);
+               if (rc < 0 || prot_size != rc) {
+                       pr_err("kernel_write() for fd_do_prot_rw failed:"
+                              " %d\n", rc);
+                       ret = -EINVAL;
+               }
+       } else {
+               rc = kernel_read(prot_fd, pos, fd_prot->prot_buf, prot_size);
+               if (rc < 0) {
+                       pr_err("kernel_read() for fd_do_prot_rw failed:"
+                              " %d\n", rc);
+                       ret = -EINVAL;
+               }
+       }
+       if (is_write || ret < 0) {
+               kfree(fd_prot->prot_sg);
+               vfree(fd_prot->prot_buf);
+       }
+       return ret;
+ }
  static int fd_do_rw(struct se_cmd *cmd, struct scatterlist *sgl,
                u32 sgl_nents, int is_write)
  {
@@@ -551,6 -617,8 +617,8 @@@ fd_execute_rw(struct se_cmd *cmd, struc
              enum dma_data_direction data_direction)
  {
        struct se_device *dev = cmd->se_dev;
+       struct fd_prot fd_prot;
+       sense_reason_t rc;
        int ret = 0;
  
        /*
         * physical memory addresses to struct iovec virtual memory.
         */
        if (data_direction == DMA_FROM_DEVICE) {
+               memset(&fd_prot, 0, sizeof(struct fd_prot));
+               if (cmd->prot_type) {
+                       ret = fd_do_prot_rw(cmd, &fd_prot, false);
+                       if (ret < 0)
+                               return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               }
                ret = fd_do_rw(cmd, sgl, sgl_nents, 0);
+               if (ret > 0 && cmd->prot_type) {
+                       u32 sectors = cmd->data_length / dev->dev_attrib.block_size;
+                       rc = sbc_dif_verify_read(cmd, cmd->t_task_lba, sectors,
+                                                0, fd_prot.prot_sg, 0);
+                       if (rc) {
+                               kfree(fd_prot.prot_sg);
+                               vfree(fd_prot.prot_buf);
+                               return rc;
+                       }
+                       kfree(fd_prot.prot_sg);
+                       vfree(fd_prot.prot_buf);
+               }
        } else {
+               memset(&fd_prot, 0, sizeof(struct fd_prot));
+               if (cmd->prot_type) {
+                       u32 sectors = cmd->data_length / dev->dev_attrib.block_size;
+                       ret = fd_do_prot_rw(cmd, &fd_prot, false);
+                       if (ret < 0)
+                               return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+                       rc = sbc_dif_verify_write(cmd, cmd->t_task_lba, sectors,
+                                                 0, fd_prot.prot_sg, 0);
+                       if (rc) {
+                               kfree(fd_prot.prot_sg);
+                               vfree(fd_prot.prot_buf);
+                               return rc;
+                       }
+               }
                ret = fd_do_rw(cmd, sgl, sgl_nents, 1);
                /*
                 * Perform implicit vfs_fsync_range() for fd_do_writev() ops
  
                        vfs_fsync_range(fd_dev->fd_file, start, end, 1);
                }
+               if (ret > 0 && cmd->prot_type) {
+                       ret = fd_do_prot_rw(cmd, &fd_prot, true);
+                       if (ret < 0)
+                               return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+               }
        }
  
-       if (ret < 0)
+       if (ret < 0) {
+               kfree(fd_prot.prot_sg);
+               vfree(fd_prot.prot_buf);
                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
+       }
  
        if (ret)
                target_complete_cmd(cmd, SAM_STAT_GOOD);
@@@ -700,6 -817,140 +817,140 @@@ static sector_t fd_get_blocks(struct se
                       dev->dev_attrib.block_size);
  }
  
+ static int fd_init_prot(struct se_device *dev)
+ {
+       struct fd_dev *fd_dev = FD_DEV(dev);
+       struct file *prot_file, *file = fd_dev->fd_file;
+       struct inode *inode;
+       int ret, flags = O_RDWR | O_CREAT | O_LARGEFILE | O_DSYNC;
+       char buf[FD_MAX_DEV_PROT_NAME];
+       if (!file) {
+               pr_err("Unable to locate fd_dev->fd_file\n");
+               return -ENODEV;
+       }
+       inode = file->f_mapping->host;
+       if (S_ISBLK(inode->i_mode)) {
+               pr_err("FILEIO Protection emulation only supported on"
+                      " !S_ISBLK\n");
+               return -ENOSYS;
+       }
+       if (fd_dev->fbd_flags & FDBD_HAS_BUFFERED_IO_WCE)
+               flags &= ~O_DSYNC;
+       snprintf(buf, FD_MAX_DEV_PROT_NAME, "%s.protection",
+                fd_dev->fd_dev_name);
+       prot_file = filp_open(buf, flags, 0600);
+       if (IS_ERR(prot_file)) {
+               pr_err("filp_open(%s) failed\n", buf);
+               ret = PTR_ERR(prot_file);
+               return ret;
+       }
+       fd_dev->fd_prot_file = prot_file;
+       return 0;
+ }
+ static void fd_init_format_buf(struct se_device *dev, unsigned char *buf,
+                              u32 unit_size, u32 *ref_tag, u16 app_tag,
+                              bool inc_reftag)
+ {
+       unsigned char *p = buf;
+       int i;
+       for (i = 0; i < unit_size; i += dev->prot_length) {
+               *((u16 *)&p[0]) = 0xffff;
+               *((__be16 *)&p[2]) = cpu_to_be16(app_tag);
+               *((__be32 *)&p[4]) = cpu_to_be32(*ref_tag);
+               if (inc_reftag)
+                       (*ref_tag)++;
+               p += dev->prot_length;
+       }
+ }
+ static int fd_format_prot(struct se_device *dev)
+ {
+       struct fd_dev *fd_dev = FD_DEV(dev);
+       struct file *prot_fd = fd_dev->fd_prot_file;
+       sector_t prot_length, prot;
+       unsigned char *buf;
+       loff_t pos = 0;
+       u32 ref_tag = 0;
+       int unit_size = FDBD_FORMAT_UNIT_SIZE * dev->dev_attrib.block_size;
+       int rc, ret = 0, size, len;
+       bool inc_reftag = false;
+       if (!dev->dev_attrib.pi_prot_type) {
+               pr_err("Unable to format_prot while pi_prot_type == 0\n");
+               return -ENODEV;
+       }
+       if (!prot_fd) {
+               pr_err("Unable to locate fd_dev->fd_prot_file\n");
+               return -ENODEV;
+       }
+       switch (dev->dev_attrib.pi_prot_type) {
+       case TARGET_DIF_TYPE3_PROT:
+               ref_tag = 0xffffffff;
+               break;
+       case TARGET_DIF_TYPE2_PROT:
+       case TARGET_DIF_TYPE1_PROT:
+               inc_reftag = true;
+               break;
+       default:
+               break;
+       }
+       buf = vzalloc(unit_size);
+       if (!buf) {
+               pr_err("Unable to allocate FILEIO prot buf\n");
+               return -ENOMEM;
+       }
+       prot_length = (dev->transport->get_blocks(dev) + 1) * dev->prot_length;
+       size = prot_length;
+       pr_debug("Using FILEIO prot_length: %llu\n",
+                (unsigned long long)prot_length);
+       for (prot = 0; prot < prot_length; prot += unit_size) {
+               fd_init_format_buf(dev, buf, unit_size, &ref_tag, 0xffff,
+                                  inc_reftag);
+               len = min(unit_size, size);
+               rc = kernel_write(prot_fd, buf, len, pos);
+               if (rc != len) {
+                       pr_err("vfs_write to prot file failed: %d\n", rc);
+                       ret = -ENODEV;
+                       goto out;
+               }
+               pos += len;
+               size -= len;
+       }
+ out:
+       vfree(buf);
+       return ret;
+ }
+ static void fd_free_prot(struct se_device *dev)
+ {
+       struct fd_dev *fd_dev = FD_DEV(dev);
+       if (!fd_dev->fd_prot_file)
+               return;
+       filp_close(fd_dev->fd_prot_file, NULL);
+       fd_dev->fd_prot_file = NULL;
+ }
  static struct sbc_ops fd_sbc_ops = {
        .execute_rw             = fd_execute_rw,
        .execute_sync_cache     = fd_execute_sync_cache,
@@@ -730,6 -981,9 +981,9 @@@ static struct se_subsystem_api fileio_t
        .show_configfs_dev_params = fd_show_configfs_dev_params,
        .get_device_type        = sbc_get_device_type,
        .get_blocks             = fd_get_blocks,
+       .init_prot              = fd_init_prot,
+       .format_prot            = fd_format_prot,
+       .free_prot              = fd_free_prot,
  };
  
  static int __init fileio_module_init(void)
index d7772c167685fecc89caf699884198b9a9d9f999,97e5e7dd4259df84cdacfddf9fc4d0232ab42066..182cbb2950395efa43630364ba3d3f78aca3f925
@@@ -4,13 -4,11 +4,14 @@@
  #define FD_VERSION            "4.0"
  
  #define FD_MAX_DEV_NAME               256
+ #define FD_MAX_DEV_PROT_NAME  FD_MAX_DEV_NAME + 16
  #define FD_DEVICE_QUEUE_DEPTH 32
  #define FD_MAX_DEVICE_QUEUE_DEPTH 128
  #define FD_BLOCKSIZE          512
 -#define FD_MAX_SECTORS                2048
 +/*
 + * Limited by the number of iovecs (2048) per vfs_[writev,readv] call
 + */
 +#define FD_MAX_BYTES          8388608
  
  #define RRF_EMULATE_CDB               0x01
  #define RRF_GOT_LBA           0x02
  #define FBDF_HAS_PATH         0x01
  #define FBDF_HAS_SIZE         0x02
  #define FDBD_HAS_BUFFERED_IO_WCE 0x04
+ #define FDBD_FORMAT_UNIT_SIZE 2048
+ struct fd_prot {
+       unsigned char   *prot_buf;
+       struct scatterlist *prot_sg;
+       u32 prot_sg_nents;
+ };
  
  struct fd_dev {
        struct se_device dev;
@@@ -32,6 -37,7 +40,7 @@@
        u32             fd_block_size;
        unsigned long long fd_dev_size;
        struct file     *fd_file;
+       struct file     *fd_prot_file;
        /* FILEIO HBA device is connected to */
        struct fd_host *fd_host;
  } ____cacheline_aligned;
index 2d29356d0c85a076e90db99bbda9f1a428f9c336,293d9b081e8a9be9a754774a754a931fb561e4ec..554d4f75a75a6263ac7fb731804f787af64731ac
@@@ -91,6 -91,7 +91,7 @@@ static int iblock_configure_device(stru
        struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
        struct request_queue *q;
        struct block_device *bd = NULL;
+       struct blk_integrity *bi;
        fmode_t mode;
        int ret = -ENOMEM;
  
        if (blk_queue_nonrot(q))
                dev->dev_attrib.is_nonrot = 1;
  
+       bi = bdev_get_integrity(bd);
+       if (bi) {
+               struct bio_set *bs = ib_dev->ibd_bio_set;
+               if (!strcmp(bi->name, "T10-DIF-TYPE3-IP") ||
+                   !strcmp(bi->name, "T10-DIF-TYPE1-IP")) {
+                       pr_err("IBLOCK export of blk_integrity: %s not"
+                              " supported\n", bi->name);
+                       ret = -ENOSYS;
+                       goto out_blkdev_put;
+               }
+               if (!strcmp(bi->name, "T10-DIF-TYPE3-CRC")) {
+                       dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE3_PROT;
+               } else if (!strcmp(bi->name, "T10-DIF-TYPE1-CRC")) {
+                       dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE1_PROT;
+               }
+               if (dev->dev_attrib.pi_prot_type) {
+                       if (bioset_integrity_create(bs, IBLOCK_BIO_POOL_SIZE) < 0) {
+                               pr_err("Unable to allocate bioset for PI\n");
+                               ret = -ENOMEM;
+                               goto out_blkdev_put;
+                       }
+                       pr_debug("IBLOCK setup BIP bs->bio_integrity_pool: %p\n",
+                                bs->bio_integrity_pool);
+               }
+               dev->dev_attrib.hw_pi_prot_type = dev->dev_attrib.pi_prot_type;
+       }
        return 0;
  
+ out_blkdev_put:
+       blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
  out_free_bioset:
        bioset_free(ib_dev->ibd_bio_set);
        ib_dev->ibd_bio_set = NULL;
@@@ -170,8 -203,10 +203,10 @@@ static void iblock_free_device(struct s
  
        if (ib_dev->ibd_bd != NULL)
                blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
-       if (ib_dev->ibd_bio_set != NULL)
+       if (ib_dev->ibd_bio_set != NULL) {
+               bioset_integrity_free(ib_dev->ibd_bio_set);
                bioset_free(ib_dev->ibd_bio_set);
+       }
        kfree(ib_dev);
  }
  
@@@ -319,7 -354,7 +354,7 @@@ iblock_get_bio(struct se_cmd *cmd, sect
        bio->bi_bdev = ib_dev->ibd_bd;
        bio->bi_private = cmd;
        bio->bi_end_io = &iblock_bio_done;
 -      bio->bi_sector = lba;
 +      bio->bi_iter.bi_sector = lba;
  
        return bio;
  }
@@@ -586,13 -621,58 +621,58 @@@ static ssize_t iblock_show_configfs_dev
        return bl;
  }
  
 -      bip->bip_size = (cmd->data_length / dev->dev_attrib.block_size) *
+ static int
+ iblock_alloc_bip(struct se_cmd *cmd, struct bio *bio)
+ {
+       struct se_device *dev = cmd->se_dev;
+       struct blk_integrity *bi;
+       struct bio_integrity_payload *bip;
+       struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
+       struct scatterlist *sg;
+       int i, rc;
+       bi = bdev_get_integrity(ib_dev->ibd_bd);
+       if (!bi) {
+               pr_err("Unable to locate bio_integrity\n");
+               return -ENODEV;
+       }
+       bip = bio_integrity_alloc(bio, GFP_NOIO, cmd->t_prot_nents);
+       if (!bip) {
+               pr_err("Unable to allocate bio_integrity_payload\n");
+               return -ENOMEM;
+       }
 -      bip->bip_sector = bio->bi_sector;
++      bip->bip_iter.bi_size = (cmd->data_length / dev->dev_attrib.block_size) *
+                        dev->prot_length;
 -      pr_debug("IBLOCK BIP Size: %u Sector: %llu\n", bip->bip_size,
 -               (unsigned long long)bip->bip_sector);
++      bip->bip_iter.bi_sector = bio->bi_iter.bi_sector;
++      pr_debug("IBLOCK BIP Size: %u Sector: %llu\n", bip->bip_iter.bi_size,
++               (unsigned long long)bip->bip_iter.bi_sector);
+       for_each_sg(cmd->t_prot_sg, sg, cmd->t_prot_nents, i) {
+               rc = bio_integrity_add_page(bio, sg_page(sg), sg->length,
+                                           sg->offset);
+               if (rc != sg->length) {
+                       pr_err("bio_integrity_add_page() failed; %d\n", rc);
+                       return -ENOMEM;
+               }
+               pr_debug("Added bio integrity page: %p length: %d offset; %d\n",
+                        sg_page(sg), sg->length, sg->offset);
+       }
+       return 0;
+ }
  static sense_reason_t
  iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
                  enum dma_data_direction data_direction)
  {
        struct se_device *dev = cmd->se_dev;
        struct iblock_req *ibr;
-       struct bio *bio;
+       struct bio *bio, *bio_start;
        struct bio_list list;
        struct scatterlist *sg;
        u32 sg_num = sgl_nents;
        if (!bio)
                goto fail_free_ibr;
  
+       bio_start = bio;
        bio_list_init(&list);
        bio_list_add(&list, bio);
  
                sg_num--;
        }
  
+       if (cmd->prot_type) {
+               int rc = iblock_alloc_bip(cmd, bio_start);
+               if (rc)
+                       goto fail_put_bios;
+       }
        iblock_submit_bios(&list, rw);
        iblock_complete_cmd(cmd);
        return 0;
@@@ -763,7 -850,7 +850,7 @@@ iblock_parse_cdb(struct se_cmd *cmd
        return sbc_parse_cdb(cmd, &iblock_sbc_ops);
  }
  
- bool iblock_get_write_cache(struct se_device *dev)
static bool iblock_get_write_cache(struct se_device *dev)
  {
        struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
        struct block_device *bd = ib_dev->ibd_bd;
index 2a573de19a9fdceea07d233f15a699be6c10c770,d1df39a05d885edaa46c1ba3d0e1742299913ae5..c036595b17cfc9cfc75f9bd57d146bfb4179d67b
@@@ -278,6 -278,7 +278,6 @@@ struct se_node_acl *core_tpg_check_init
        snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
        acl->se_tpg = tpg;
        acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
 -      spin_lock_init(&acl->stats_lock);
        acl->dynamic_node_acl = 1;
  
        tpg->se_tpg_tfo->set_default_node_attributes(acl);
@@@ -405,6 -406,7 +405,6 @@@ struct se_node_acl *core_tpg_add_initia
        snprintf(acl->initiatorname, TRANSPORT_IQN_LEN, "%s", initiatorname);
        acl->se_tpg = tpg;
        acl->acl_index = scsi_get_new_index(SCSI_AUTH_INTR_INDEX);
 -      spin_lock_init(&acl->stats_lock);
  
        tpg->se_tpg_tfo->set_default_node_attributes(acl);
  
@@@ -656,9 -658,15 +656,9 @@@ static int core_tpg_setup_virtual_lun0(
        spin_lock_init(&lun->lun_sep_lock);
        init_completion(&lun->lun_ref_comp);
  
-       ret = core_tpg_post_addlun(se_tpg, lun, lun_access, dev);
 -      ret = percpu_ref_init(&lun->lun_ref, core_tpg_lun_ref_release);
 -      if (ret < 0)
 -              return ret;
 -
+       ret = core_tpg_add_lun(se_tpg, lun, lun_access, dev);
 -      if (ret < 0) {
 -              percpu_ref_cancel_init(&lun->lun_ref);
 +      if (ret < 0)
                return ret;
 -      }
  
        return 0;
  }
@@@ -781,7 -789,7 +781,7 @@@ int core_tpg_deregister(struct se_porta
  }
  EXPORT_SYMBOL(core_tpg_deregister);
  
- struct se_lun *core_tpg_pre_addlun(
+ struct se_lun *core_tpg_alloc_lun(
        struct se_portal_group *tpg,
        u32 unpacked_lun)
  {
        return lun;
  }
  
- int core_tpg_post_addlun(
+ int core_tpg_add_lun(
        struct se_portal_group *tpg,
        struct se_lun *lun,
        u32 lun_access,
-       void *lun_ptr)
+       struct se_device *dev)
  {
        int ret;
  
        if (ret < 0)
                return ret;
  
-       ret = core_dev_export(lun_ptr, tpg, lun);
+       ret = core_dev_export(dev, tpg, lun);
        if (ret < 0) {
                percpu_ref_cancel_init(&lun->lun_ref);
                return ret;
diff --combined drivers/vhost/scsi.c
index 1e4c75c5b36bd3c9a394c755c39a291a0d2b831d,2d084fb8d4d3621cfe4b50c8d9c5c8d5b39a5603..0a025b8e2a12efd2f58434b8084a45ad2454b604
@@@ -728,7 -728,7 +728,7 @@@ vhost_scsi_get_tag(struct vhost_virtque
        }
        se_sess = tv_nexus->tvn_se_sess;
  
-       tag = percpu_ida_alloc(&se_sess->sess_tag_pool, GFP_ATOMIC);
+       tag = percpu_ida_alloc(&se_sess->sess_tag_pool, TASK_RUNNING);
        if (tag < 0) {
                pr_err("Unable to obtain tag for tcm_vhost_cmd\n");
                return ERR_PTR(-ENOMEM);
@@@ -889,7 -889,7 +889,7 @@@ static void tcm_vhost_submission_work(s
                        cmd->tvc_lun, cmd->tvc_exp_data_len,
                        cmd->tvc_task_attr, cmd->tvc_data_direction,
                        TARGET_SCF_ACK_KREF, sg_ptr, cmd->tvc_sgl_count,
-                       sg_bidi_ptr, sg_no_bidi);
+                       sg_bidi_ptr, sg_no_bidi, NULL, 0);
        if (rc < 0) {
                transport_send_check_condition_and_sense(se_cmd,
                                TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE, 0);
@@@ -1417,13 -1417,18 +1417,13 @@@ static int vhost_scsi_open(struct inod
                vqs[i] = &vs->vqs[i].vq;
                vs->vqs[i].vq.handle_kick = vhost_scsi_handle_kick;
        }
 -      r = vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ);
 +      vhost_dev_init(&vs->dev, vqs, VHOST_SCSI_MAX_VQ);
  
        tcm_vhost_init_inflight(vs, NULL);
  
 -      if (r < 0)
 -              goto err_init;
 -
        f->private_data = vs;
        return 0;
  
 -err_init:
 -      kfree(vqs);
  err_vqs:
        vhost_scsi_free(vs);
  err_vs:
index 321301c0a643bfb32303b3f48cecae720f3fad87,909dacbd230f54139de6751a6e452ff91711ef8b..c9c791209cd18e579c5477b6b1cbe195cba744a0
@@@ -37,6 -37,9 +37,9 @@@
  /* Used by transport_send_check_condition_and_sense() */
  #define SPC_SENSE_KEY_OFFSET                  2
  #define SPC_ADD_SENSE_LEN_OFFSET              7
+ #define SPC_DESC_TYPE_OFFSET                  8
+ #define SPC_ADDITIONAL_DESC_LEN_OFFSET                9
+ #define SPC_VALIDITY_OFFSET                   10
  #define SPC_ASC_KEY_OFFSET                    12
  #define SPC_ASCQ_KEY_OFFSET                   13
  #define TRANSPORT_IQN_LEN                     224
  /* Queue Algorithm Modifier default for restricted reordering in control mode page */
  #define DA_EMULATE_REST_REORD                 0
  
- #define SE_INQUIRY_BUF                                512
+ #define SE_INQUIRY_BUF                                1024
  #define SE_MODE_PAGE_BUF                      512
  #define SE_SENSE_BUF                          96
  
@@@ -205,6 -208,9 +208,9 @@@ enum tcm_sense_reason_table 
        TCM_OUT_OF_RESOURCES                    = R(0x12),
        TCM_PARAMETER_LIST_LENGTH_ERROR         = R(0x13),
        TCM_MISCOMPARE_VERIFY                   = R(0x14),
+       TCM_LOGICAL_BLOCK_GUARD_CHECK_FAILED    = R(0x15),
+       TCM_LOGICAL_BLOCK_APP_TAG_CHECK_FAILED  = R(0x16),
+       TCM_LOGICAL_BLOCK_REF_TAG_CHECK_FAILED  = R(0x17),
  #undef R
  };
  
@@@ -247,10 -253,28 +253,28 @@@ typedef enum 
  
  struct se_cmd;
  
+ struct t10_alua_lba_map_member {
+       struct list_head lba_map_mem_list;
+       int lba_map_mem_alua_state;
+       int lba_map_mem_alua_pg_id;
+ };
+ struct t10_alua_lba_map {
+       u64 lba_map_first_lba;
+       u64 lba_map_last_lba;
+       struct list_head lba_map_list;
+       struct list_head lba_map_mem_list;
+ };
  struct t10_alua {
        /* ALUA Target Port Group ID */
        u16     alua_tg_pt_gps_counter;
        u32     alua_tg_pt_gps_count;
+       /* Referrals support */
+       spinlock_t lba_map_lock;
+       u32     lba_map_segment_size;
+       u32     lba_map_segment_multiplier;
+       struct list_head lba_map_list;
        spinlock_t tg_pt_gps_lock;
        struct se_device *t10_dev;
        /* Used for default ALUA Target Port Group */
@@@ -284,6 -308,8 +308,8 @@@ struct t10_alua_tg_pt_gp 
        u16     tg_pt_gp_id;
        int     tg_pt_gp_valid_id;
        int     tg_pt_gp_alua_supported_states;
+       int     tg_pt_gp_alua_pending_state;
+       int     tg_pt_gp_alua_previous_state;
        int     tg_pt_gp_alua_access_status;
        int     tg_pt_gp_alua_access_type;
        int     tg_pt_gp_nonop_delay_msecs;
        int     tg_pt_gp_implicit_trans_secs;
        int     tg_pt_gp_pref;
        int     tg_pt_gp_write_metadata;
-       /* Used by struct t10_alua_tg_pt_gp->tg_pt_gp_md_buf_len */
- #define ALUA_MD_BUF_LEN                               1024
-       u32     tg_pt_gp_md_buf_len;
        u32     tg_pt_gp_members;
        atomic_t tg_pt_gp_alua_access_state;
        atomic_t tg_pt_gp_ref_cnt;
        struct config_group tg_pt_gp_group;
        struct list_head tg_pt_gp_list;
        struct list_head tg_pt_gp_mem_list;
+       struct se_port *tg_pt_gp_alua_port;
+       struct se_node_acl *tg_pt_gp_alua_nacl;
+       struct delayed_work tg_pt_gp_transition_work;
+       struct completion *tg_pt_gp_transition_complete;
  };
  
  struct t10_alua_tg_pt_gp_member {
@@@ -414,6 -441,34 +441,34 @@@ struct se_tmr_req 
        struct list_head        tmr_list;
  };
  
+ enum target_prot_op {
+       TARGET_PROT_NORMAL = 0,
+       TARGET_PROT_DIN_INSERT,
+       TARGET_PROT_DOUT_INSERT,
+       TARGET_PROT_DIN_STRIP,
+       TARGET_PROT_DOUT_STRIP,
+       TARGET_PROT_DIN_PASS,
+       TARGET_PROT_DOUT_PASS,
+ };
+ enum target_prot_ho {
+       PROT_SEPERATED,
+       PROT_INTERLEAVED,
+ };
+ enum target_prot_type {
+       TARGET_DIF_TYPE0_PROT,
+       TARGET_DIF_TYPE1_PROT,
+       TARGET_DIF_TYPE2_PROT,
+       TARGET_DIF_TYPE3_PROT,
+ };
+ struct se_dif_v1_tuple {
+       __be16                  guard_tag;
+       __be16                  app_tag;
+       __be32                  ref_tag;
+ };
  struct se_cmd {
        /* SAM response code being sent to initiator */
        u8                      scsi_status;
        void                    *priv;
  
        /* Used for lun->lun_ref counting */
-       bool                    lun_ref_active;
+       int                     lun_ref_active;
+       /* DIF related members */
+       enum target_prot_op     prot_op;
+       enum target_prot_type   prot_type;
+       u32                     prot_length;
+       u32                     reftag_seed;
+       struct scatterlist      *t_prot_sg;
+       unsigned int            t_prot_nents;
+       enum target_prot_ho     prot_handover;
+       sense_reason_t          pi_err;
+       sector_t                bad_sector;
  };
  
  struct se_ua {
        u8                      ua_asc;
        u8                      ua_ascq;
        struct se_node_acl      *ua_nacl;
-       struct list_head        ua_dev_list;
        struct list_head        ua_nacl_list;
  };
  
@@@ -517,6 -582,10 +582,6 @@@ struct se_node_acl 
        u32                     acl_index;
  #define MAX_ACL_TAG_SIZE 64
        char                    acl_tag[MAX_ACL_TAG_SIZE];
 -      u64                     num_cmds;
 -      u64                     read_bytes;
 -      u64                     write_bytes;
 -      spinlock_t              stats_lock;
        /* Used for PR SPEC_I_PT=1 and REGISTER_AND_MOVE */
        atomic_t                acl_pr_ref_count;
        struct se_dev_entry     **device_list;
@@@ -605,6 -674,9 +670,9 @@@ struct se_dev_attrib 
        int             emulate_tpws;
        int             emulate_caw;
        int             emulate_3pc;
+       int             pi_prot_format;
+       enum target_prot_type pi_prot_type;
+       enum target_prot_type hw_pi_prot_type;
        int             enforce_pr_isids;
        int             is_nonrot;
        int             emulate_rest_reord;
        u32             unmap_granularity;
        u32             unmap_granularity_alignment;
        u32             max_write_same_len;
 +      u32             max_bytes_per_io;
        struct se_device *da_dev;
        struct config_group da_group;
  };
@@@ -736,6 -807,8 +804,8 @@@ struct se_device 
        /* Linked list for struct se_hba struct se_device list */
        struct list_head        dev_list;
        struct se_lun           xcopy_lun;
+       /* Protection Information */
+       int                     prot_length;
  };
  
  struct se_hba {