target: remove TRANSPORT_DEFERRED_CMD state
index b38b6c993e6555855be8cd830451d41a0ab6e1d5..81352b7f9130144de1f366fc7db300902bd15e48 100644
@@ -472,9 +472,9 @@ void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
        struct se_dev_entry *deve;
        u32 i;
 
-       spin_lock_bh(&tpg->acl_node_lock);
+       spin_lock_irq(&tpg->acl_node_lock);
        list_for_each_entry(nacl, &tpg->acl_node_list, acl_list) {
-               spin_unlock_bh(&tpg->acl_node_lock);
+               spin_unlock_irq(&tpg->acl_node_lock);
 
                spin_lock_irq(&nacl->device_list_lock);
                for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
@@ -491,9 +491,9 @@ void core_clear_lun_from_tpg(struct se_lun *lun, struct se_portal_group *tpg)
                }
                spin_unlock_irq(&nacl->device_list_lock);
 
-               spin_lock_bh(&tpg->acl_node_lock);
+               spin_lock_irq(&tpg->acl_node_lock);
        }
-       spin_unlock_bh(&tpg->acl_node_lock);
+       spin_unlock_irq(&tpg->acl_node_lock);
 }
 
 static struct se_port *core_alloc_port(struct se_device *dev)
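
The two hunks above switch tpg->acl_node_lock from the _bh to the _irq
spinlock variants while keeping the existing drop-and-reacquire walk: the
lock is held only to step acl_node_list and is released around the per-ACL
work that takes nacl->device_list_lock. Below is a minimal userspace sketch
of that pattern; the list, lock, and helper names are illustrative, not
kernel code.

#include <pthread.h>
#include <stdio.h>

struct node {
        int id;
        struct node *next;
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };

static void per_node_work(struct node *n)
{
        /* In the kernel walk this is where nacl->device_list_lock is taken. */
        printf("working on node %d\n", n->id);
}

int main(void)
{
        struct node *n;

        pthread_mutex_lock(&list_lock);
        for (n = &a; n; n = n->next) {
                pthread_mutex_unlock(&list_lock); /* drop around the work */
                per_node_work(n);
                pthread_mutex_lock(&list_lock);   /* retake to step the list */
        }
        pthread_mutex_unlock(&list_lock);
        return 0;
}

Dropping the lock mid-walk is only safe when nodes cannot be freed while it
is released; the TPG code relies on the surrounding ACL lifetime rules for
that, and the sketch inherits the same caveat.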
@@ -839,6 +839,24 @@ int se_dev_check_shutdown(struct se_device *dev)
        return ret;
 }
 
+u32 se_dev_align_max_sectors(u32 max_sectors, u32 block_size)
+{
+       u32 tmp, aligned_max_sectors;
+       /*
+        * Limit max_sectors to a PAGE_SIZE aligned value for modern
+        * transport_allocate_data_tasks() operation.
+        */
+       tmp = rounddown((max_sectors * block_size), PAGE_SIZE);
+       aligned_max_sectors = (tmp / block_size);
+       if (max_sectors != aligned_max_sectors) {
+               pr_info("Rounding down aligned max_sectors from %u to %u\n",
+                       max_sectors, aligned_max_sectors);
+               return aligned_max_sectors;
+       }
+
+       return max_sectors;
+}
+
 void se_dev_set_default_attribs(
        struct se_device *dev,
        struct se_dev_limits *dev_limits)
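
se_dev_align_max_sectors() rounds the byte total max_sectors * block_size
down to a PAGE_SIZE multiple and converts back to sectors, so the tasks cut
out by transport_allocate_data_tasks() always cover whole pages. Worked
example with 4 KiB pages and 512-byte blocks: 255 sectors is 130560 bytes,
which rounds down to 126976 bytes, i.e. 248 sectors. The standalone sketch
below reproduces the arithmetic; PAGE_SIZE and rounddown() are userspace
stand-ins for the kernel definitions.

#include <stdio.h>

#define PAGE_SIZE 4096u                      /* typical; arch-dependent */
#define rounddown(x, y) (((x) / (y)) * (y))  /* enough for unsigned args */

static unsigned int align_max_sectors(unsigned int max_sectors,
                                      unsigned int block_size)
{
        unsigned int bytes = rounddown(max_sectors * block_size, PAGE_SIZE);

        return bytes / block_size;
}

int main(void)
{
        printf("%u\n", align_max_sectors(255, 512));  /* 130560 -> 126976 -> 248 */
        printf("%u\n", align_max_sectors(1024, 512)); /* already aligned: 1024 */
        return 0;
}

A value whose byte total is already page aligned passes through unchanged,
which is why the function only logs when the rounding actually changed
something.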
@@ -878,6 +896,11 @@ void se_dev_set_default_attribs(
         * max_sectors is based on subsystem plugin dependent requirements.
         */
        dev->se_sub_dev->se_dev_attrib.hw_max_sectors = limits->max_hw_sectors;
+       /*
+        * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()
+        */
+       limits->max_sectors = se_dev_align_max_sectors(limits->max_sectors,
+                                               limits->logical_block_size);
        dev->se_sub_dev->se_dev_attrib.max_sectors = limits->max_sectors;
        /*
         * Set optimal_sectors from max_sectors, which can be lowered via
@@ -949,36 +972,24 @@ int se_dev_set_unmap_granularity_alignment(
 
 int se_dev_set_emulate_dpo(struct se_device *dev, int flag)
 {
-       if ((flag != 0) && (flag != 1)) {
+       if (flag != 0 && flag != 1) {
                pr_err("Illegal value %d\n", flag);
                return -EINVAL;
        }
-       if (dev->transport->dpo_emulated == NULL) {
-               pr_err("dev->transport->dpo_emulated is NULL\n");
-               return -EINVAL;
-       }
-       if (dev->transport->dpo_emulated(dev) == 0) {
-               pr_err("dev->transport->dpo_emulated not supported\n");
-               return -EINVAL;
-       }
-       dev->se_sub_dev->se_dev_attrib.emulate_dpo = flag;
-       pr_debug("dev[%p]: SE Device Page Out (DPO) Emulation"
-                       " bit: %d\n", dev, dev->se_sub_dev->se_dev_attrib.emulate_dpo);
-       return 0;
+
+       pr_err("dpo_emulated not supported\n");
+       return -EINVAL;
 }
 
 int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
 {
-       if ((flag != 0) && (flag != 1)) {
+       if (flag != 0 && flag != 1) {
                pr_err("Illegal value %d\n", flag);
                return -EINVAL;
        }
-       if (dev->transport->fua_write_emulated == NULL) {
-               pr_err("dev->transport->fua_write_emulated is NULL\n");
-               return -EINVAL;
-       }
-       if (dev->transport->fua_write_emulated(dev) == 0) {
-               pr_err("dev->transport->fua_write_emulated not supported\n");
+
+       if (dev->transport->fua_write_emulated == 0) {
+               pr_err("fua_write_emulated not supported\n");
                return -EINVAL;
        }
        dev->se_sub_dev->se_dev_attrib.emulate_fua_write = flag;
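
The dpo and fua hunks drop the old dpo_emulated()/fua_write_emulated()
backend methods: DPO and FUA-read emulation are now rejected outright, and
FUA-write support is read from what is presumably a plain flag in the
backend template, tested with == 0 rather than called. A hypothetical
sketch of that flag-based capability check:

#include <errno.h>
#include <stdio.h>

/* Stand-in for the backend template: the capability is a flag, not a
 * method to call. */
struct backend_template {
        unsigned int fua_write_emulated:1;
};

static int set_emulate_fua_write(struct backend_template *t, int flag)
{
        if (flag != 0 && flag != 1)
                return -EINVAL;
        if (t->fua_write_emulated == 0)  /* same test as in the hunk */
                return -EINVAL;
        printf("emulate_fua_write = %d\n", flag);
        return 0;
}

int main(void)
{
        struct backend_template with_fua = { .fua_write_emulated = 1 };
        struct backend_template without_fua = { .fua_write_emulated = 0 };

        set_emulate_fua_write(&with_fua, 1);    /* accepted */
        set_emulate_fua_write(&without_fua, 1); /* rejected, -EINVAL */
        return 0;
}

A capability that never varies per device instance does not need an
indirect call; a bit in the template answers the question at no cost.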
@@ -989,36 +1000,23 @@ int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
 
 int se_dev_set_emulate_fua_read(struct se_device *dev, int flag)
 {
-       if ((flag != 0) && (flag != 1)) {
+       if (flag != 0 && flag != 1) {
                pr_err("Illegal value %d\n", flag);
                return -EINVAL;
        }
-       if (dev->transport->fua_read_emulated == NULL) {
-               pr_err("dev->transport->fua_read_emulated is NULL\n");
-               return -EINVAL;
-       }
-       if (dev->transport->fua_read_emulated(dev) == 0) {
-               pr_err("dev->transport->fua_read_emulated not supported\n");
-               return -EINVAL;
-       }
-       dev->se_sub_dev->se_dev_attrib.emulate_fua_read = flag;
-       pr_debug("dev[%p]: SE Device Forced Unit Access READs: %d\n",
-                       dev, dev->se_sub_dev->se_dev_attrib.emulate_fua_read);
-       return 0;
+
+       pr_err("fua read emulated not supported\n");
+       return -EINVAL;
 }
 
 int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
 {
-       if ((flag != 0) && (flag != 1)) {
+       if (flag != 0 && flag != 1) {
                pr_err("Illegal value %d\n", flag);
                return -EINVAL;
        }
-       if (dev->transport->write_cache_emulated == NULL) {
-               pr_err("dev->transport->write_cache_emulated is NULL\n");
-               return -EINVAL;
-       }
-       if (dev->transport->write_cache_emulated(dev) == 0) {
-               pr_err("dev->transport->write_cache_emulated not supported\n");
+       if (dev->transport->write_cache_emulated == 0) {
+               pr_err("write_cache_emulated not supported\n");
                return -EINVAL;
        }
        dev->se_sub_dev->se_dev_attrib.emulate_write_cache = flag;
@@ -1242,6 +1240,11 @@ int se_dev_set_max_sectors(struct se_device *dev, u32 max_sectors)
                        return -EINVAL;
                }
        }
+       /*
+        * Align max_sectors down to PAGE_SIZE to follow transport_allocate_data_tasks()
+        */
+       max_sectors = se_dev_align_max_sectors(max_sectors,
+                               dev->se_sub_dev->se_dev_attrib.block_size);
 
        dev->se_sub_dev->se_dev_attrib.max_sectors = max_sectors;
        pr_debug("dev[%p]: SE Device max_sectors changed to %u\n",
@@ -1344,15 +1347,17 @@ struct se_lun *core_dev_add_lun(
         */
        if (tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)) {
                struct se_node_acl *acl;
-               spin_lock_bh(&tpg->acl_node_lock);
+               spin_lock_irq(&tpg->acl_node_lock);
                list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
-                       if (acl->dynamic_node_acl) {
-                               spin_unlock_bh(&tpg->acl_node_lock);
+                       if (acl->dynamic_node_acl &&
+                           (!tpg->se_tpg_tfo->tpg_check_demo_mode_login_only ||
+                            !tpg->se_tpg_tfo->tpg_check_demo_mode_login_only(tpg))) {
+                               spin_unlock_irq(&tpg->acl_node_lock);
                                core_tpg_add_node_to_devs(acl, tpg);
-                               spin_lock_bh(&tpg->acl_node_lock);
+                               spin_lock_irq(&tpg->acl_node_lock);
                        }
                }
-               spin_unlock_bh(&tpg->acl_node_lock);
+               spin_unlock_irq(&tpg->acl_node_lock);
        }
 
        return lun_p;
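
Besides the same _bh to _irq lock conversion, the final hunk adds LUNs to
dynamically generated ACLs only when the fabric either does not implement
tpg_check_demo_mode_login_only() or that hook returns false. The
NULL-check-then-call sequence is the usual way to treat a fabric op as
optional; a minimal sketch with hypothetical names:

#include <stdbool.h>
#include <stdio.h>

struct fabric_ops {
        bool (*demo_mode_login_only)(void);  /* optional hook, may be NULL */
};

static bool login_only(void) { return true; }

/* Mirrors the guard in the hunk: add LUNs unless the hook exists and
 * returns true. */
static bool should_add_node_to_devs(const struct fabric_ops *ops)
{
        return !ops->demo_mode_login_only || !ops->demo_mode_login_only();
}

int main(void)
{
        struct fabric_ops plain = { .demo_mode_login_only = NULL };
        struct fabric_ops restricted = { .demo_mode_login_only = login_only };

        printf("%d\n", should_add_node_to_devs(&plain));      /* 1: add LUNs */
        printf("%d\n", should_add_node_to_devs(&restricted)); /* 0: skip */
        return 0;
}

A fabric that leaves the hook NULL keeps the old behavior; one that
implements it and returns true gets demo-mode logins without automatic LUN
access.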