Pileus Git - ~andy/linux/commitdiff
Merge git://git.infradead.org/users/willy/linux-nvme
author: Linus Torvalds <torvalds@linux-foundation.org>
Wed, 5 Feb 2014 23:53:26 +0000 (15:53 -0800)
committer: Linus Torvalds <torvalds@linux-foundation.org>
Wed, 5 Feb 2014 23:53:26 +0000 (15:53 -0800)
Pull NVMe driver update from Matthew Wilcox:
 "Looks like I missed the merge window ...  but these are almost all
  bugfixes anyway (the ones that aren't have been baking for months)"

* git://git.infradead.org/users/willy/linux-nvme:
  NVMe: Namespace use after free on surprise removal
  NVMe: Correct uses of INIT_WORK
  NVMe: Include device and queue numbers in interrupt name
  NVMe: Add a pci_driver shutdown method
  NVMe: Disable admin queue on init failure
  NVMe: Dynamically allocate partition numbers
  NVMe: Async IO queue deletion
  NVMe: Surprise removal handling
  NVMe: Abort timed out commands
  NVMe: Schedule reset for failed controllers
  NVMe: Device resume error handling
  NVMe: Cache dev->pci_dev in a local pointer
  NVMe: Fix lockdep warnings
  NVMe: compat SG_IO ioctl
  NVMe: remove deprecated IRQF_DISABLED
  NVMe: Avoid shift operation when writing cq head doorbell

1  2 
drivers/block/nvme-core.c

index 1f14ac4039450e84137b4eab0dacf043aa46d16e,cd39390710a058398b326c92c35959aa5353d830..51824d1f23ea53df5d49cd6d4e10e07b6b45b1fb
@@@ -46,7 -46,6 +46,6 @@@
  #define NVME_Q_DEPTH 1024
  #define SQ_SIZE(depth)                (depth * sizeof(struct nvme_command))
  #define CQ_SIZE(depth)                (depth * sizeof(struct nvme_completion))
- #define NVME_MINORS 64
  #define ADMIN_TIMEOUT (60 * HZ)
  
  static int nvme_major;
@@@ -58,6 -57,17 +57,17 @@@ module_param(use_threaded_interrupts, i
  static DEFINE_SPINLOCK(dev_list_lock);
  static LIST_HEAD(dev_list);
  static struct task_struct *nvme_thread;
+ static struct workqueue_struct *nvme_workq;
+ static void nvme_reset_failed_dev(struct work_struct *ws);
+ struct async_cmd_info {
+       struct kthread_work work;
+       struct kthread_worker *worker;
+       u32 result;
+       int status;
+       void *ctx;
+ };
  
  /*
   * An NVM Express queue.  Each device has at least two (one for admin
@@@ -66,6 -76,7 +76,7 @@@
  struct nvme_queue {
        struct device *q_dmadev;
        struct nvme_dev *dev;
+       char irqname[24];       /* nvme4294967295-65535\0 */
        spinlock_t q_lock;
        struct nvme_command *sq_cmds;
        volatile struct nvme_completion *cqes;
        u16 sq_head;
        u16 sq_tail;
        u16 cq_head;
+       u16 qid;
        u8 cq_phase;
        u8 cqe_seen;
        u8 q_suspended;
+       struct async_cmd_info cmdinfo;
        unsigned long cmdid_data[];
  };
  
@@@ -97,6 -110,7 +110,7 @@@ static inline void _nvme_check_size(voi
        BUILD_BUG_ON(sizeof(struct nvme_delete_queue) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_features) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_format_cmd) != 64);
+       BUILD_BUG_ON(sizeof(struct nvme_abort_cmd) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_command) != 64);
        BUILD_BUG_ON(sizeof(struct nvme_id_ctrl) != 4096);
        BUILD_BUG_ON(sizeof(struct nvme_id_ns) != 4096);
@@@ -111,6 -125,7 +125,7 @@@ struct nvme_cmd_info 
        nvme_completion_fn fn;
        void *ctx;
        unsigned long timeout;
+       int aborted;
  };
  
  static struct nvme_cmd_info *nvme_cmd_info(struct nvme_queue *nvmeq)
@@@ -154,6 -169,7 +169,7 @@@ static int alloc_cmdid(struct nvme_queu
        info[cmdid].fn = handler;
        info[cmdid].ctx = ctx;
        info[cmdid].timeout = jiffies + timeout;
+       info[cmdid].aborted = 0;
        return cmdid;
  }
  
@@@ -172,6 -188,7 +188,7 @@@ static int alloc_cmdid_killable(struct 
  #define CMD_CTX_COMPLETED     (0x310 + CMD_CTX_BASE)
  #define CMD_CTX_INVALID               (0x314 + CMD_CTX_BASE)
  #define CMD_CTX_FLUSH         (0x318 + CMD_CTX_BASE)
+ #define CMD_CTX_ABORT         (0x31C + CMD_CTX_BASE)
  
  static void special_completion(struct nvme_dev *dev, void *ctx,
                                                struct nvme_completion *cqe)
                return;
        if (ctx == CMD_CTX_FLUSH)
                return;
+       if (ctx == CMD_CTX_ABORT) {
+               ++dev->abort_limit;
+               return;
+       }
        if (ctx == CMD_CTX_COMPLETED) {
                dev_warn(&dev->pci_dev->dev,
                                "completed id %d twice on queue %d\n",
        dev_warn(&dev->pci_dev->dev, "Unknown special completion %p\n", ctx);
  }
  
+ static void async_completion(struct nvme_dev *dev, void *ctx,
+                                               struct nvme_completion *cqe)
+ {
+       struct async_cmd_info *cmdinfo = ctx;
+       cmdinfo->result = le32_to_cpup(&cqe->result);
+       cmdinfo->status = le16_to_cpup(&cqe->status) >> 1;
+       queue_kthread_work(cmdinfo->worker, &cmdinfo->work);
+ }
  /*
   * Called with local interrupts disabled and the q_lock held.  May not sleep.
   */
@@@ -441,19 -471,104 +471,19 @@@ int nvme_setup_prps(struct nvme_dev *de
        return total_len;
  }
  
 -struct nvme_bio_pair {
 -      struct bio b1, b2, *parent;
 -      struct bio_vec *bv1, *bv2;
 -      int err;
 -      atomic_t cnt;
 -};
 -
 -static void nvme_bio_pair_endio(struct bio *bio, int err)
 -{
 -      struct nvme_bio_pair *bp = bio->bi_private;
 -
 -      if (err)
 -              bp->err = err;
 -
 -      if (atomic_dec_and_test(&bp->cnt)) {
 -              bio_endio(bp->parent, bp->err);
 -              kfree(bp->bv1);
 -              kfree(bp->bv2);
 -              kfree(bp);
 -      }
 -}
 -
 -static struct nvme_bio_pair *nvme_bio_split(struct bio *bio, int idx,
 -                                                      int len, int offset)
 -{
 -      struct nvme_bio_pair *bp;
 -
 -      BUG_ON(len > bio->bi_size);
 -      BUG_ON(idx > bio->bi_vcnt);
 -
 -      bp = kmalloc(sizeof(*bp), GFP_ATOMIC);
 -      if (!bp)
 -              return NULL;
 -      bp->err = 0;
 -
 -      bp->b1 = *bio;
 -      bp->b2 = *bio;
 -
 -      bp->b1.bi_size = len;
 -      bp->b2.bi_size -= len;
 -      bp->b1.bi_vcnt = idx;
 -      bp->b2.bi_idx = idx;
 -      bp->b2.bi_sector += len >> 9;
 -
 -      if (offset) {
 -              bp->bv1 = kmalloc(bio->bi_max_vecs * sizeof(struct bio_vec),
 -                                                              GFP_ATOMIC);
 -              if (!bp->bv1)
 -                      goto split_fail_1;
 -
 -              bp->bv2 = kmalloc(bio->bi_max_vecs * sizeof(struct bio_vec),
 -                                                              GFP_ATOMIC);
 -              if (!bp->bv2)
 -                      goto split_fail_2;
 -
 -              memcpy(bp->bv1, bio->bi_io_vec,
 -                      bio->bi_max_vecs * sizeof(struct bio_vec));
 -              memcpy(bp->bv2, bio->bi_io_vec,
 -                      bio->bi_max_vecs * sizeof(struct bio_vec));
 -
 -              bp->b1.bi_io_vec = bp->bv1;
 -              bp->b2.bi_io_vec = bp->bv2;
 -              bp->b2.bi_io_vec[idx].bv_offset += offset;
 -              bp->b2.bi_io_vec[idx].bv_len -= offset;
 -              bp->b1.bi_io_vec[idx].bv_len = offset;
 -              bp->b1.bi_vcnt++;
 -      } else
 -              bp->bv1 = bp->bv2 = NULL;
 -
 -      bp->b1.bi_private = bp;
 -      bp->b2.bi_private = bp;
 -
 -      bp->b1.bi_end_io = nvme_bio_pair_endio;
 -      bp->b2.bi_end_io = nvme_bio_pair_endio;
 -
 -      bp->parent = bio;
 -      atomic_set(&bp->cnt, 2);
 -
 -      return bp;
 -
 - split_fail_2:
 -      kfree(bp->bv1);
 - split_fail_1:
 -      kfree(bp);
 -      return NULL;
 -}
 -
  static int nvme_split_and_submit(struct bio *bio, struct nvme_queue *nvmeq,
 -                                              int idx, int len, int offset)
 +                               int len)
  {
 -      struct nvme_bio_pair *bp = nvme_bio_split(bio, idx, len, offset);
 -      if (!bp)
 +      struct bio *split = bio_split(bio, len >> 9, GFP_ATOMIC, NULL);
 +      if (!split)
                return -ENOMEM;
  
 +      bio_chain(split, bio);
 +
        if (bio_list_empty(&nvmeq->sq_cong))
                add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
 -      bio_list_add(&nvmeq->sq_cong, &bp->b1);
 -      bio_list_add(&nvmeq->sq_cong, &bp->b2);
 +      bio_list_add(&nvmeq->sq_cong, split);
 +      bio_list_add(&nvmeq->sq_cong, bio);
  
        return 0;
  }
  static int nvme_map_bio(struct nvme_queue *nvmeq, struct nvme_iod *iod,
                struct bio *bio, enum dma_data_direction dma_dir, int psegs)
  {
 -      struct bio_vec *bvec, *bvprv = NULL;
 +      struct bio_vec bvec, bvprv;
 +      struct bvec_iter iter;
        struct scatterlist *sg = NULL;
 -      int i, length = 0, nsegs = 0, split_len = bio->bi_size;
 +      int length = 0, nsegs = 0, split_len = bio->bi_iter.bi_size;
 +      int first = 1;
  
        if (nvmeq->dev->stripe_size)
                split_len = nvmeq->dev->stripe_size -
 -                      ((bio->bi_sector << 9) & (nvmeq->dev->stripe_size - 1));
 +                      ((bio->bi_iter.bi_sector << 9) &
 +                       (nvmeq->dev->stripe_size - 1));
  
        sg_init_table(iod->sg, psegs);
 -      bio_for_each_segment(bvec, bio, i) {
 -              if (bvprv && BIOVEC_PHYS_MERGEABLE(bvprv, bvec)) {
 -                      sg->length += bvec->bv_len;
 +      bio_for_each_segment(bvec, bio, iter) {
 +              if (!first && BIOVEC_PHYS_MERGEABLE(&bvprv, &bvec)) {
 +                      sg->length += bvec.bv_len;
                } else {
 -                      if (bvprv && BIOVEC_NOT_VIRT_MERGEABLE(bvprv, bvec))
 -                              return nvme_split_and_submit(bio, nvmeq, i,
 -                                                              length, 0);
 +                      if (!first && BIOVEC_NOT_VIRT_MERGEABLE(&bvprv, &bvec))
 +                              return nvme_split_and_submit(bio, nvmeq,
 +                                                           length);
  
                        sg = sg ? sg + 1 : iod->sg;
 -                      sg_set_page(sg, bvec->bv_page, bvec->bv_len,
 -                                                      bvec->bv_offset);
 +                      sg_set_page(sg, bvec.bv_page,
 +                                  bvec.bv_len, bvec.bv_offset);
                        nsegs++;
                }
  
 -              if (split_len - length < bvec->bv_len)
 -                      return nvme_split_and_submit(bio, nvmeq, i, split_len,
 -                                                      split_len - length);
 -              length += bvec->bv_len;
 +              if (split_len - length < bvec.bv_len)
 +                      return nvme_split_and_submit(bio, nvmeq, split_len);
 +              length += bvec.bv_len;
                bvprv = bvec;
 +              first = 0;
        }
        iod->nents = nsegs;
        sg_mark_end(sg);
        if (dma_map_sg(nvmeq->q_dmadev, iod->sg, iod->nents, dma_dir) == 0)
                return -ENOMEM;
  
 -      BUG_ON(length != bio->bi_size);
 +      BUG_ON(length != bio->bi_iter.bi_size);
        return length;
  }
  
@@@ -526,8 -638,8 +556,8 @@@ static int nvme_submit_discard(struct n
        iod->npages = 0;
  
        range->cattr = cpu_to_le32(0);
 -      range->nlb = cpu_to_le32(bio->bi_size >> ns->lba_shift);
 -      range->slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_sector));
 +      range->nlb = cpu_to_le32(bio->bi_iter.bi_size >> ns->lba_shift);
 +      range->slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_iter.bi_sector));
  
        memset(cmnd, 0, sizeof(*cmnd));
        cmnd->dsm.opcode = nvme_cmd_dsm;
@@@ -592,7 -704,7 +622,7 @@@ static int nvme_submit_bio_queue(struc
        }
  
        result = -ENOMEM;
 -      iod = nvme_alloc_iod(psegs, bio->bi_size, GFP_ATOMIC);
 +      iod = nvme_alloc_iod(psegs, bio->bi_iter.bi_size, GFP_ATOMIC);
        if (!iod)
                goto nomem;
        iod->private = bio;
        cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
        length = nvme_setup_prps(nvmeq->dev, &cmnd->common, iod, length,
                                                                GFP_ATOMIC);
 -      cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_sector));
 +      cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_iter.bi_sector));
        cmnd->rw.length = cpu_to_le16((length >> ns->lba_shift) - 1);
        cmnd->rw.control = cpu_to_le16(control);
        cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
@@@ -693,7 -805,7 +723,7 @@@ static int nvme_process_cq(struct nvme_
        if (head == nvmeq->cq_head && phase == nvmeq->cq_phase)
                return 0;
  
-       writel(head, nvmeq->q_db + (1 << nvmeq->dev->db_stride));
+       writel(head, nvmeq->q_db + nvmeq->dev->db_stride);
        nvmeq->cq_head = head;
        nvmeq->cq_phase = phase;
  
@@@ -804,12 -916,34 +834,34 @@@ int nvme_submit_sync_cmd(struct nvme_qu
        return cmdinfo.status;
  }
  
+ static int nvme_submit_async_cmd(struct nvme_queue *nvmeq,
+                       struct nvme_command *cmd,
+                       struct async_cmd_info *cmdinfo, unsigned timeout)
+ {
+       int cmdid;
+       cmdid = alloc_cmdid_killable(nvmeq, cmdinfo, async_completion, timeout);
+       if (cmdid < 0)
+               return cmdid;
+       cmdinfo->status = -EINTR;
+       cmd->common.command_id = cmdid;
+       nvme_submit_cmd(nvmeq, cmd);
+       return 0;
+ }
  int nvme_submit_admin_cmd(struct nvme_dev *dev, struct nvme_command *cmd,
                                                                u32 *result)
  {
        return nvme_submit_sync_cmd(dev->queues[0], cmd, result, ADMIN_TIMEOUT);
  }
  
+ static int nvme_submit_admin_cmd_async(struct nvme_dev *dev,
+               struct nvme_command *cmd, struct async_cmd_info *cmdinfo)
+ {
+       return nvme_submit_async_cmd(dev->queues[0], cmd, cmdinfo,
+                                                               ADMIN_TIMEOUT);
+ }
  static int adapter_delete_queue(struct nvme_dev *dev, u8 opcode, u16 id)
  {
        int status;
@@@ -919,6 -1053,56 +971,56 @@@ int nvme_set_features(struct nvme_dev *
        return nvme_submit_admin_cmd(dev, &c, result);
  }
  
+ /**
+  * nvme_abort_cmd - Attempt aborting a command
+  * @cmdid: Command id of a timed out IO
+  * @queue: The queue with timed out IO
+  *
+  * Schedule controller reset if the command was already aborted once before and
+  * still hasn't been returned to the driver, or if this is the admin queue.
+  */
+ static void nvme_abort_cmd(int cmdid, struct nvme_queue *nvmeq)
+ {
+       int a_cmdid;
+       struct nvme_command cmd;
+       struct nvme_dev *dev = nvmeq->dev;
+       struct nvme_cmd_info *info = nvme_cmd_info(nvmeq);
+       if (!nvmeq->qid || info[cmdid].aborted) {
+               if (work_busy(&dev->reset_work))
+                       return;
+               list_del_init(&dev->node);
+               dev_warn(&dev->pci_dev->dev,
+                       "I/O %d QID %d timeout, reset controller\n", cmdid,
+                                                               nvmeq->qid);
+               PREPARE_WORK(&dev->reset_work, nvme_reset_failed_dev);
+               queue_work(nvme_workq, &dev->reset_work);
+               return;
+       }
+       if (!dev->abort_limit)
+               return;
+       a_cmdid = alloc_cmdid(dev->queues[0], CMD_CTX_ABORT, special_completion,
+                                                               ADMIN_TIMEOUT);
+       if (a_cmdid < 0)
+               return;
+       memset(&cmd, 0, sizeof(cmd));
+       cmd.abort.opcode = nvme_admin_abort_cmd;
+       cmd.abort.cid = cmdid;
+       cmd.abort.sqid = cpu_to_le16(nvmeq->qid);
+       cmd.abort.command_id = a_cmdid;
+       --dev->abort_limit;
+       info[cmdid].aborted = 1;
+       info[cmdid].timeout = jiffies + ADMIN_TIMEOUT;
+       dev_warn(nvmeq->q_dmadev, "Aborting I/O %d QID %d\n", cmdid,
+                                                       nvmeq->qid);
+       nvme_submit_cmd(dev->queues[0], &cmd);
+ }
  /**
   * nvme_cancel_ios - Cancel outstanding I/Os
   * @queue: The queue to cancel I/Os on
@@@ -942,7 -1126,12 +1044,12 @@@ static void nvme_cancel_ios(struct nvme
                        continue;
                if (info[cmdid].ctx == CMD_CTX_CANCELLED)
                        continue;
-               dev_warn(nvmeq->q_dmadev, "Cancelling I/O %d\n", cmdid);
+               if (timeout && nvmeq->dev->initialized) {
+                       nvme_abort_cmd(cmdid, nvmeq);
+                       continue;
+               }
+               dev_warn(nvmeq->q_dmadev, "Cancelling I/O %d QID %d\n", cmdid,
+                                                               nvmeq->qid);
                ctx = cancel_cmdid(nvmeq, cmdid, &fn);
                fn(nvmeq->dev, ctx, &cqe);
        }
@@@ -964,26 -1153,31 +1071,31 @@@ static void nvme_free_queue(struct nvme
        kfree(nvmeq);
  }
  
- static void nvme_free_queues(struct nvme_dev *dev)
+ static void nvme_free_queues(struct nvme_dev *dev, int lowest)
  {
        int i;
  
-       for (i = dev->queue_count - 1; i >= 0; i--) {
+       for (i = dev->queue_count - 1; i >= lowest; i--) {
                nvme_free_queue(dev->queues[i]);
                dev->queue_count--;
                dev->queues[i] = NULL;
        }
  }
  
- static void nvme_disable_queue(struct nvme_dev *dev, int qid)
+ /**
+  * nvme_suspend_queue - put queue into suspended state
+  * @nvmeq - queue to suspend
+  *
+  * Returns 1 if already suspended, 0 otherwise.
+  */
+ static int nvme_suspend_queue(struct nvme_queue *nvmeq)
  {
-       struct nvme_queue *nvmeq = dev->queues[qid];
-       int vector = dev->entry[nvmeq->cq_vector].vector;
+       int vector = nvmeq->dev->entry[nvmeq->cq_vector].vector;
  
        spin_lock_irq(&nvmeq->q_lock);
        if (nvmeq->q_suspended) {
                spin_unlock_irq(&nvmeq->q_lock);
-               return;
+               return 1;
        }
        nvmeq->q_suspended = 1;
        spin_unlock_irq(&nvmeq->q_lock);
        irq_set_affinity_hint(vector, NULL);
        free_irq(vector, nvmeq);
  
-       /* Don't tell the adapter to delete the admin queue */
-       if (qid) {
-               adapter_delete_sq(dev, qid);
-               adapter_delete_cq(dev, qid);
-       }
+       return 0;
+ }
  
+ static void nvme_clear_queue(struct nvme_queue *nvmeq)
+ {
        spin_lock_irq(&nvmeq->q_lock);
        nvme_process_cq(nvmeq);
        nvme_cancel_ios(nvmeq, false);
        spin_unlock_irq(&nvmeq->q_lock);
  }
  
+ static void nvme_disable_queue(struct nvme_dev *dev, int qid)
+ {
+       struct nvme_queue *nvmeq = dev->queues[qid];
+       if (!nvmeq)
+               return;
+       if (nvme_suspend_queue(nvmeq))
+               return;
+       /* Don't tell the adapter to delete the admin queue.
+        * Don't tell a removed adapter to delete IO queues. */
+       if (qid && readl(&dev->bar->csts) != -1) {
+               adapter_delete_sq(dev, qid);
+               adapter_delete_cq(dev, qid);
+       }
+       nvme_clear_queue(nvmeq);
+ }
  static struct nvme_queue *nvme_alloc_queue(struct nvme_dev *dev, int qid,
                                                        int depth, int vector)
  {
  
        nvmeq->q_dmadev = dmadev;
        nvmeq->dev = dev;
+       snprintf(nvmeq->irqname, sizeof(nvmeq->irqname), "nvme%dq%d",
+                       dev->instance, qid);
        spin_lock_init(&nvmeq->q_lock);
        nvmeq->cq_head = 0;
        nvmeq->cq_phase = 1;
        init_waitqueue_head(&nvmeq->sq_full);
        init_waitqueue_entry(&nvmeq->sq_cong_wait, nvme_thread);
        bio_list_init(&nvmeq->sq_cong);
-       nvmeq->q_db = &dev->dbs[qid << (dev->db_stride + 1)];
+       nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
        nvmeq->q_depth = depth;
        nvmeq->cq_vector = vector;
+       nvmeq->qid = qid;
        nvmeq->q_suspended = 1;
        dev->queue_count++;
  
@@@ -1052,11 -1266,10 +1184,10 @@@ static int queue_request_irq(struct nvm
  {
        if (use_threaded_interrupts)
                return request_threaded_irq(dev->entry[nvmeq->cq_vector].vector,
-                                       nvme_irq_check, nvme_irq,
-                                       IRQF_DISABLED | IRQF_SHARED,
+                                       nvme_irq_check, nvme_irq, IRQF_SHARED,
                                        name, nvmeq);
        return request_irq(dev->entry[nvmeq->cq_vector].vector, nvme_irq,
-                               IRQF_DISABLED | IRQF_SHARED, name, nvmeq);
+                               IRQF_SHARED, name, nvmeq);
  }
  
  static void nvme_init_queue(struct nvme_queue *nvmeq, u16 qid)
        nvmeq->sq_tail = 0;
        nvmeq->cq_head = 0;
        nvmeq->cq_phase = 1;
-       nvmeq->q_db = &dev->dbs[qid << (dev->db_stride + 1)];
+       nvmeq->q_db = &dev->dbs[qid * 2 * dev->db_stride];
        memset(nvmeq->cmdid_data, 0, extra);
        memset((void *)nvmeq->cqes, 0, CQ_SIZE(nvmeq->q_depth));
        nvme_cancel_ios(nvmeq, false);
@@@ -1087,13 -1300,13 +1218,13 @@@ static int nvme_create_queue(struct nvm
        if (result < 0)
                goto release_cq;
  
-       result = queue_request_irq(dev, nvmeq, "nvme");
+       result = queue_request_irq(dev, nvmeq, nvmeq->irqname);
        if (result < 0)
                goto release_sq;
  
-       spin_lock(&nvmeq->q_lock);
+       spin_lock_irq(&nvmeq->q_lock);
        nvme_init_queue(nvmeq, qid);
-       spin_unlock(&nvmeq->q_lock);
+       spin_unlock_irq(&nvmeq->q_lock);
  
        return result;
  
@@@ -1205,13 -1418,13 +1336,13 @@@ static int nvme_configure_admin_queue(s
        if (result)
                return result;
  
-       result = queue_request_irq(dev, nvmeq, "nvme admin");
+       result = queue_request_irq(dev, nvmeq, nvmeq->irqname);
        if (result)
                return result;
  
-       spin_lock(&nvmeq->q_lock);
+       spin_lock_irq(&nvmeq->q_lock);
        nvme_init_queue(nvmeq, 0);
-       spin_unlock(&nvmeq->q_lock);
+       spin_unlock_irq(&nvmeq->q_lock);
        return result;
  }
  
@@@ -1487,10 -1700,47 +1618,47 @@@ static int nvme_ioctl(struct block_devi
        }
  }
  
+ #ifdef CONFIG_COMPAT
+ static int nvme_compat_ioctl(struct block_device *bdev, fmode_t mode,
+                                       unsigned int cmd, unsigned long arg)
+ {
+       struct nvme_ns *ns = bdev->bd_disk->private_data;
+       switch (cmd) {
+       case SG_IO:
+               return nvme_sg_io32(ns, arg);
+       }
+       return nvme_ioctl(bdev, mode, cmd, arg);
+ }
+ #else
+ #define nvme_compat_ioctl     NULL
+ #endif
+ static int nvme_open(struct block_device *bdev, fmode_t mode)
+ {
+       struct nvme_ns *ns = bdev->bd_disk->private_data;
+       struct nvme_dev *dev = ns->dev;
+       kref_get(&dev->kref);
+       return 0;
+ }
+ static void nvme_free_dev(struct kref *kref);
+ static void nvme_release(struct gendisk *disk, fmode_t mode)
+ {
+       struct nvme_ns *ns = disk->private_data;
+       struct nvme_dev *dev = ns->dev;
+       kref_put(&dev->kref, nvme_free_dev);
+ }
  static const struct block_device_operations nvme_fops = {
        .owner          = THIS_MODULE,
        .ioctl          = nvme_ioctl,
-       .compat_ioctl   = nvme_ioctl,
+       .compat_ioctl   = nvme_compat_ioctl,
+       .open           = nvme_open,
+       .release        = nvme_release,
  };
  
  static void nvme_resubmit_bios(struct nvme_queue *nvmeq)
  
  static int nvme_kthread(void *data)
  {
-       struct nvme_dev *dev;
+       struct nvme_dev *dev, *next;
  
        while (!kthread_should_stop()) {
                set_current_state(TASK_INTERRUPTIBLE);
                spin_lock(&dev_list_lock);
-               list_for_each_entry(dev, &dev_list, node) {
+               list_for_each_entry_safe(dev, next, &dev_list, node) {
                        int i;
+                       if (readl(&dev->bar->csts) & NVME_CSTS_CFS &&
+                                                       dev->initialized) {
+                               if (work_busy(&dev->reset_work))
+                                       continue;
+                               list_del_init(&dev->node);
+                               dev_warn(&dev->pci_dev->dev,
+                                       "Failed status, reset controller\n");
+                               PREPARE_WORK(&dev->reset_work,
+                                                       nvme_reset_failed_dev);
+                               queue_work(nvme_workq, &dev->reset_work);
+                               continue;
+                       }
                        for (i = 0; i < dev->queue_count; i++) {
                                struct nvme_queue *nvmeq = dev->queues[i];
                                if (!nvmeq)
        return 0;
  }
  
- static DEFINE_IDA(nvme_index_ida);
- static int nvme_get_ns_idx(void)
- {
-       int index, error;
-       do {
-               if (!ida_pre_get(&nvme_index_ida, GFP_KERNEL))
-                       return -1;
-               spin_lock(&dev_list_lock);
-               error = ida_get_new(&nvme_index_ida, &index);
-               spin_unlock(&dev_list_lock);
-       } while (error == -EAGAIN);
-       if (error)
-               index = -1;
-       return index;
- }
- static void nvme_put_ns_idx(int index)
- {
-       spin_lock(&dev_list_lock);
-       ida_remove(&nvme_index_ida, index);
-       spin_unlock(&dev_list_lock);
- }
  static void nvme_config_discard(struct nvme_ns *ns)
  {
        u32 logical_block_size = queue_logical_block_size(ns->queue);
@@@ -1601,7 -1836,7 +1754,7 @@@ static struct nvme_ns *nvme_alloc_ns(st
        ns->dev = dev;
        ns->queue->queuedata = ns;
  
-       disk = alloc_disk(NVME_MINORS);
+       disk = alloc_disk(0);
        if (!disk)
                goto out_free_queue;
        ns->ns_id = nsid;
                blk_queue_max_hw_sectors(ns->queue, dev->max_hw_sectors);
  
        disk->major = nvme_major;
-       disk->minors = NVME_MINORS;
-       disk->first_minor = NVME_MINORS * nvme_get_ns_idx();
+       disk->first_minor = 0;
        disk->fops = &nvme_fops;
        disk->private_data = ns;
        disk->queue = ns->queue;
        disk->driverfs_dev = &dev->pci_dev->dev;
+       disk->flags = GENHD_FL_EXT_DEVT;
        sprintf(disk->disk_name, "nvme%dn%d", dev->instance, nsid);
        set_capacity(disk, le64_to_cpup(&id->nsze) << (ns->lba_shift - 9));
  
        return NULL;
  }
  
- static void nvme_ns_free(struct nvme_ns *ns)
- {
-       int index = ns->disk->first_minor / NVME_MINORS;
-       put_disk(ns->disk);
-       nvme_put_ns_idx(index);
-       blk_cleanup_queue(ns->queue);
-       kfree(ns);
- }
  static int set_queue_count(struct nvme_dev *dev, int count)
  {
        int status;
  
  static size_t db_bar_size(struct nvme_dev *dev, unsigned nr_io_queues)
  {
-       return 4096 + ((nr_io_queues + 1) << (dev->db_stride + 3));
+       return 4096 + ((nr_io_queues + 1) * 8 * dev->db_stride);
  }
  
  static int nvme_setup_io_queues(struct nvme_dev *dev)
  {
+       struct nvme_queue *adminq = dev->queues[0];
        struct pci_dev *pdev = dev->pci_dev;
        int result, cpu, i, vecs, nr_io_queues, size, q_depth;
  
        }
  
        /* Deregister the admin queue's interrupt */
-       free_irq(dev->entry[0].vector, dev->queues[0]);
+       free_irq(dev->entry[0].vector, adminq);
  
        vecs = nr_io_queues;
        for (i = 0; i < vecs; i++)
         */
        nr_io_queues = vecs;
  
-       result = queue_request_irq(dev, dev->queues[0], "nvme admin");
+       result = queue_request_irq(dev, adminq, adminq->irqname);
        if (result) {
-               dev->queues[0]->q_suspended = 1;
+               adminq->q_suspended = 1;
                goto free_queues;
        }
  
        for (i = dev->queue_count - 1; i > nr_io_queues; i--) {
                struct nvme_queue *nvmeq = dev->queues[i];
  
-               spin_lock(&nvmeq->q_lock);
+               spin_lock_irq(&nvmeq->q_lock);
                nvme_cancel_ios(nvmeq, false);
-               spin_unlock(&nvmeq->q_lock);
+               spin_unlock_irq(&nvmeq->q_lock);
  
                nvme_free_queue(nvmeq);
                dev->queue_count--;
        return 0;
  
   free_queues:
-       nvme_free_queues(dev);
+       nvme_free_queues(dev, 1);
        return result;
  }
  
   */
  static int nvme_dev_add(struct nvme_dev *dev)
  {
+       struct pci_dev *pdev = dev->pci_dev;
        int res;
        unsigned nn, i;
        struct nvme_ns *ns;
        dma_addr_t dma_addr;
        int shift = NVME_CAP_MPSMIN(readq(&dev->bar->cap)) + 12;
  
-       mem = dma_alloc_coherent(&dev->pci_dev->dev, 8192, &dma_addr,
-                                                               GFP_KERNEL);
+       mem = dma_alloc_coherent(&pdev->dev, 8192, &dma_addr, GFP_KERNEL);
        if (!mem)
                return -ENOMEM;
  
        ctrl = mem;
        nn = le32_to_cpup(&ctrl->nn);
        dev->oncs = le16_to_cpup(&ctrl->oncs);
+       dev->abort_limit = ctrl->acl + 1;
        memcpy(dev->serial, ctrl->sn, sizeof(ctrl->sn));
        memcpy(dev->model, ctrl->mn, sizeof(ctrl->mn));
        memcpy(dev->firmware_rev, ctrl->fr, sizeof(ctrl->fr));
        if (ctrl->mdts)
                dev->max_hw_sectors = 1 << (ctrl->mdts + shift - 9);
-       if ((dev->pci_dev->vendor == PCI_VENDOR_ID_INTEL) &&
-                       (dev->pci_dev->device == 0x0953) && ctrl->vs[3])
+       if ((pdev->vendor == PCI_VENDOR_ID_INTEL) &&
+                       (pdev->device == 0x0953) && ctrl->vs[3])
                dev->stripe_size = 1 << (ctrl->vs[3] + shift);
  
        id_ns = mem;
@@@ -1871,16 -2099,21 +2017,21 @@@ static int nvme_dev_map(struct nvme_de
            dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
                goto disable;
  
-       pci_set_drvdata(pdev, dev);
        dev->bar = ioremap(pci_resource_start(pdev, 0), 8192);
        if (!dev->bar)
                goto disable;
-       dev->db_stride = NVME_CAP_STRIDE(readq(&dev->bar->cap));
+       if (readl(&dev->bar->csts) == -1) {
+               result = -ENODEV;
+               goto unmap;
+       }
+       dev->db_stride = 1 << NVME_CAP_STRIDE(readq(&dev->bar->cap));
        dev->dbs = ((void __iomem *)dev->bar) + 4096;
  
        return 0;
  
+  unmap:
+       iounmap(dev->bar);
+       dev->bar = NULL;
   disable:
        pci_release_regions(pdev);
   disable_pci:
@@@ -1898,37 -2131,183 +2049,183 @@@ static void nvme_dev_unmap(struct nvme_
        if (dev->bar) {
                iounmap(dev->bar);
                dev->bar = NULL;
+               pci_release_regions(dev->pci_dev);
        }
  
-       pci_release_regions(dev->pci_dev);
        if (pci_is_enabled(dev->pci_dev))
                pci_disable_device(dev->pci_dev);
  }
  
+ struct nvme_delq_ctx {
+       struct task_struct *waiter;
+       struct kthread_worker *worker;
+       atomic_t refcount;
+ };
+ static void nvme_wait_dq(struct nvme_delq_ctx *dq, struct nvme_dev *dev)
+ {
+       dq->waiter = current;
+       mb();
+       for (;;) {
+               set_current_state(TASK_KILLABLE);
+               if (!atomic_read(&dq->refcount))
+                       break;
+               if (!schedule_timeout(ADMIN_TIMEOUT) ||
+                                       fatal_signal_pending(current)) {
+                       set_current_state(TASK_RUNNING);
+                       nvme_disable_ctrl(dev, readq(&dev->bar->cap));
+                       nvme_disable_queue(dev, 0);
+                       send_sig(SIGKILL, dq->worker->task, 1);
+                       flush_kthread_worker(dq->worker);
+                       return;
+               }
+       }
+       set_current_state(TASK_RUNNING);
+ }
+ static void nvme_put_dq(struct nvme_delq_ctx *dq)
+ {
+       atomic_dec(&dq->refcount);
+       if (dq->waiter)
+               wake_up_process(dq->waiter);
+ }
+ static struct nvme_delq_ctx *nvme_get_dq(struct nvme_delq_ctx *dq)
+ {
+       atomic_inc(&dq->refcount);
+       return dq;
+ }
+ static void nvme_del_queue_end(struct nvme_queue *nvmeq)
+ {
+       struct nvme_delq_ctx *dq = nvmeq->cmdinfo.ctx;
+       nvme_clear_queue(nvmeq);
+       nvme_put_dq(dq);
+ }
+ static int adapter_async_del_queue(struct nvme_queue *nvmeq, u8 opcode,
+                                               kthread_work_func_t fn)
+ {
+       struct nvme_command c;
+       memset(&c, 0, sizeof(c));
+       c.delete_queue.opcode = opcode;
+       c.delete_queue.qid = cpu_to_le16(nvmeq->qid);
+       init_kthread_work(&nvmeq->cmdinfo.work, fn);
+       return nvme_submit_admin_cmd_async(nvmeq->dev, &c, &nvmeq->cmdinfo);
+ }
+ static void nvme_del_cq_work_handler(struct kthread_work *work)
+ {
+       struct nvme_queue *nvmeq = container_of(work, struct nvme_queue,
+                                                       cmdinfo.work);
+       nvme_del_queue_end(nvmeq);
+ }
+ static int nvme_delete_cq(struct nvme_queue *nvmeq)
+ {
+       return adapter_async_del_queue(nvmeq, nvme_admin_delete_cq,
+                                               nvme_del_cq_work_handler);
+ }
+ static void nvme_del_sq_work_handler(struct kthread_work *work)
+ {
+       struct nvme_queue *nvmeq = container_of(work, struct nvme_queue,
+                                                       cmdinfo.work);
+       int status = nvmeq->cmdinfo.status;
+       if (!status)
+               status = nvme_delete_cq(nvmeq);
+       if (status)
+               nvme_del_queue_end(nvmeq);
+ }
+ static int nvme_delete_sq(struct nvme_queue *nvmeq)
+ {
+       return adapter_async_del_queue(nvmeq, nvme_admin_delete_sq,
+                                               nvme_del_sq_work_handler);
+ }
+ static void nvme_del_queue_start(struct kthread_work *work)
+ {
+       struct nvme_queue *nvmeq = container_of(work, struct nvme_queue,
+                                                       cmdinfo.work);
+       allow_signal(SIGKILL);
+       if (nvme_delete_sq(nvmeq))
+               nvme_del_queue_end(nvmeq);
+ }
+ static void nvme_disable_io_queues(struct nvme_dev *dev)
+ {
+       int i;
+       DEFINE_KTHREAD_WORKER_ONSTACK(worker);
+       struct nvme_delq_ctx dq;
+       struct task_struct *kworker_task = kthread_run(kthread_worker_fn,
+                                       &worker, "nvme%d", dev->instance);
+       if (IS_ERR(kworker_task)) {
+               dev_err(&dev->pci_dev->dev,
+                       "Failed to create queue del task\n");
+               for (i = dev->queue_count - 1; i > 0; i--)
+                       nvme_disable_queue(dev, i);
+               return;
+       }
+       dq.waiter = NULL;
+       atomic_set(&dq.refcount, 0);
+       dq.worker = &worker;
+       for (i = dev->queue_count - 1; i > 0; i--) {
+               struct nvme_queue *nvmeq = dev->queues[i];
+               if (nvme_suspend_queue(nvmeq))
+                       continue;
+               nvmeq->cmdinfo.ctx = nvme_get_dq(&dq);
+               nvmeq->cmdinfo.worker = dq.worker;
+               init_kthread_work(&nvmeq->cmdinfo.work, nvme_del_queue_start);
+               queue_kthread_work(dq.worker, &nvmeq->cmdinfo.work);
+       }
+       nvme_wait_dq(&dq, dev);
+       kthread_stop(kworker_task);
+ }
  static void nvme_dev_shutdown(struct nvme_dev *dev)
  {
        int i;
  
-       for (i = dev->queue_count - 1; i >= 0; i--)
-               nvme_disable_queue(dev, i);
+       dev->initialized = 0;
  
        spin_lock(&dev_list_lock);
        list_del_init(&dev->node);
        spin_unlock(&dev_list_lock);
  
-       if (dev->bar)
+       if (!dev->bar || (dev->bar && readl(&dev->bar->csts) == -1)) {
+               for (i = dev->queue_count - 1; i >= 0; i--) {
+                       struct nvme_queue *nvmeq = dev->queues[i];
+                       nvme_suspend_queue(nvmeq);
+                       nvme_clear_queue(nvmeq);
+               }
+       } else {
+               nvme_disable_io_queues(dev);
                nvme_shutdown_ctrl(dev);
+               nvme_disable_queue(dev, 0);
+       }
        nvme_dev_unmap(dev);
  }
  
  static void nvme_dev_remove(struct nvme_dev *dev)
  {
-       struct nvme_ns *ns, *next;
+       struct nvme_ns *ns;
  
-       list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
-               list_del(&ns->list);
-               del_gendisk(ns->disk);
-               nvme_ns_free(ns);
+       list_for_each_entry(ns, &dev->namespaces, list) {
+               if (ns->disk->flags & GENHD_FL_UP)
+                       del_gendisk(ns->disk);
+               if (!blk_queue_dying(ns->queue))
+                       blk_cleanup_queue(ns->queue);
        }
  }
  
@@@ -1985,14 -2364,22 +2282,22 @@@ static void nvme_release_instance(struc
        spin_unlock(&dev_list_lock);
  }
  
+ static void nvme_free_namespaces(struct nvme_dev *dev)
+ {
+       struct nvme_ns *ns, *next;
+       list_for_each_entry_safe(ns, next, &dev->namespaces, list) {
+               list_del(&ns->list);
+               put_disk(ns->disk);
+               kfree(ns);
+       }
+ }
  static void nvme_free_dev(struct kref *kref)
  {
        struct nvme_dev *dev = container_of(kref, struct nvme_dev, kref);
-       nvme_dev_remove(dev);
-       nvme_dev_shutdown(dev);
-       nvme_free_queues(dev);
-       nvme_release_instance(dev);
-       nvme_release_prp_pools(dev);
+       nvme_free_namespaces(dev);
        kfree(dev->queues);
        kfree(dev->entry);
        kfree(dev);
@@@ -2056,6 -2443,7 +2361,7 @@@ static int nvme_dev_start(struct nvme_d
        return result;
  
   disable:
+       nvme_disable_queue(dev, 0);
        spin_lock(&dev_list_lock);
        list_del_init(&dev->node);
        spin_unlock(&dev_list_lock);
        return result;
  }
  
+ static int nvme_remove_dead_ctrl(void *arg)
+ {
+       struct nvme_dev *dev = (struct nvme_dev *)arg;
+       struct pci_dev *pdev = dev->pci_dev;
+       if (pci_get_drvdata(pdev))
+               pci_stop_and_remove_bus_device(pdev);
+       kref_put(&dev->kref, nvme_free_dev);
+       return 0;
+ }
+ static void nvme_remove_disks(struct work_struct *ws)
+ {
+       int i;
+       struct nvme_dev *dev = container_of(ws, struct nvme_dev, reset_work);
+       nvme_dev_remove(dev);
+       spin_lock(&dev_list_lock);
+       for (i = dev->queue_count - 1; i > 0; i--) {
+               BUG_ON(!dev->queues[i] || !dev->queues[i]->q_suspended);
+               nvme_free_queue(dev->queues[i]);
+               dev->queue_count--;
+               dev->queues[i] = NULL;
+       }
+       spin_unlock(&dev_list_lock);
+ }
+ static int nvme_dev_resume(struct nvme_dev *dev)
+ {
+       int ret;
+       ret = nvme_dev_start(dev);
+       if (ret && ret != -EBUSY)
+               return ret;
+       if (ret == -EBUSY) {
+               spin_lock(&dev_list_lock);
+               PREPARE_WORK(&dev->reset_work, nvme_remove_disks);
+               queue_work(nvme_workq, &dev->reset_work);
+               spin_unlock(&dev_list_lock);
+       }
+       dev->initialized = 1;
+       return 0;
+ }
+ static void nvme_dev_reset(struct nvme_dev *dev)
+ {
+       nvme_dev_shutdown(dev);
+       if (nvme_dev_resume(dev)) {
+               dev_err(&dev->pci_dev->dev, "Device failed to resume\n");
+               kref_get(&dev->kref);
+               if (IS_ERR(kthread_run(nvme_remove_dead_ctrl, dev, "nvme%d",
+                                                       dev->instance))) {
+                       dev_err(&dev->pci_dev->dev,
+                               "Failed to start controller remove task\n");
+                       kref_put(&dev->kref, nvme_free_dev);
+               }
+       }
+ }
+ static void nvme_reset_failed_dev(struct work_struct *ws)
+ {
+       struct nvme_dev *dev = container_of(ws, struct nvme_dev, reset_work);
+       nvme_dev_reset(dev);
+ }
  static int nvme_probe(struct pci_dev *pdev, const struct pci_device_id *id)
  {
        int result = -ENOMEM;
                goto free;
  
        INIT_LIST_HEAD(&dev->namespaces);
+       INIT_WORK(&dev->reset_work, nvme_reset_failed_dev);
        dev->pci_dev = pdev;
+       pci_set_drvdata(pdev, dev);
        result = nvme_set_instance(dev);
        if (result)
                goto free;
                goto release_pools;
        }
  
+       kref_init(&dev->kref);
        result = nvme_dev_add(dev);
        if (result)
                goto shutdown;
        if (result)
                goto remove;
  
-       kref_init(&dev->kref);
+       dev->initialized = 1;
        return 0;
  
   remove:
        nvme_dev_remove(dev);
+       nvme_free_namespaces(dev);
   shutdown:
        nvme_dev_shutdown(dev);
   release_pools:
-       nvme_free_queues(dev);
+       nvme_free_queues(dev, 0);
        nvme_release_prp_pools(dev);
   release:
        nvme_release_instance(dev);
        return result;
  }
  
+ static void nvme_shutdown(struct pci_dev *pdev)
+ {
+       struct nvme_dev *dev = pci_get_drvdata(pdev);
+       nvme_dev_shutdown(dev);
+ }
  static void nvme_remove(struct pci_dev *pdev)
  {
        struct nvme_dev *dev = pci_get_drvdata(pdev);
+       spin_lock(&dev_list_lock);
+       list_del_init(&dev->node);
+       spin_unlock(&dev_list_lock);
+       pci_set_drvdata(pdev, NULL);
+       flush_work(&dev->reset_work);
        misc_deregister(&dev->miscdev);
+       nvme_dev_remove(dev);
+       nvme_dev_shutdown(dev);
+       nvme_free_queues(dev, 0);
+       nvme_release_instance(dev);
+       nvme_release_prp_pools(dev);
        kref_put(&dev->kref, nvme_free_dev);
  }
  
@@@ -2159,13 -2633,12 +2551,12 @@@ static int nvme_resume(struct device *d
  {
        struct pci_dev *pdev = to_pci_dev(dev);
        struct nvme_dev *ndev = pci_get_drvdata(pdev);
-       int ret;
  
-       ret = nvme_dev_start(ndev);
-       /* XXX: should remove gendisks if resume fails */
-       if (ret)
-               nvme_free_queues(ndev);
-       return ret;
+       if (nvme_dev_resume(ndev) && !work_busy(&ndev->reset_work)) {
+               PREPARE_WORK(&ndev->reset_work, nvme_reset_failed_dev);
+               queue_work(nvme_workq, &ndev->reset_work);
+       }
+       return 0;
  }
  
  static SIMPLE_DEV_PM_OPS(nvme_dev_pm_ops, nvme_suspend, nvme_resume);
@@@ -2192,6 -2665,7 +2583,7 @@@ static struct pci_driver nvme_driver = 
        .id_table       = nvme_id_table,
        .probe          = nvme_probe,
        .remove         = nvme_remove,
+       .shutdown       = nvme_shutdown,
        .driver         = {
                .pm     = &nvme_dev_pm_ops,
        },
@@@ -2206,9 -2680,14 +2598,14 @@@ static int __init nvme_init(void
        if (IS_ERR(nvme_thread))
                return PTR_ERR(nvme_thread);
  
+       result = -ENOMEM;
+       nvme_workq = create_singlethread_workqueue("nvme");
+       if (!nvme_workq)
+               goto kill_kthread;
        result = register_blkdev(nvme_major, "nvme");
        if (result < 0)
-               goto kill_kthread;
+               goto kill_workq;
        else if (result > 0)
                nvme_major = result;
  
  
   unregister_blkdev:
        unregister_blkdev(nvme_major, "nvme");
+  kill_workq:
+       destroy_workqueue(nvme_workq);
   kill_kthread:
        kthread_stop(nvme_thread);
        return result;
@@@ -2228,6 -2709,7 +2627,7 @@@ static void __exit nvme_exit(void
  {
        pci_unregister_driver(&nvme_driver);
        unregister_blkdev(nvme_major, "nvme");
+       destroy_workqueue(nvme_workq);
        kthread_stop(nvme_thread);
  }