diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 9093c7261f330085bf4ba843b18b8e53ad4270cd..255da53e5a01a1dd970fac95b2106888b6e91c6b 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -264,6 +264,15 @@ sd_show_app_tag_own(struct device *dev, struct device_attribute *attr,
        return snprintf(buf, 20, "%u\n", sdkp->ATO);
 }
 
+static ssize_t
+sd_show_thin_provisioning(struct device *dev, struct device_attribute *attr,
+                         char *buf)
+{
+       struct scsi_disk *sdkp = to_scsi_disk(dev);
+
+       return snprintf(buf, 20, "%u\n", sdkp->thin_provisioning);
+}
+
 static struct device_attribute sd_disk_attrs[] = {
        __ATTR(cache_type, S_IRUGO|S_IWUSR, sd_show_cache_type,
               sd_store_cache_type),
@@ -274,6 +283,7 @@ static struct device_attribute sd_disk_attrs[] = {
               sd_store_manage_start_stop),
        __ATTR(protection_type, S_IRUGO, sd_show_protection_type, NULL),
        __ATTR(app_tag_own, S_IRUGO, sd_show_app_tag_own, NULL),
+       __ATTR(thin_provisioning, S_IRUGO, sd_show_thin_provisioning, NULL),
        __ATTR_NULL,
 };
 
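The new read-only attribute surfaces sdkp->thin_provisioning through the scsi_disk class device. As a minimal userspace sketch (the 0:0:0:0 address below is a placeholder; substitute the disk's real H:C:T:L), it can be read like any other sysfs file:

    #include <stdio.h>

    /* Placeholder path for illustration; use the actual SCSI address. */
    #define TP_ATTR "/sys/class/scsi_disk/0:0:0:0/thin_provisioning"

    int main(void)
    {
            FILE *f = fopen(TP_ATTR, "r");
            unsigned int tp = 0;

            if (!f) {
                    perror(TP_ATTR);
                    return 1;
            }
            if (fscanf(f, "%u", &tp) != 1)
                    tp = 0;
            fclose(f);
            printf("thin provisioning: %s\n", tp ? "enabled" : "disabled");
            return 0;
    }
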
@@ -398,6 +408,57 @@ static void sd_prot_op(struct scsi_cmnd *scmd, unsigned int dif)
        scsi_set_prot_type(scmd, dif);
 }
 
+/**
+ * sd_prepare_discard - unmap blocks on thinly provisioned device
+ * @rq: Request to prepare
+ *
+ * Will issue either UNMAP or WRITE SAME(16) depending on the preference
+ * indicated by the target device.
+ **/
+static int sd_prepare_discard(struct request *rq)
+{
+       struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
+       struct bio *bio = rq->bio;
+       sector_t sector = bio->bi_sector;
+       unsigned int num = bio_sectors(bio);
+
+       if (sdkp->device->sector_size == 4096) {
+               sector >>= 3;
+               num >>= 3;
+       }
+
+       rq->cmd_type = REQ_TYPE_BLOCK_PC;
+       rq->timeout = SD_TIMEOUT;
+
+       memset(rq->cmd, 0, rq->cmd_len);
+
+       if (sdkp->unmap) {
+               char *buf = kmap_atomic(bio_page(bio), KM_USER0);
+
+               rq->cmd[0] = UNMAP;
+               rq->cmd[8] = 24;
+               rq->cmd_len = 10;
+
+               /* Ensure that data length matches payload */
+               rq->__data_len = bio->bi_size = bio->bi_io_vec->bv_len = 24;
+
+               put_unaligned_be16(6 + 16, &buf[0]);
+               put_unaligned_be16(16, &buf[2]);
+               put_unaligned_be64(sector, &buf[8]);
+               put_unaligned_be32(num, &buf[16]);
+
+               kunmap_atomic(buf, KM_USER0);
+       } else {
+               rq->cmd[0] = WRITE_SAME_16;
+               rq->cmd[1] = 0x8; /* UNMAP */
+               put_unaligned_be64(sector, &rq->cmd[2]);
+               put_unaligned_be32(num, &rq->cmd[10]);
+               rq->cmd_len = 16;
+       }
+
+       return BLKPREP_OK;
+}
+
 /**
  *     sd_init_command - build a scsi (read or write) command from
  *     information in the request structure.
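The 24-byte payload that sd_prepare_discard() assembles by hand is an SBC-3 UNMAP parameter list holding a single block descriptor: a header giving the number of bytes that follow the length field (6 + 16 = 22) and the descriptor data length (16), followed by the 64-bit starting LBA and the 32-bit block count. A sketch of that layout, with hypothetical struct and field names chosen only for illustration:

    #include <linux/types.h>

    /* Hypothetical helper; sd_prepare_discard() writes these fields by offset. */
    struct unmap_parameter_list {
            __be16 data_len;          /* bytes after this field: 6 + 16 = 22 */
            __be16 desc_data_len;     /* length of all descriptors: 16       */
            __u8   reserved[4];
            struct {
                    __be64 lba;       /* first logical block to unmap        */
                    __be32 nr_blocks; /* number of logical blocks            */
                    __u8   reserved[4];
            } desc[1];                /* one descriptor fills the 24 bytes   */
    } __attribute__((packed));
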
@@ -418,6 +479,13 @@ static int sd_prep_fn(struct request_queue *q, struct request *rq)
        int ret, host_dif;
        unsigned char protect;
 
+       /*
+        * Discard requests come in as REQ_TYPE_FS but we turn them into
+        * block PC requests to make life easier.
+        */
+       if (blk_discard_rq(rq))
+               ret = sd_prepare_discard(rq);
+
        if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
                ret = scsi_setup_blk_pc_cmnd(sdp, rq);
                goto out;
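Note that sd_prepare_discard() has already switched rq->cmd_type to REQ_TYPE_BLOCK_PC, so a converted discard request falls straight into the scsi_setup_blk_pc_cmnd() path tested immediately below; no separate dispatch is needed after the conversion.
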
@@ -1432,6 +1500,19 @@ static int read_capacity_16(struct scsi_disk *sdkp, struct scsi_device *sdp,
                sd_printk(KERN_NOTICE, sdkp,
                          "physical block alignment offset: %u\n", alignment);
 
+       if (buffer[14] & 0x80) { /* TPE */
+               struct request_queue *q = sdp->request_queue;
+
+               sdkp->thin_provisioning = 1;
+               q->limits.discard_granularity = sdkp->hw_sector_size;
+               q->limits.max_discard_sectors = 0xffffffff;
+
+               if (buffer[14] & 0x40) /* TPRZ */
+                       q->limits.discard_zeroes_data = 1;
+
+               queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
+       }
+
        sdkp->capacity = lba + 1;
        return sector_size;
 }
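These READ CAPACITY(16) defaults are deliberately coarse: the discard granularity starts out as one hardware sector and max_discard_sectors as 0xffffffff, i.e. 0xffffffff × 512 B ≈ 2 TiB per discard in the block layer's 512-byte units, while TPRZ records that unmapped blocks read back as zeroes. The Block Limits VPD parsing further down refines both limits when the device reports them.
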
@@ -1863,6 +1944,7 @@ void sd_read_app_tag_own(struct scsi_disk *sdkp, unsigned char *buffer)
  */
 static void sd_read_block_limits(struct scsi_disk *sdkp)
 {
+       struct request_queue *q = sdkp->disk->queue;
        unsigned int sector_sz = sdkp->device->sector_size;
        char *buffer;
 
@@ -1877,6 +1959,31 @@ static void sd_read_block_limits(struct scsi_disk *sdkp)
        blk_queue_io_opt(sdkp->disk->queue,
                         get_unaligned_be32(&buffer[12]) * sector_sz);
 
+       /* Thin provisioning enabled and page length indicates TP support */
+       if (sdkp->thin_provisioning && buffer[3] == 0x3c) {
+               unsigned int lba_count, desc_count, granularity;
+
+               lba_count = get_unaligned_be32(&buffer[20]);
+               desc_count = get_unaligned_be32(&buffer[24]);
+
+               if (lba_count) {
+                       q->limits.max_discard_sectors =
+                               lba_count * sector_sz >> 9;
+
+                       if (desc_count)
+                               sdkp->unmap = 1;
+               }
+
+               granularity = get_unaligned_be32(&buffer[28]);
+
+               if (granularity)
+                       q->limits.discard_granularity = granularity * sector_sz;
+
+               if (buffer[32] & 0x80)
+                       q->limits.discard_alignment =
+                               get_unaligned_be32(&buffer[32]) & ~(1 << 31);
+       }
+
        kfree(buffer);
 }
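MAXIMUM UNMAP LBA COUNT is reported in logical blocks, so lba_count * sector_sz >> 9 converts it into the block layer's 512-byte sectors; for example, a limit of 0x80000 blocks on a 4096-byte-sector device gives 0x80000 × 4096 / 512 = 4,194,304 sectors, i.e. 2 GiB per discard. The UNMAP GRANULARITY ALIGNMENT field at bytes 32-35 carries a validity flag in its top bit (the buffer[32] & 0x80 test), which is why the value is masked with ~(1 << 31) before being stored as the discard alignment.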