/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *
 * 4/3/2011: K. Y. Srinivasan - Significant restructuring and cleanup.
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/blkdev.h>
#include <linux/major.h>
#include <linux/delay.h>
#include <linux/hdreg.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_dbg.h>

#include "hv_api.h"
#include "logging.h"
#include "version_info.h"
#include "vmbus.h"
#include "storvsc_api.h"
#define BLKVSC_MINORS	64
enum blkvsc_device_type {
	UNKNOWN_DEV_TYPE,
	HARDDISK_TYPE,
};

enum blkvsc_op_type {
	DO_INQUIRY,
	DO_CAPACITY,
	DO_FLUSH,
};
/*
 * This request ties the struct request and struct
 * blkvsc_request/hv_storvsc_request together. A struct request may be
 * represented by one or more struct blkvsc_requests.
 */
struct blkvsc_request_group {
	int outstanding;
	int status;
	struct list_head blkvsc_req_list;	/* list of blkvsc_requests */
};
struct blkvsc_request {
	/* blkvsc_request_group.blkvsc_req_list */
	struct list_head req_entry;

	/* block_device_context.pending_list */
	struct list_head pend_entry;

	/* This may be NULL if we generate a request internally */
	struct request *req;

	struct block_device_context *dev;

	/* The group this request is part of. May be NULL. */
	struct blkvsc_request_group *group;

	int write;
	sector_t sector_start;
	unsigned long sector_count;

	unsigned char sense_buffer[SCSI_SENSE_BUFFERSIZE];
	unsigned char cmd_len;
	unsigned char cmnd[MAX_COMMAND_SIZE];

	struct hv_storvsc_request request;
};
/* Per device structure */
struct block_device_context {
	/* point back to our device context */
	struct hv_device *device_ctx;
	struct kmem_cache *request_pool;
	spinlock_t lock;
	struct gendisk *gd;
	enum blkvsc_device_type device_type;
	struct list_head pending_list;

	unsigned char device_id[64];
	unsigned int device_id_len;
	int num_outstanding_reqs;
	int shutting_down;
	unsigned int sector_size;
	sector_t capacity;
	unsigned int port;
	unsigned char path;
	unsigned char target;
	int users;
};
static DEFINE_MUTEX(blkvsc_mutex);
static const char *g_blk_driver_name = "blkvsc";
/* {32412632-86cb-44a2-9b5c-50d1417354f5} */
static const struct hv_guid g_blk_device_type = {
	.data = {
		0x32, 0x26, 0x41, 0x32, 0xcb, 0x86, 0xa2, 0x44,
		0x9b, 0x5c, 0x50, 0xd1, 0x41, 0x73, 0x54, 0xf5
	}
};
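/*
 * Note on the byte order above (standard GUID-on-the-wire convention,
 * not stated in this file): the first three GUID fields (32412632,
 * 86cb, 44a2) are stored little-endian, so 0x32412632 becomes the
 * bytes 0x32, 0x26, 0x41, 0x32; the trailing 8 bytes
 * (9b5c-50d1417354f5) are stored exactly as written.
 */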
/*
 * There is a circular dependency involving blkvsc_request_completion()
 * and blkvsc_do_request().
 */
static void blkvsc_request_completion(struct hv_storvsc_request *request);
static int blkvsc_ringbuffer_size = BLKVSC_RING_BUFFER_SIZE;

module_param(blkvsc_ringbuffer_size, int, S_IRUGO);
MODULE_PARM_DESC(blkvsc_ringbuffer_size, "Ring buffer size (in bytes)");
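/*
 * Since the parameter is read-only (S_IRUGO) once loaded, it can only be
 * set at module load time, e.g. (module name and value assumed for
 * illustration):
 *
 *	modprobe blkvsc blkvsc_ringbuffer_size=131072
 */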
/*
 * There is a circular dependency involving blkvsc_probe()
 * and block_ops.
 */
static int blkvsc_probe(struct device *dev);
static int blk_vsc_on_device_add(struct hv_device *device,
				 void *additional_info)
{
	struct storvsc_device_info *device_info;
	int ret = 0;

	device_info = (struct storvsc_device_info *)additional_info;

	ret = stor_vsc_on_device_add(device, additional_info);
	if (ret != 0)
		return ret;

	/*
	 * We need to use the device instance guid to set the path and target
	 * id. For IDE devices, the device instance id is formatted as
	 * <bus id> - <device id> - 8899 - 000000000000.
	 */
	device_info->path_id = device->dev_instance.data[3] << 24 |
			       device->dev_instance.data[2] << 16 |
			       device->dev_instance.data[1] << 8 |
			       device->dev_instance.data[0];

	device_info->target_id = device->dev_instance.data[5] << 8 |
				 device->dev_instance.data[4];

	return ret;
}
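/*
 * Illustration of the decode above (hypothetical instance GUID bytes,
 * not from the original file): if dev_instance.data[0..5] were
 * 01 00 00 00 00 00, the little-endian assembly would yield
 * path_id = 0x00000001 (IDE controller 1) and target_id = 0x0000
 * (device 0).
 */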
static int blk_vsc_initialize(struct hv_driver *driver)
{
	struct storvsc_driver_object *stor_driver;
	int ret = 0;

	stor_driver = hvdr_to_stordr(driver);

	/* Make sure we are at least 2 pages since 1 page is used for control */

	driver->name = g_blk_driver_name;
	memcpy(&driver->dev_type, &g_blk_device_type, sizeof(struct hv_guid));

	/*
	 * Divide the ring buffer data size (which is 1 page less than the ring
	 * buffer size, since that page is reserved for the ring buffer indices)
	 * by the max request size (which is
	 * vmbus_channel_packet_multipage_buffer + struct vstor_packet + u64)
	 */
	stor_driver->max_outstanding_req_per_channel =
		((stor_driver->ring_buffer_size - PAGE_SIZE) /
		  ALIGN(MAX_MULTIPAGE_BUFFER_PACKET +
			sizeof(struct vstor_packet) + sizeof(u64),
			sizeof(u64)));
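	/*
	 * Worked example with assumed values (neither number is taken from
	 * this file): if ring_buffer_size were 128 KiB with 4 KiB pages and
	 * the aligned per-request footprint came to 4096 bytes, the quotient
	 * would be (131072 - 4096) / 4096 = 31 outstanding requests.
	 */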
	DPRINT_INFO(BLKVSC, "max io outstd %u",
		    stor_driver->max_outstanding_req_per_channel);

	/* Setup the dispatch table */
	stor_driver->base.dev_add = blk_vsc_on_device_add;
	stor_driver->base.dev_rm = stor_vsc_on_device_remove;
	stor_driver->base.cleanup = stor_vsc_on_cleanup;
	stor_driver->on_io_request = stor_vsc_on_io_request;

	return ret;
}
static int blkvsc_submit_request(struct blkvsc_request *blkvsc_req,
			void (*request_completion)(struct hv_storvsc_request *))
{
	struct block_device_context *blkdev = blkvsc_req->dev;
	struct hv_device *device_ctx = blkdev->device_ctx;
	struct hv_driver *drv =
			drv_to_hv_drv(device_ctx->device.driver);
	struct storvsc_driver_object *storvsc_drv_obj =
			drv->priv;
	struct hv_storvsc_request *storvsc_req;
	struct vmscsi_request *vm_srb;
	int ret;

	storvsc_req = &blkvsc_req->request;
	vm_srb = &storvsc_req->vstor_packet.vm_srb;

	vm_srb->data_in = blkvsc_req->write ? WRITE_TYPE : READ_TYPE;

	storvsc_req->on_io_completion = request_completion;
	storvsc_req->context = blkvsc_req;

	vm_srb->port_number = blkdev->port;
	vm_srb->path_id = blkdev->path;
	vm_srb->target_id = blkdev->target;
	vm_srb->lun = 0;	/* this is not really used at all */

	vm_srb->cdb_length = blkvsc_req->cmd_len;

	memcpy(vm_srb->cdb, blkvsc_req->cmnd, vm_srb->cdb_length);

	storvsc_req->sense_buffer = blkvsc_req->sense_buffer;

	ret = storvsc_drv_obj->on_io_request(blkdev->device_ctx,
					     &blkvsc_req->request);
	if (ret == 0)
		blkdev->num_outstanding_reqs++;

	return ret;
}
static int blkvsc_open(struct block_device *bdev, fmode_t mode)
{
	struct block_device_context *blkdev = bdev->bd_disk->private_data;

	spin_lock(&blkdev->lock);

	blkdev->users++;

	spin_unlock(&blkdev->lock);

	return 0;
}
static int blkvsc_getgeo(struct block_device *bd, struct hd_geometry *hg)
{
	sector_t nsect = get_capacity(bd->bd_disk);
	sector_t cylinders = nsect;

	/*
	 * We are making up these values; let us keep it simple.
	 */
	hg->heads = 255;
	hg->sectors = 63;
	sector_div(cylinders, hg->heads * hg->sectors);
	hg->cylinders = cylinders;
	if ((sector_t)(hg->cylinders + 1) * hg->heads * hg->sectors < nsect)
		hg->cylinders = 0xffff;
	return 0;
}
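/*
 * Worked example for the fake geometry above (disk size assumed for
 * illustration only): a 10 GiB disk has 20971520 512-byte sectors, so
 * cylinders = 20971520 / (255 * 63) = 20971520 / 16065 = 1305 with
 * integer division; the 0xffff clamp only triggers for disks too large
 * for the 16-bit cylinder field.
 */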
static void blkvsc_init_rw(struct blkvsc_request *blkvsc_req)
{
	blkvsc_req->cmd_len = 16;

	if (rq_data_dir(blkvsc_req->req)) {
		blkvsc_req->write = 1;
		blkvsc_req->cmnd[0] = WRITE_16;
	} else {
		blkvsc_req->write = 0;
		blkvsc_req->cmnd[0] = READ_16;
	}

	blkvsc_req->cmnd[1] |=
		(blkvsc_req->req->cmd_flags & REQ_FUA) ? 0x8 : 0;

	*(unsigned long long *)&blkvsc_req->cmnd[2] =
		cpu_to_be64(blkvsc_req->sector_start);
	*(unsigned int *)&blkvsc_req->cmnd[10] =
		cpu_to_be32(blkvsc_req->sector_count);
}
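/*
 * Illustration of the CDB built above (example values assumed, not taken
 * from the original file): for a read of 8 sectors starting at LBA 0x1000
 * the 16-byte CDB is
 *
 *	byte 0       READ_16 (0x88)
 *	byte 1       0x08 if REQ_FUA was set, else 0x00
 *	bytes 2-9    big-endian LBA:    00 00 00 00 00 00 10 00
 *	bytes 10-13  big-endian count:  00 00 00 08
 *	bytes 14-15  group number/control, left zero here
 */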
static int blkvsc_ioctl(struct block_device *bd, fmode_t mode,
			unsigned cmd, unsigned long arg)
{
	struct block_device_context *blkdev = bd->bd_disk->private_data;
	int ret = 0;

	switch (cmd) {
	case HDIO_GET_IDENTITY:
		if (copy_to_user((void __user *)arg, blkdev->device_id,
				 blkdev->device_id_len))
			ret = -EFAULT;
		break;
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}
static void blkvsc_cmd_completion(struct hv_storvsc_request *request)
{
	struct blkvsc_request *blkvsc_req =
			(struct blkvsc_request *)request->context;
	struct block_device_context *blkdev =
			(struct block_device_context *)blkvsc_req->dev;
	struct scsi_sense_hdr sense_hdr;
	struct vmscsi_request *vm_srb;

	vm_srb = &blkvsc_req->request.vstor_packet.vm_srb;
	blkdev->num_outstanding_reqs--;

	if (vm_srb->scsi_status)
		if (scsi_normalize_sense(blkvsc_req->sense_buffer,
					 SCSI_SENSE_BUFFERSIZE, &sense_hdr))
			scsi_print_sense_hdr("blkvsc", &sense_hdr);

	complete(&blkvsc_req->request.wait_event);
}
static int blkvsc_do_operation(struct block_device_context *blkdev,
			       enum blkvsc_op_type op)
{
	struct blkvsc_request *blkvsc_req;
	struct page *page_buf;
	unsigned char *buf;
	unsigned char device_type;
	struct scsi_sense_hdr sense_hdr;
	struct vmscsi_request *vm_srb;
	int ret = 0;

	blkvsc_req = kmem_cache_zalloc(blkdev->request_pool, GFP_KERNEL);
	if (!blkvsc_req)
		return -ENOMEM;

	page_buf = alloc_page(GFP_KERNEL);
	if (!page_buf) {
		kmem_cache_free(blkdev->request_pool, blkvsc_req);
		return -ENOMEM;
	}

	vm_srb = &blkvsc_req->request.vstor_packet.vm_srb;
	init_completion(&blkvsc_req->request.wait_event);
	blkvsc_req->dev = blkdev;
	blkvsc_req->req = NULL;
	blkvsc_req->write = 0;

	blkvsc_req->request.data_buffer.pfn_array[0] =
		page_to_pfn(page_buf);
	blkvsc_req->request.data_buffer.offset = 0;

	switch (op) {
	case DO_INQUIRY:
		blkvsc_req->cmnd[0] = INQUIRY;
		blkvsc_req->cmnd[1] = 0x1;	/* EVPD: return a VPD page */
		blkvsc_req->cmnd[2] = 0x83;	/* VPD page 0x83: device id */
		blkvsc_req->cmnd[4] = 64;
		blkvsc_req->cmd_len = 6;
		blkvsc_req->request.data_buffer.len = 64;
		break;
	case DO_CAPACITY:
		blkdev->sector_size = 0;
		blkdev->capacity = 0;

		blkvsc_req->cmnd[0] = READ_CAPACITY;
		blkvsc_req->cmd_len = 10;	/* READ CAPACITY(10) CDB */
		blkvsc_req->request.data_buffer.len = 8;
		break;
	case DO_FLUSH:
		blkvsc_req->cmnd[0] = SYNCHRONIZE_CACHE;
		blkvsc_req->cmd_len = 10;
		blkvsc_req->request.data_buffer.pfn_array[0] = 0;
		blkvsc_req->request.data_buffer.len = 0;
		break;
	default:
		ret = -EINVAL;
		goto cleanup;
	}

	blkvsc_submit_request(blkvsc_req, blkvsc_cmd_completion);

	wait_for_completion_interruptible(&blkvsc_req->request.wait_event);

	/* check error */
	if (vm_srb->scsi_status) {
		scsi_normalize_sense(blkvsc_req->sense_buffer,
				     SCSI_SENSE_BUFFERSIZE, &sense_hdr);
		goto cleanup;
	}

	buf = kmap(page_buf);

	switch (op) {
	case DO_INQUIRY:
		device_type = buf[0] & 0x1F;

		if (device_type == 0x0)
			blkdev->device_type = HARDDISK_TYPE;
		else
			blkdev->device_type = UNKNOWN_DEV_TYPE;

		blkdev->device_id_len = buf[7];
		if (blkdev->device_id_len > 64)
			blkdev->device_id_len = 64;

		memcpy(blkdev->device_id, &buf[8], blkdev->device_id_len);
		break;
	case DO_CAPACITY:
		/* big-endian: bytes 0-3 are the last LBA, 4-7 the block size */
		blkdev->capacity =
			((buf[0] << 24) | (buf[1] << 16) |
			 (buf[2] << 8) | buf[3]) + 1;

		blkdev->sector_size =
			(buf[4] << 24) | (buf[5] << 16) |
			(buf[6] << 8) | buf[7];
		break;
	default:
		break;
	}

	kunmap(page_buf);

cleanup:
	__free_page(page_buf);

	kmem_cache_free(blkdev->request_pool, blkvsc_req);

	return ret;
}
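/*
 * Illustration of the DO_CAPACITY decode above (sample bytes assumed,
 * not captured from a real device): READ CAPACITY(10) data of
 * 00 3f ff ff 00 00 02 00 yields capacity = 0x003fffff + 1 = 0x400000
 * sectors (the device reports the last LBA, hence the +1) and
 * sector_size = 0x200 = 512 bytes, i.e. a 2 GiB disk.
 */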
static int blkvsc_cancel_pending_reqs(struct block_device_context *blkdev)
{
	struct blkvsc_request *pend_req, *tmp;
	struct blkvsc_request *comp_req, *tmp2;
	struct vmscsi_request *vm_srb;
	int ret = 0;

	/* Flush the pending list first */
	list_for_each_entry_safe(pend_req, tmp, &blkdev->pending_list,
				 pend_entry) {
		/*
		 * The pend_req could be part of a partially completed
		 * request. If so, complete those requests first until
		 * we hit pend_req itself.
		 */
		list_for_each_entry_safe(comp_req, tmp2,
					 &pend_req->group->blkvsc_req_list,
					 req_entry) {
			if (comp_req == pend_req)
				break;

			list_del(&comp_req->req_entry);

			if (comp_req->req) {
				vm_srb = &comp_req->request.vstor_packet.
					 vm_srb;
				ret = __blk_end_request(comp_req->req,
					(!vm_srb->scsi_status ? 0 : -EIO),
					comp_req->sector_count *
					blkdev->sector_size);
				/* FIXME: shouldn't this do more than return? */
				if (ret)
					goto out;
			}

			kmem_cache_free(blkdev->request_pool, comp_req);
		}

		list_del(&pend_req->pend_entry);
		list_del(&pend_req->req_entry);

		if (pend_req->req) {
			if (!__blk_end_request(pend_req->req, -EIO,
					       pend_req->sector_count *
					       blkdev->sector_size)) {
				/*
				 * All the sectors have been transferred, i.e.
				 * the request is done; free its group.
				 */
				kmem_cache_free(blkdev->request_pool,
						pend_req->group);
			}
		}

		kmem_cache_free(blkdev->request_pool, pend_req);
	}
out:
	return ret;
}
/*
 * blkvsc_remove() - Callback when our device is removed
 */
static int blkvsc_remove(struct device *device)
{
	struct hv_driver *drv =
				drv_to_hv_drv(device->driver);
	struct storvsc_driver_object *storvsc_drv_obj =
				drv->priv;
	struct hv_device *device_obj = device_to_hv_device(device);
	struct block_device_context *blkdev = dev_get_drvdata(device);
	unsigned long flags;

	/*
	 * Call to the vsc driver to let it know that the device is being
	 * removed
	 */
	storvsc_drv_obj->base.dev_rm(device_obj);

	/* Get to a known state */
	spin_lock_irqsave(&blkdev->lock, flags);
	blkdev->shutting_down = 1;
	blk_stop_queue(blkdev->gd->queue);
	spin_unlock_irqrestore(&blkdev->lock, flags);

	while (blkdev->num_outstanding_reqs) {
		DPRINT_INFO(STORVSC, "waiting for %d requests to complete...",
			    blkdev->num_outstanding_reqs);
		udelay(100);
	}

	blkvsc_do_operation(blkdev, DO_FLUSH);

	spin_lock_irqsave(&blkdev->lock, flags);
	blkvsc_cancel_pending_reqs(blkdev);
	spin_unlock_irqrestore(&blkdev->lock, flags);

	blk_cleanup_queue(blkdev->gd->queue);
	del_gendisk(blkdev->gd);
	kmem_cache_destroy(blkdev->request_pool);
	kfree(blkdev);

	return 0;
}
static void blkvsc_shutdown(struct device *device)
{
	struct block_device_context *blkdev = dev_get_drvdata(device);
	unsigned long flags;

	if (!blkdev)
		return;

	spin_lock_irqsave(&blkdev->lock, flags);
	blkdev->shutting_down = 1;
	blk_stop_queue(blkdev->gd->queue);
	spin_unlock_irqrestore(&blkdev->lock, flags);

	while (blkdev->num_outstanding_reqs) {
		DPRINT_INFO(STORVSC, "waiting for %d requests to complete...",
			    blkdev->num_outstanding_reqs);
		udelay(100);
	}

	blkvsc_do_operation(blkdev, DO_FLUSH);

	spin_lock_irqsave(&blkdev->lock, flags);
	blkvsc_cancel_pending_reqs(blkdev);
	spin_unlock_irqrestore(&blkdev->lock, flags);
}
static int blkvsc_release(struct gendisk *disk, fmode_t mode)
{
	struct block_device_context *blkdev = disk->private_data;

	mutex_lock(&blkvsc_mutex);
	spin_lock(&blkdev->lock);
	if (blkdev->users == 1) {
		spin_unlock(&blkdev->lock);
		blkvsc_do_operation(blkdev, DO_FLUSH);
		spin_lock(&blkdev->lock);
	}

	blkdev->users--;

	spin_unlock(&blkdev->lock);
	mutex_unlock(&blkvsc_mutex);
	return 0;
}
/*
 * We break the request into one or more blkvsc_requests and submit
 * them. If we can't submit them all, we put them on the
 * pending_list. blkvsc_request() will work on the pending_list.
 * The split rules are summarized after this function.
 */
static int blkvsc_do_request(struct block_device_context *blkdev,
			     struct request *req)
{
	struct bio *bio = NULL;
	struct bio_vec *bvec = NULL;
	struct bio_vec *prev_bvec = NULL;
	struct blkvsc_request *blkvsc_req = NULL;
	struct blkvsc_request *tmp;
	int databuf_idx = 0;
	int seg_idx = 0;
	sector_t start_sector;
	unsigned long num_sectors = 0;
	int ret = 0;
	int pending = 0;
	struct blkvsc_request_group *group = NULL;

	/* Create a group to tie req to list of blkvsc_reqs */
	group = kmem_cache_zalloc(blkdev->request_pool, GFP_ATOMIC);
	if (!group)
		return -ENOMEM;

	INIT_LIST_HEAD(&group->blkvsc_req_list);
	group->outstanding = group->status = 0;

	start_sector = blk_rq_pos(req);

	/* foreach bio in the request */
	for (bio = req->bio; bio; bio = bio->bi_next) {
		/*
		 * Map this bio into an existing or new storvsc request
		 */
		bio_for_each_segment(bvec, bio, seg_idx) {
			/* Get a new storvsc request */
			if ((!blkvsc_req) ||
			    (databuf_idx >= MAX_MULTIPAGE_BUFFER_COUNT)
			    /* hole at the begin of page */
			    || (bvec->bv_offset != 0) ||
			    /* hole at the end of page */
			    (prev_bvec &&
			     (prev_bvec->bv_len != PAGE_SIZE))) {
				/* submit the prev one */
				if (blkvsc_req) {
					blkvsc_req->sector_start =
						start_sector;
					sector_div(blkvsc_req->sector_start,
						(blkdev->sector_size >> 9));

					blkvsc_req->sector_count =
						num_sectors /
						(blkdev->sector_size >> 9);
					blkvsc_init_rw(blkvsc_req);
				}

				/*
				 * Create new blkvsc_req to represent
				 * the current bvec
				 */
				blkvsc_req = kmem_cache_zalloc(
					blkdev->request_pool, GFP_ATOMIC);
				if (!blkvsc_req) {
					/* free up everything */
					list_for_each_entry_safe(
						blkvsc_req, tmp,
						&group->blkvsc_req_list,
						req_entry) {
						list_del(
						    &blkvsc_req->req_entry);
						kmem_cache_free(
						    blkdev->request_pool,
						    blkvsc_req);
					}

					kmem_cache_free(
					    blkdev->request_pool, group);
					return -ENOMEM;
				}

				memset(blkvsc_req, 0,
				       sizeof(struct blkvsc_request));

				blkvsc_req->dev = blkdev;
				blkvsc_req->req = req;
				blkvsc_req->request.data_buffer.offset =
					bvec->bv_offset;
				blkvsc_req->request.data_buffer.len = 0;

				/* Add to the group */
				blkvsc_req->group = group;
				blkvsc_req->group->outstanding++;
				list_add_tail(&blkvsc_req->req_entry,
					&blkvsc_req->group->blkvsc_req_list);

				start_sector += num_sectors;
				num_sectors = 0;
				databuf_idx = 0;
			}

			/*
			 * Add the curr bvec/segment to the curr
			 * blkvsc_req
			 */
			blkvsc_req->request.data_buffer.
				pfn_array[databuf_idx] =
					page_to_pfn(bvec->bv_page);
			blkvsc_req->request.data_buffer.len +=
				bvec->bv_len;

			prev_bvec = bvec;

			databuf_idx++;
			num_sectors += bvec->bv_len >> 9;

		} /* bio_for_each_segment */

	} /* rq_for_each_bio */

	/* Handle the last one */
	if (blkvsc_req) {
		blkvsc_req->sector_start = start_sector;
		sector_div(blkvsc_req->sector_start,
			   (blkdev->sector_size >> 9));

		blkvsc_req->sector_count = num_sectors /
			(blkdev->sector_size >> 9);

		blkvsc_init_rw(blkvsc_req);
	}

	list_for_each_entry(blkvsc_req, &group->blkvsc_req_list, req_entry) {
		if (pending) {
			list_add_tail(&blkvsc_req->pend_entry,
				      &blkdev->pending_list);
		} else {
			ret = blkvsc_submit_request(blkvsc_req,
						    blkvsc_request_completion);
			if (ret == -1) {
				pending = 1;
				list_add_tail(&blkvsc_req->pend_entry,
					      &blkdev->pending_list);
			}
		}
	}

	return pending;
}
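/*
 * Summary of the split rules above: a bvec starts a new blkvsc_request
 * when there is no current request yet, when the pfn array is full
 * (MAX_MULTIPAGE_BUFFER_COUNT pages), when the bvec does not start at a
 * page boundary, or when the previous bvec did not fill its page. The
 * multi-page data buffer carries a single offset and length, so every
 * intermediate page in a request must be completely used.
 */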
static int blkvsc_do_pending_reqs(struct block_device_context *blkdev)
{
	struct blkvsc_request *pend_req, *tmp;
	int ret = 0;

	/* Flush the pending list first */
	list_for_each_entry_safe(pend_req, tmp, &blkdev->pending_list,
				 pend_entry) {
		ret = blkvsc_submit_request(pend_req,
					    blkvsc_request_completion);
		if (ret != 0)
			break;
		list_del(&pend_req->pend_entry);
	}

	return ret;
}
static void blkvsc_request(struct request_queue *queue)
{
	struct block_device_context *blkdev = NULL;
	struct request *req;
	int ret = 0;

	while ((req = blk_peek_request(queue)) != NULL) {
		blkdev = req->rq_disk->private_data;
		if (blkdev->shutting_down || req->cmd_type != REQ_TYPE_FS) {
			__blk_end_request_cur(req, 0);
			continue;
		}

		ret = blkvsc_do_pending_reqs(blkdev);
		if (ret != 0) {
			blk_stop_queue(queue);
			break;
		}

		blk_start_request(req);

		ret = blkvsc_do_request(blkdev, req);
		if (ret > 0) {
			blk_stop_queue(queue);
			break;
		} else if (ret < 0) {
			blk_requeue_request(queue, req);
			blk_stop_queue(queue);
			break;
		}
	}
}
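/*
 * Flow control between the queue and the channel: blkvsc_do_request()
 * returns 0 when every sub-request was submitted, a positive value when
 * some had to be parked on pending_list (so we stop the queue), and a
 * negative value on allocation failure (requeue and stop). The queue is
 * restarted from blkvsc_request_completion() once a whole group of
 * sub-requests has completed.
 */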
/* The one and only one */
static struct storvsc_driver_object g_blkvsc_drv;
static const struct block_device_operations block_ops = {
	.owner = THIS_MODULE,
	.open = blkvsc_open,
	.release = blkvsc_release,
	.getgeo = blkvsc_getgeo,
	.ioctl = blkvsc_ioctl,
};
/*
 * blkvsc_drv_init - BlkVsc driver initialization.
 */
static int blkvsc_drv_init(void)
{
	struct storvsc_driver_object *storvsc_drv_obj = &g_blkvsc_drv;
	struct hv_driver *drv = &g_blkvsc_drv.base;
	int ret;

	storvsc_drv_obj->ring_buffer_size = blkvsc_ringbuffer_size;

	drv->priv = storvsc_drv_obj;

	/* Callback to client driver to complete the initialization */
	blk_vsc_initialize(&storvsc_drv_obj->base);

	drv->driver.name = storvsc_drv_obj->base.name;

	drv->driver.probe = blkvsc_probe;
	drv->driver.remove = blkvsc_remove;
	drv->driver.shutdown = blkvsc_shutdown;

	/* The driver belongs to vmbus */
	ret = vmbus_child_driver_register(&drv->driver);

	return ret;
}
static int blkvsc_drv_exit_cb(struct device *dev, void *data)
{
	struct device **curr = (struct device **)data;

	*curr = dev;
	return 1; /* stop iterating */
}
static void blkvsc_drv_exit(void)
{
	struct storvsc_driver_object *storvsc_drv_obj = &g_blkvsc_drv;
	struct hv_driver *drv = &g_blkvsc_drv.base;
	struct device *current_dev;
	int ret;

	while (1) {
		current_dev = NULL;

		/* Get the device */
		ret = driver_for_each_device(&drv->driver, NULL,
					     (void *)&current_dev,
					     blkvsc_drv_exit_cb);
		if (ret)
			DPRINT_WARN(BLKVSC_DRV,
				    "driver_for_each_device returned %d", ret);

		if (current_dev == NULL)
			break;

		/* Initiate removal from the top-down */
		device_unregister(current_dev);
	}

	if (storvsc_drv_obj->base.cleanup)
		storvsc_drv_obj->base.cleanup(&storvsc_drv_obj->base);

	vmbus_child_driver_unregister(&drv->driver);
}
/*
 * blkvsc_probe - Add a new device for this driver
 */
static int blkvsc_probe(struct device *device)
{
	struct hv_driver *drv =
				drv_to_hv_drv(device->driver);
	struct storvsc_driver_object *storvsc_drv_obj =
				drv->priv;
	struct hv_device *device_obj = device_to_hv_device(device);

	struct block_device_context *blkdev = NULL;
	struct storvsc_device_info device_info;
	struct storvsc_major_info major_info;
	int ret = 0;

	blkdev = kzalloc(sizeof(struct block_device_context), GFP_KERNEL);
	if (!blkdev) {
		ret = -ENOMEM;
		goto cleanup;
	}

	INIT_LIST_HEAD(&blkdev->pending_list);

	/* Initialize what we can here */
	spin_lock_init(&blkdev->lock);

	blkdev->request_pool = kmem_cache_create(dev_name(&device_obj->device),
					sizeof(struct blkvsc_request), 0,
					SLAB_HWCACHE_ALIGN, NULL);
	if (!blkdev->request_pool) {
		ret = -ENOMEM;
		goto cleanup;
	}

	/* Call to the vsc driver to add the device */
	ret = storvsc_drv_obj->base.dev_add(device_obj, &device_info);
	if (ret != 0)
		goto cleanup;

	blkdev->device_ctx = device_obj;
	/* this identifies the device (0 or 1) */
	blkdev->target = device_info.target_id;
	/* this identifies the IDE controller (0 or 1) */
	blkdev->path = device_info.path_id;

	dev_set_drvdata(device, blkdev);

	ret = stor_vsc_get_major_info(&device_info, &major_info);
	if (ret)
		goto cleanup;

	if (major_info.do_register) {
		ret = register_blkdev(major_info.major, major_info.devname);
		if (ret != 0) {
			DPRINT_ERR(BLKVSC_DRV,
				   "register_blkdev() failed! ret %d", ret);
			goto remove;
		}
	}

	DPRINT_INFO(BLKVSC_DRV, "blkvsc registered for major %d!!",
		    major_info.major);

	blkdev->gd = alloc_disk(BLKVSC_MINORS);
	if (!blkdev->gd) {
		ret = -ENOMEM;
		goto cleanup;
	}

	blkdev->gd->queue = blk_init_queue(blkvsc_request, &blkdev->lock);

	blk_queue_max_segment_size(blkdev->gd->queue, PAGE_SIZE);
	blk_queue_max_segments(blkdev->gd->queue, MAX_MULTIPAGE_BUFFER_COUNT);
	blk_queue_segment_boundary(blkdev->gd->queue, PAGE_SIZE-1);
	blk_queue_bounce_limit(blkdev->gd->queue, BLK_BOUNCE_ANY);
	blk_queue_dma_alignment(blkdev->gd->queue, 511);

	blkdev->gd->major = major_info.major;
	if (major_info.index == 1 || major_info.index == 3)
		blkdev->gd->first_minor = BLKVSC_MINORS;
	else
		blkdev->gd->first_minor = 0;
	blkdev->gd->fops = &block_ops;
	blkdev->gd->events = DISK_EVENT_MEDIA_CHANGE;
	blkdev->gd->private_data = blkdev;
	blkdev->gd->driverfs_dev = &(blkdev->device_ctx->device);
	sprintf(blkdev->gd->disk_name, "hd%c", 'a' + major_info.index);

	blkvsc_do_operation(blkdev, DO_INQUIRY);
	blkvsc_do_operation(blkdev, DO_CAPACITY);

	set_capacity(blkdev->gd, blkdev->capacity * (blkdev->sector_size/512));
	blk_queue_logical_block_size(blkdev->gd->queue, blkdev->sector_size);

	add_disk(blkdev->gd);

	DPRINT_INFO(BLKVSC_DRV, "%s added!! capacity %lu sector_size %d",
		    blkdev->gd->disk_name, (unsigned long)blkdev->capacity,
		    blkdev->sector_size);

	return ret;

remove:
	storvsc_drv_obj->base.dev_rm(device_obj);

cleanup:
	if (blkdev) {
		if (blkdev->request_pool) {
			kmem_cache_destroy(blkdev->request_pool);
			blkdev->request_pool = NULL;
		}
		kfree(blkdev);
	}

	return ret;
}
static void blkvsc_request_completion(struct hv_storvsc_request *request)
{
	struct blkvsc_request *blkvsc_req =
			(struct blkvsc_request *)request->context;
	struct block_device_context *blkdev =
			(struct block_device_context *)blkvsc_req->dev;
	unsigned long flags;
	struct blkvsc_request *comp_req, *tmp;
	struct vmscsi_request *vm_srb;

	spin_lock_irqsave(&blkdev->lock, flags);

	blkdev->num_outstanding_reqs--;
	blkvsc_req->group->outstanding--;

	/*
	 * Only start processing when all the blkvsc_reqs are
	 * completed. This guarantees no out-of-order blkvsc_req
	 * completion when calling end_that_request_first()
	 */
	if (blkvsc_req->group->outstanding == 0) {
		list_for_each_entry_safe(comp_req, tmp,
					 &blkvsc_req->group->blkvsc_req_list,
					 req_entry) {
			list_del(&comp_req->req_entry);

			vm_srb =
				&comp_req->request.vstor_packet.vm_srb;
			if (!__blk_end_request(comp_req->req,
			      (!vm_srb->scsi_status ? 0 : -EIO),
			      comp_req->sector_count * blkdev->sector_size)) {
				/*
				 * All the sectors have been transferred, i.e.
				 * the request is done; free its group.
				 */
				kmem_cache_free(blkdev->request_pool,
						comp_req->group);
			}

			kmem_cache_free(blkdev->request_pool, comp_req);
		}

		if (!blkdev->shutting_down) {
			blkvsc_do_pending_reqs(blkdev);
			blk_start_queue(blkdev->gd->queue);
			blkvsc_request(blkdev->gd->queue);
		}
	}

	spin_unlock_irqrestore(&blkdev->lock, flags);
}
static int __init blkvsc_init(void)
{
	int ret;

	BUILD_BUG_ON(sizeof(sector_t) != 8);

	ret = blkvsc_drv_init();

	return ret;
}
static void __exit blkvsc_exit(void)
{
	blkvsc_drv_exit();
}
MODULE_LICENSE("GPL");
MODULE_VERSION(HV_DRV_VERSION);
MODULE_DESCRIPTION("Microsoft Hyper-V virtual block driver");
module_init(blkvsc_init);
module_exit(blkvsc_exit);