/*
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Haiyang Zhang <haiyangz@microsoft.com>
 *   Hank Janssen  <hjanssen@microsoft.com>
 *   K. Y. Srinivasan <kys@microsoft.com>
 */
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/blkdev.h>
#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_tcq.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_devinfo.h>
#include <scsi/scsi_dbg.h>

#include "version_info.h"
#include "storvsc_api.h"
static const char *driver_name = "storvsc";

/* {ba6163d9-04a1-4d29-b605-72e2ffb1dc7f} */
static const struct hv_guid gStorVscDeviceType = {
        .data = {
                0xd9, 0x63, 0x61, 0xba, 0xa1, 0x04, 0x29, 0x4d,
                0xb6, 0x05, 0x72, 0xe2, 0xff, 0xb1, 0xdc, 0x7f
        }
};
struct hv_host_device {
        struct hv_device *dev;
        struct kmem_cache *request_pool;
        unsigned int port;
        unsigned char path;
        unsigned char target;
};

struct storvsc_cmd_request {
        struct list_head entry;
        struct scsi_cmnd *cmd;

        unsigned int bounce_sgl_count;
        struct scatterlist *bounce_sgl;

        struct hv_storvsc_request request;
};
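/*
 * Each scsi_cmnd is wrapped in a storvsc_cmd_request allocated from the
 * per-host request_pool.  When the guest scatterlist is not suitable for a
 * multi-page VMBus transfer, bounce_sgl points at a page-aligned copy of the
 * data (see the bounce buffer helpers below).
 */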
/*
 * storvsc_initialize - Main entry point
 */
static int storvsc_initialize(struct hv_driver *driver)
{
        struct storvsc_driver *stor_driver;

        stor_driver = hvdr_to_stordr(driver);

        /* Make sure we are at least 2 pages since 1 page is used for control */

        driver->name = driver_name;
        memcpy(&driver->dev_type, &gStorVscDeviceType,
               sizeof(struct hv_guid));

        /*
         * Divide the ring buffer data size (which is 1 page less
         * than the ring buffer size since that page is reserved for
         * the ring buffer indices) by the max request size (which is
         * vmbus_channel_packet_multipage_buffer + struct vstor_packet + u64)
         */
        stor_driver->max_outstanding_req_per_channel =
                ((stor_driver->ring_buffer_size - PAGE_SIZE) /
                  ALIGN(MAX_MULTIPAGE_BUFFER_PACKET +
                        sizeof(struct vstor_packet) + sizeof(u64),
                        sizeof(u64)));

        DPRINT_INFO(STORVSC, "max io %u, currently %u\n",
                    stor_driver->max_outstanding_req_per_channel,
                    STORVSC_MAX_IO_REQUESTS);

        /* Setup the dispatch table */
        stor_driver->base.dev_add = storvsc_dev_add;
        stor_driver->base.dev_rm = storvsc_dev_remove;

        stor_driver->on_io_request = storvsc_do_io;

        return 0;
}
static int storvsc_device_alloc(struct scsi_device *sdevice)
{
        /*
         * This enables luns to be located sparsely. Otherwise, we may not
         * discover them.
         */
        sdevice->sdev_bflags |= BLIST_SPARSELUN | BLIST_LARGELUN;
        return 0;
}
static int storvsc_merge_bvec(struct request_queue *q,
                              struct bvec_merge_data *bmd, struct bio_vec *bvec)
{
        /* checking done by caller. */
        return bvec->bv_len;
}
static int storvsc_device_configure(struct scsi_device *sdevice)
{
        scsi_adjust_queue_depth(sdevice, MSG_SIMPLE_TAG,
                                STORVSC_MAX_IO_REQUESTS);

        DPRINT_INFO(STORVSC_DRV, "sdev (%p) - setting max segment size to %ld",
                    sdevice, PAGE_SIZE);
        blk_queue_max_segment_size(sdevice->request_queue, PAGE_SIZE);

        DPRINT_INFO(STORVSC_DRV, "sdev (%p) - adding merge bio vec routine",
                    sdevice);
        blk_queue_merge_bvec(sdevice->request_queue, storvsc_merge_bvec);

        blk_queue_bounce_limit(sdevice->request_queue, BLK_BOUNCE_ANY);
        /* sdevice->timeout = (2000 * HZ);//(75 * HZ); */

        return 0;
}
static void destroy_bounce_buffer(struct scatterlist *sgl,
                                  unsigned int sg_count)
{
        int i;
        struct page *page_buf;

        for (i = 0; i < sg_count; i++) {
                page_buf = sg_page((&sgl[i]));
                if (page_buf != NULL)
                        __free_page(page_buf);
        }

        kfree(sgl);
}
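/*
 * The host expects the data as a multi-page VMBus buffer: every scatterlist
 * element except the last must end on a page boundary, and every element
 * except the first must start at offset 0.  do_bounce_buffer() scans the
 * scatterlist and returns the index of the first entry that violates this
 * layout, or -1 if the list can be sent as-is (no bounce copy needed).
 */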
static int do_bounce_buffer(struct scatterlist *sgl, unsigned int sg_count)
{
        int i;

        /* No need to check */
        if (sg_count < 2)
                return -1;

        /* We have at least 2 sg entries */
        for (i = 0; i < sg_count; i++) {
                if (i == 0) {
                        /* make sure 1st one does not have hole */
                        if (sgl[i].offset + sgl[i].length != PAGE_SIZE)
                                return i;
                } else if (i == sg_count - 1) {
                        /* make sure last one does not have hole */
                        if (sgl[i].offset != 0)
                                return i;
                } else {
                        /* make sure no hole in the middle */
                        if (sgl[i].length != PAGE_SIZE || sgl[i].offset != 0)
                                return i;
                }
        }
        return -1;
}
static struct scatterlist *create_bounce_buffer(struct scatterlist *sgl,
                                                unsigned int sg_count,
                                                unsigned int len)
{
        int i;
        int num_pages;
        struct scatterlist *bounce_sgl;
        struct page *page_buf;

        num_pages = ALIGN(len, PAGE_SIZE) >> PAGE_SHIFT;

        bounce_sgl = kcalloc(num_pages, sizeof(struct scatterlist), GFP_ATOMIC);
        if (!bounce_sgl)
                return NULL;

        for (i = 0; i < num_pages; i++) {
                page_buf = alloc_page(GFP_ATOMIC);
                if (!page_buf)
                        goto cleanup;
                sg_set_page(&bounce_sgl[i], page_buf, 0, 0);
        }

        return bounce_sgl;

cleanup:
        destroy_bounce_buffer(bounce_sgl, num_pages);
        return NULL;
}
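/*
 * copy_from_bounce_buffer() copies data the host wrote into the bounce pages
 * back into the original scatterlist; it runs on the completion path and on
 * the submission failure path in storvsc_queuecommand.  The pages are mapped
 * with kmap_atomic, so interrupts are disabled for the duration of the copy.
 */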
/* Assume the original sgl has enough room */
static unsigned int copy_from_bounce_buffer(struct scatterlist *orig_sgl,
                                            struct scatterlist *bounce_sgl,
                                            unsigned int orig_sgl_count)
{
        int i;
        int j = 0;
        unsigned long src, dest;
        unsigned int srclen, destlen, copylen;
        unsigned int total_copied = 0;
        unsigned long bounce_addr = 0;
        unsigned long dest_addr = 0;
        unsigned long flags;

        local_irq_save(flags);

        for (i = 0; i < orig_sgl_count; i++) {
                dest_addr = (unsigned long)kmap_atomic(sg_page((&orig_sgl[i])),
                                        KM_IRQ0) + orig_sgl[i].offset;
                dest = dest_addr;
                destlen = orig_sgl[i].length;

                if (bounce_addr == 0)
                        bounce_addr =
                        (unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])),
                                                   KM_IRQ0);

                while (destlen) {
                        src = bounce_addr + bounce_sgl[j].offset;
                        srclen = bounce_sgl[j].length - bounce_sgl[j].offset;

                        copylen = min(srclen, destlen);
                        memcpy((void *)dest, (void *)src, copylen);

                        total_copied += copylen;
                        bounce_sgl[j].offset += copylen;
                        destlen -= copylen;
                        dest += copylen;

                        if (bounce_sgl[j].offset == bounce_sgl[j].length) {
                                /* full */
                                kunmap_atomic((void *)bounce_addr, KM_IRQ0);
                                j++;

                                /* if we need to use another bounce buffer */
                                if (destlen || i != orig_sgl_count - 1)
                                        bounce_addr =
                                        (unsigned long)kmap_atomic(
                                        sg_page((&bounce_sgl[j])), KM_IRQ0);
                        } else if (destlen == 0 && i == orig_sgl_count - 1) {
                                /* unmap the last bounce that is < PAGE_SIZE */
                                kunmap_atomic((void *)bounce_addr, KM_IRQ0);
                        }
                }

                kunmap_atomic((void *)(dest_addr - orig_sgl[i].offset),
                              KM_IRQ0);
        }

        local_irq_restore(flags);

        return total_copied;
}
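/*
 * copy_to_bounce_buffer() is the mirror image: before a request is sent to
 * the host, data from the original scatterlist is packed into the bounce
 * pages.  bounce_sgl[j].length doubles as the fill level of bounce page j,
 * which is why the loop below assumes each bounce entry starts at offset 0.
 */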
/* Assume the bounce_sgl has enough room, i.e. it was sized by create_bounce_buffer() */
static unsigned int copy_to_bounce_buffer(struct scatterlist *orig_sgl,
                                          struct scatterlist *bounce_sgl,
                                          unsigned int orig_sgl_count)
{
        int i;
        int j = 0;
        unsigned long src, dest;
        unsigned int srclen, destlen, copylen;
        unsigned int total_copied = 0;
        unsigned long bounce_addr = 0;
        unsigned long src_addr = 0;
        unsigned long flags;

        local_irq_save(flags);

        for (i = 0; i < orig_sgl_count; i++) {
                src_addr = (unsigned long)kmap_atomic(sg_page((&orig_sgl[i])),
                                        KM_IRQ0) + orig_sgl[i].offset;
                src = src_addr;
                srclen = orig_sgl[i].length;

                if (bounce_addr == 0)
                        bounce_addr =
                        (unsigned long)kmap_atomic(sg_page((&bounce_sgl[j])),
                                                   KM_IRQ0);

                while (srclen) {
                        /* assume bounce offset always == 0 */
                        dest = bounce_addr + bounce_sgl[j].length;
                        destlen = PAGE_SIZE - bounce_sgl[j].length;

                        copylen = min(srclen, destlen);
                        memcpy((void *)dest, (void *)src, copylen);

                        total_copied += copylen;
                        bounce_sgl[j].length += copylen;
                        srclen -= copylen;
                        src += copylen;

                        if (bounce_sgl[j].length == PAGE_SIZE) {
                                /* full..move to next entry */
                                kunmap_atomic((void *)bounce_addr, KM_IRQ0);
                                j++;

                                /* if we need to use another bounce buffer */
                                if (srclen || i != orig_sgl_count - 1)
                                        bounce_addr =
                                        (unsigned long)kmap_atomic(
                                        sg_page((&bounce_sgl[j])), KM_IRQ0);
                        } else if (srclen == 0 && i == orig_sgl_count - 1) {
                                /* unmap the last bounce that is < PAGE_SIZE */
                                kunmap_atomic((void *)bounce_addr, KM_IRQ0);
                        }
                }

                kunmap_atomic((void *)(src_addr - orig_sgl[i].offset), KM_IRQ0);
        }

        local_irq_restore(flags);

        return total_copied;
}
/*
 * storvsc_remove - Callback when our device is removed
 */
static int storvsc_remove(struct hv_device *dev)
{
        struct storvsc_driver *storvsc_drv_obj =
                drv_to_stordrv(dev->device.driver);
        struct Scsi_Host *host = dev_get_drvdata(&dev->device);
        struct hv_host_device *host_dev =
                (struct hv_host_device *)host->hostdata;

        /*
         * Call to the vsc driver to let it know that the device is being
         * removed
         */
        storvsc_drv_obj->base.dev_rm(dev);

        if (host_dev->request_pool) {
                kmem_cache_destroy(host_dev->request_pool);
                host_dev->request_pool = NULL;
        }

        DPRINT_INFO(STORVSC, "removing host adapter (%p)...", host);
        scsi_remove_host(host);

        DPRINT_INFO(STORVSC, "releasing host adapter (%p)...", host);
        scsi_host_put(host);

        return 0;
}
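/*
 * storvsc_get_chs reports a synthetic cylinders/heads/sectors geometry to
 * the BIOS: the capacity is capped at 65535 x 16 x 255 sectors, then a
 * sectors-per-track value of 255, 17, 31 or 63 is chosen (depending on the
 * size) so that cylinders * heads stays in the BIOS-visible range.  All
 * divisions go through sector_div() so 64-bit capacities work on 32-bit
 * builds.
 */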
static int storvsc_get_chs(struct scsi_device *sdev, struct block_device *bdev,
                           sector_t capacity, int *info)
{
        sector_t total_sectors = capacity;
        sector_t cylinder_times_heads = 0;
        sector_t temp = 0;

        int sectors_per_track = 0;
        int heads = 0;
        int cylinders = 0;
        int rem = 0;

        if (total_sectors > (65535 * 16 * 255))
                total_sectors = (65535 * 16 * 255);

        if (total_sectors >= (65535 * 16 * 63)) {
                sectors_per_track = 255;
                heads = 16;

                cylinder_times_heads = total_sectors;
                /* sector_div stores the quotient in cylinder_times_heads */
                rem = sector_div(cylinder_times_heads, sectors_per_track);
        } else {
                sectors_per_track = 17;

                cylinder_times_heads = total_sectors;
                /* sector_div stores the quotient in cylinder_times_heads */
                rem = sector_div(cylinder_times_heads, sectors_per_track);

                temp = cylinder_times_heads + 1023;
                /* sector_div stores the quotient in temp */
                rem = sector_div(temp, 1024);

                heads = temp;
                if (heads < 4)
                        heads = 4;

                if (cylinder_times_heads >= (heads * 1024) || (heads > 16)) {
                        sectors_per_track = 31;
                        heads = 16;

                        cylinder_times_heads = total_sectors;
                        /*
                         * sector_div stores the quotient in
                         * cylinder_times_heads
                         */
                        rem = sector_div(cylinder_times_heads,
                                         sectors_per_track);
                }

                if (cylinder_times_heads >= (heads * 1024)) {
                        sectors_per_track = 63;
                        heads = 16;

                        cylinder_times_heads = total_sectors;
                        /*
                         * sector_div stores the quotient in
                         * cylinder_times_heads
                         */
                        rem = sector_div(cylinder_times_heads,
                                         sectors_per_track);
                }
        }

        temp = cylinder_times_heads;
        /* sector_div stores the quotient in temp */
        rem = sector_div(temp, heads);
        cylinders = temp;

        info[0] = heads;
        info[1] = sectors_per_track;
        info[2] = cylinders;

        DPRINT_INFO(STORVSC_DRV, "CHS (%d, %d, %d)", cylinders, heads,
                    sectors_per_track);

        return 0;
}
static int storvsc_probe(struct hv_device *dev);
static int storvsc_queuecommand(struct Scsi_Host *shost, struct scsi_cmnd *scmnd);
static int storvsc_host_reset_handler(struct scsi_cmnd *scmnd);

static int storvsc_ringbuffer_size = STORVSC_RING_BUFFER_SIZE;
module_param(storvsc_ringbuffer_size, int, S_IRUGO);
MODULE_PARM_DESC(storvsc_ringbuffer_size, "Ring buffer size (bytes)");

/* The one and only one */
static struct storvsc_driver g_storvsc_drv;
static struct scsi_host_template scsi_driver = {
        .module = THIS_MODULE,
        .name = "storvsc_host_t",
        .bios_param = storvsc_get_chs,
        .queuecommand = storvsc_queuecommand,
        .eh_host_reset_handler = storvsc_host_reset_handler,
        .slave_alloc = storvsc_device_alloc,
        .slave_configure = storvsc_device_configure,

        /* 64 max_queue * 1 target */
        .can_queue = STORVSC_MAX_IO_REQUESTS*STORVSC_MAX_TARGETS,
        .this_id = -1,
        /* no use setting to 0 since ll_blk_rw reset it to 1 */
        .sg_tablesize = MAX_MULTIPAGE_BUFFER_COUNT,
        /*
         * ENABLE_CLUSTERING allows multiple physically contiguous bio_vecs to
         * merge into 1 sg element.  If set, we must limit the max_segment_size
         * to PAGE_SIZE, otherwise we may get 1 sg element that represents
         * multiple physically contiguous pfns (i.e. sg[x].length > PAGE_SIZE).
         */
        .use_clustering = ENABLE_CLUSTERING,
        /* Make sure we don't get an sg segment that crosses a page boundary */
        .dma_boundary = PAGE_SIZE-1,
};
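/*
 * Driver initialization: storvsc_drv_init() records the (module-parameter
 * controlled) ring buffer size, lets storvsc_initialize() fill in the
 * dispatch table and the per-channel request limit, and then registers the
 * driver with the vmbus bus so storvsc_probe() runs for each device offer.
 */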
/*
 * storvsc_drv_init - StorVsc driver initialization.
 */
static int storvsc_drv_init(void)
{
        int ret;
        struct storvsc_driver *storvsc_drv_obj = &g_storvsc_drv;
        struct hv_driver *drv = &g_storvsc_drv.base;

        storvsc_drv_obj->ring_buffer_size = storvsc_ringbuffer_size;

        /* Callback to client driver to complete the initialization */
        storvsc_initialize(&storvsc_drv_obj->base);

        DPRINT_INFO(STORVSC_DRV,
                    "max outstanding reqs %u",
                    storvsc_drv_obj->max_outstanding_req_per_channel);

        if (storvsc_drv_obj->max_outstanding_req_per_channel <
            STORVSC_MAX_IO_REQUESTS)
                return -1;

        drv->driver.name = storvsc_drv_obj->base.name;

        drv->probe = storvsc_probe;
        drv->remove = storvsc_remove;

        /* The driver belongs to vmbus */
        ret = vmbus_child_driver_register(&drv->driver);

        return ret;
}
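/*
 * Host (bus) reset: build a VSTOR_OPERATION_RESET_BUS packet, send it on the
 * device's VMBus channel with a completion requested, and wait up to one
 * second (HZ jiffies) for the host to acknowledge it.  Once the reset
 * completes, every request the host still held is expected to have been
 * completed back to the guest.
 */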
static int storvsc_host_reset(struct hv_device *device)
{
        int ret, t;
        struct storvsc_device *stor_device;
        struct hv_storvsc_request *request;
        struct vstor_packet *vstor_packet;

        DPRINT_INFO(STORVSC, "resetting host adapter...");

        stor_device = get_stor_device(device);
        if (!stor_device)
                return -1;

        request = &stor_device->reset_request;
        vstor_packet = &request->vstor_packet;

        init_completion(&request->wait_event);

        vstor_packet->operation = VSTOR_OPERATION_RESET_BUS;
        vstor_packet->flags = REQUEST_COMPLETION_FLAG;
        vstor_packet->vm_srb.path_id = stor_device->path_id;

        ret = vmbus_sendpacket(device->channel, vstor_packet,
                               sizeof(struct vstor_packet),
                               (unsigned long)&stor_device->reset_request,
                               VM_PKT_DATA_INBAND,
                               VMBUS_DATA_PACKET_FLAG_COMPLETION_REQUESTED);
        if (ret != 0)
                goto cleanup;

        t = wait_for_completion_timeout(&request->wait_event, HZ);
        if (t == 0) {
                ret = -ETIMEDOUT;
                goto cleanup;
        }

        DPRINT_INFO(STORVSC, "host adapter reset completed");

        /*
         * At this point, all outstanding requests in the adapter
         * should have been flushed out and return to us
         */

cleanup:
        put_stor_device(device);
        return ret;
}
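/*
 * Module unload: storvsc_drv_exit_cb() captures one child device of the
 * driver, and storvsc_drv_exit() keeps unregistering such devices one at a
 * time (top-down) until none remain, before unhooking the driver from vmbus.
 */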
static int storvsc_drv_exit_cb(struct device *dev, void *data)
{
        struct device **curr = (struct device **)data;

        *curr = dev;
        return 1;       /* stop iterating */
}
static void storvsc_drv_exit(void)
{
        struct storvsc_driver *storvsc_drv_obj = &g_storvsc_drv;
        struct hv_driver *drv = &g_storvsc_drv.base;
        struct device *current_dev = NULL;
        int ret;

        while (1) {
                current_dev = NULL;

                /* Get the device */
                ret = driver_for_each_device(&drv->driver, NULL,
                                             (void *)&current_dev,
                                             storvsc_drv_exit_cb);
                if (ret)
                        DPRINT_WARN(STORVSC_DRV,
                                    "driver_for_each_device returned %d", ret);

                if (current_dev == NULL)
                        break;

                /* Initiate removal from the top-down */
                device_unregister(current_dev);
        }

        if (storvsc_drv_obj->base.cleanup)
                storvsc_drv_obj->base.cleanup(&storvsc_drv_obj->base);

        vmbus_child_driver_unregister(&drv->driver);
}
/*
 * storvsc_probe - Add a new device for this driver
 */
static int storvsc_probe(struct hv_device *device)
{
        int ret;
        struct storvsc_driver *storvsc_drv_obj =
                drv_to_stordrv(device->device.driver);
        struct Scsi_Host *host;
        struct hv_host_device *host_dev;
        struct storvsc_device_info device_info;

        if (!storvsc_drv_obj->base.dev_add)
                return -1;

        host = scsi_host_alloc(&scsi_driver,
                               sizeof(struct hv_host_device));
        if (!host)
                return -ENOMEM;

        dev_set_drvdata(&device->device, host);

        host_dev = (struct hv_host_device *)host->hostdata;
        memset(host_dev, 0, sizeof(struct hv_host_device));

        host_dev->port = host->host_no;
        host_dev->dev = device;

        host_dev->request_pool =
                kmem_cache_create(dev_name(&device->device),
                                  sizeof(struct storvsc_cmd_request), 0,
                                  SLAB_HWCACHE_ALIGN, NULL);

        if (!host_dev->request_pool) {
                scsi_host_put(host);
                return -ENOMEM;
        }

        device_info.port_number = host->host_no;
        /* Call to the vsc driver to add the device */
        ret = storvsc_drv_obj->base.dev_add(device, (void *)&device_info);
        if (ret != 0) {
                kmem_cache_destroy(host_dev->request_pool);
                scsi_host_put(host);
                return -1;
        }

        host_dev->path = device_info.path_id;
        host_dev->target = device_info.target_id;

        /* max # of devices per target */
        host->max_lun = STORVSC_MAX_LUNS_PER_TARGET;
        /* max # of targets per channel */
        host->max_id = STORVSC_MAX_TARGETS;
        /* max # of channels */
        host->max_channel = STORVSC_MAX_CHANNELS - 1;

        /* Register the HBA and start the scsi bus scan */
        ret = scsi_add_host(host, &device->device);
        if (ret != 0) {
                storvsc_drv_obj->base.dev_rm(device);
                kmem_cache_destroy(host_dev->request_pool);
                scsi_host_put(host);
                return -1;
        }

        scsi_scan_host(host);
        return ret;
}
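/*
 * Completion path: the VSC layer invokes storvsc_commmand_completion() (set
 * as request->on_io_completion) when the host finishes a request.  It copies
 * any bounce-buffered data back into the original scatterlist, propagates the
 * SCSI status and residual count to the scsi_cmnd, calls the midlayer's
 * scsi_done(), and finally frees the storvsc_cmd_request back to the
 * per-host cache.
 */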
/*
 * storvsc_commmand_completion - Command completion processing
 */
static void storvsc_commmand_completion(struct hv_storvsc_request *request)
{
        struct storvsc_cmd_request *cmd_request =
                (struct storvsc_cmd_request *)request->context;
        struct scsi_cmnd *scmnd = cmd_request->cmd;
        struct hv_host_device *host_dev =
                (struct hv_host_device *)scmnd->device->host->hostdata;
        void (*scsi_done_fn)(struct scsi_cmnd *);
        struct scsi_sense_hdr sense_hdr;
        struct vmscsi_request *vm_srb;

        /* ASSERT(request == &cmd_request->request); */
        /* ASSERT((unsigned long)scmnd->host_scribble == */
        /*        (unsigned long)cmd_request); */
        /* ASSERT(scmnd->scsi_done); */

        if (cmd_request->bounce_sgl_count) {
                /* using bounce buffer */
                /* printk("copy_from_bounce_buffer\n"); */

                /* FIXME: We can optimize on writes by just skipping this */
                copy_from_bounce_buffer(scsi_sglist(scmnd),
                                        cmd_request->bounce_sgl,
                                        scsi_sg_count(scmnd));
                destroy_bounce_buffer(cmd_request->bounce_sgl,
                                      cmd_request->bounce_sgl_count);
        }

        vm_srb = &request->vstor_packet.vm_srb;
        scmnd->result = vm_srb->scsi_status;

        if (scmnd->result) {
                if (scsi_normalize_sense(scmnd->sense_buffer,
                                         SCSI_SENSE_BUFFERSIZE, &sense_hdr))
                        scsi_print_sense_hdr("storvsc", &sense_hdr);
        }

        /* ASSERT(request->BytesXfer <= request->data_buffer.Length); */
        scsi_set_resid(scmnd,
                       request->data_buffer.len -
                       vm_srb->data_transfer_length);

        scsi_done_fn = scmnd->scsi_done;

        scmnd->host_scribble = NULL;
        scmnd->scsi_done = NULL;

        /* !!DO NOT MODIFY the scmnd after this call */
        scsi_done_fn(scmnd);

        kmem_cache_free(host_dev->request_pool, cmd_request);
}
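/*
 * Submission path: storvsc_queuecommand_lck() wraps the scsi_cmnd in a
 * storvsc_cmd_request, translates it into a vmscsi_request (data direction,
 * CDB, addressing and the page-frame list for the data buffer, bounced if
 * necessary), and hands it to the VSC layer via on_io_request().  If the
 * channel has no room the request is unwound and SCSI_MLQUEUE_DEVICE_BUSY is
 * returned so the midlayer retries later.
 */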
/*
 * storvsc_queuecommand - Initiate command processing
 */
static int storvsc_queuecommand_lck(struct scsi_cmnd *scmnd,
                                    void (*done)(struct scsi_cmnd *))
{
        int ret;
        struct hv_host_device *host_dev =
                (struct hv_host_device *)scmnd->device->host->hostdata;
        struct hv_device *dev = host_dev->dev;
        struct storvsc_driver *storvsc_drv_obj =
                drv_to_stordrv(dev->device.driver);
        struct hv_storvsc_request *request;
        struct storvsc_cmd_request *cmd_request;
        unsigned int request_size = 0;
        int i;
        struct scatterlist *sgl;
        unsigned int sg_count = 0;
        struct vmscsi_request *vm_srb;

        /* If retrying, no need to prep the cmd */
        if (scmnd->host_scribble) {
                /* ASSERT(scmnd->scsi_done != NULL); */

                cmd_request =
                        (struct storvsc_cmd_request *)scmnd->host_scribble;
                DPRINT_INFO(STORVSC_DRV, "retrying scmnd %p cmd_request %p",
                            scmnd, cmd_request);

                goto retry_request;
        }

        /* ASSERT(scmnd->scsi_done == NULL); */
        /* ASSERT(scmnd->host_scribble == NULL); */

        scmnd->scsi_done = done;

        request_size = sizeof(struct storvsc_cmd_request);

        cmd_request = kmem_cache_zalloc(host_dev->request_pool,
                                        GFP_ATOMIC);
        if (!cmd_request) {
                scmnd->scsi_done = NULL;
                return SCSI_MLQUEUE_DEVICE_BUSY;
        }

        /* Setup the cmd request */
        cmd_request->bounce_sgl_count = 0;
        cmd_request->bounce_sgl = NULL;
        cmd_request->cmd = scmnd;

        scmnd->host_scribble = (unsigned char *)cmd_request;

        request = &cmd_request->request;
        vm_srb = &request->vstor_packet.vm_srb;

        /* Build the SRB */
        switch (scmnd->sc_data_direction) {
        case DMA_TO_DEVICE:
                vm_srb->data_in = WRITE_TYPE;
                break;
        case DMA_FROM_DEVICE:
                vm_srb->data_in = READ_TYPE;
                break;
        default:
                vm_srb->data_in = UNKNOWN_TYPE;
                break;
        }

        request->on_io_completion = storvsc_commmand_completion;
        request->context = cmd_request;/* scmnd; */

        /* request->PortId = scmnd->device->channel; */
        vm_srb->port_number = host_dev->port;
        vm_srb->path_id = scmnd->device->channel;
        vm_srb->target_id = scmnd->device->id;
        vm_srb->lun = scmnd->device->lun;

        /* ASSERT(scmnd->cmd_len <= 16); */
        vm_srb->cdb_length = scmnd->cmd_len;

        memcpy(vm_srb->cdb, scmnd->cmnd, vm_srb->cdb_length);

        request->sense_buffer = scmnd->sense_buffer;

        request->data_buffer.len = scsi_bufflen(scmnd);
        if (scsi_sg_count(scmnd)) {
                sgl = (struct scatterlist *)scsi_sglist(scmnd);
                sg_count = scsi_sg_count(scmnd);

                /* check if we need to bounce the sgl */
                if (do_bounce_buffer(sgl, scsi_sg_count(scmnd)) != -1) {
                        cmd_request->bounce_sgl =
                                create_bounce_buffer(sgl, scsi_sg_count(scmnd),
                                                     scsi_bufflen(scmnd));
                        if (!cmd_request->bounce_sgl) {
                                scmnd->scsi_done = NULL;
                                scmnd->host_scribble = NULL;
                                kmem_cache_free(host_dev->request_pool,
                                                cmd_request);

                                return SCSI_MLQUEUE_HOST_BUSY;
                        }

                        cmd_request->bounce_sgl_count =
                                ALIGN(scsi_bufflen(scmnd), PAGE_SIZE) >>
                                        PAGE_SHIFT;

                        /*
                         * FIXME: We can optimize on reads by just skipping
                         * this
                         */
                        copy_to_bounce_buffer(sgl, cmd_request->bounce_sgl,
                                              scsi_sg_count(scmnd));

                        sgl = cmd_request->bounce_sgl;
                        sg_count = cmd_request->bounce_sgl_count;
                }

                request->data_buffer.offset = sgl[0].offset;

                for (i = 0; i < sg_count; i++)
                        request->data_buffer.pfn_array[i] =
                                page_to_pfn(sg_page((&sgl[i])));

        } else if (scsi_sglist(scmnd)) {
                /* ASSERT(scsi_bufflen(scmnd) <= PAGE_SIZE); */
                request->data_buffer.offset =
                        virt_to_phys(scsi_sglist(scmnd)) & (PAGE_SIZE-1);
                request->data_buffer.pfn_array[0] =
                        virt_to_phys(scsi_sglist(scmnd)) >> PAGE_SHIFT;
        }

retry_request:
        /* Invokes the vsc to start an IO */
        ret = storvsc_drv_obj->on_io_request(dev,
                                             &cmd_request->request);
        if (ret == -1) {
                /* no more space */

                if (cmd_request->bounce_sgl_count) {
                        /*
                         * FIXME: We can optimize on writes by just skipping
                         * this
                         */
                        copy_from_bounce_buffer(scsi_sglist(scmnd),
                                                cmd_request->bounce_sgl,
                                                scsi_sg_count(scmnd));
                        destroy_bounce_buffer(cmd_request->bounce_sgl,
                                              cmd_request->bounce_sgl_count);
                }

                kmem_cache_free(host_dev->request_pool, cmd_request);

                scmnd->scsi_done = NULL;
                scmnd->host_scribble = NULL;

                ret = SCSI_MLQUEUE_DEVICE_BUSY;
        }

        return ret;
}
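/*
 * DEF_SCSI_QCMD() generates the storvsc_queuecommand() wrapper referenced by
 * the scsi_host_template above: it acquires the Scsi_Host lock and then calls
 * the legacy storvsc_queuecommand_lck() routine with the midlayer's
 * scsi_done callback.
 */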
static DEF_SCSI_QCMD(storvsc_queuecommand)
/*
 * storvsc_host_reset_handler - Reset the scsi HBA
 */
static int storvsc_host_reset_handler(struct scsi_cmnd *scmnd)
{
        int ret;
        struct hv_host_device *host_dev =
                (struct hv_host_device *)scmnd->device->host->hostdata;
        struct hv_device *dev = host_dev->dev;

        DPRINT_INFO(STORVSC_DRV, "sdev (%p) dev obj (%p) - host resetting...",
                    scmnd->device, dev);

        /* Invokes the vsc to reset the host/bus */
        ret = storvsc_host_reset(dev);
        if (ret != 0)
                return ret;

        DPRINT_INFO(STORVSC_DRV, "sdev (%p) dev obj (%p) - host reset",
                    scmnd->device, dev);

        return ret;
}
static int __init storvsc_init(void)
{
        int ret;

        DPRINT_INFO(STORVSC_DRV, "Storvsc initializing....");
        ret = storvsc_drv_init();

        return ret;
}

static void __exit storvsc_exit(void)
{
        storvsc_drv_exit();
}

MODULE_LICENSE("GPL");
MODULE_VERSION(HV_DRV_VERSION);
MODULE_DESCRIPTION("Microsoft Hyper-V virtual storage driver");
module_init(storvsc_init);
module_exit(storvsc_exit);