drivers/staging/hv/blkvsc_drv.c
Staging: hv: check return value of driver_for_each_device()
/*
 *
 * Copyright (c) 2009, Microsoft Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 * Authors:
 *   Hank Janssen  <hjanssen@microsoft.com>
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/blkdev.h>
#include <linux/major.h>
#include <linux/delay.h>
#include <linux/hdreg.h>

#include <scsi/scsi.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_eh.h>
#include <scsi/scsi_dbg.h>

#include "include/logging.h"
#include "include/vmbus.h"

#include "include/StorVscApi.h"


/* #defines */

#define BLKVSC_MINORS	64


/* Data types */

enum blkvsc_device_type {
	UNKNOWN_DEV_TYPE,
	HARDDISK_TYPE,
	DVD_TYPE,
};

/*
 * This structure ties the struct request and the struct
 * blkvsc_request/STORVSC_REQUEST together. A struct request may be
 * represented by one or more struct blkvsc_request.
 */
struct blkvsc_request_group {
	int outstanding;
	int status;

	struct list_head blkvsc_req_list;	/* list of blkvsc_requests */
};


struct blkvsc_request {
	struct list_head req_entry;	/* blkvsc_request_group.blkvsc_req_list */

	struct list_head pend_entry;	/* block_device_context.pending_list */

	struct request *req;		/* may be NULL for internally generated requests */
	struct block_device_context *dev;
	struct blkvsc_request_group *group;	/* the group this request is part of; may be NULL */

	wait_queue_head_t wevent;
	int cond;

	int write;
	sector_t sector_start;
	unsigned long sector_count;

	unsigned char sense_buffer[SCSI_SENSE_BUFFERSIZE];
	unsigned char cmd_len;
	unsigned char cmnd[MAX_COMMAND_SIZE];

	STORVSC_REQUEST request;
	/*
	 * !!!DO NOT ADD ANYTHING BELOW HERE!!! The extension buffer
	 * falls right here and is pointed to by request.Extension;
	 * anything added after it would overlap that memory.
	 */
};
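/*
 * Memory layout note: each object allocated from request_pool is a
 * struct blkvsc_request immediately followed by RequestExtSize bytes
 * of extension space; blkvsc_submit_request() points request.Extension
 * at those trailing bytes (see the kmem_cache_create() in blkvsc_probe()).
 */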

/* Per device structure */
struct block_device_context {
	struct device_context *device_ctx;	/* point back to our device context */
	struct kmem_cache *request_pool;
	spinlock_t lock;
	struct gendisk *gd;
	enum blkvsc_device_type device_type;
	struct list_head pending_list;

	unsigned char device_id[64];
	unsigned int device_id_len;
	int num_outstanding_reqs;
	int shutting_down;
	int media_not_present;
	unsigned int sector_size;
	sector_t capacity;
	unsigned int port;
	unsigned char path;
	unsigned char target;
	int users;
};

/* Per driver */
struct blkvsc_driver_context {
	/* !! These must be the first 2 fields !! */
	struct driver_context drv_ctx;
	STORVSC_DRIVER_OBJECT drv_obj;
};
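/*
 * Keeping drv_ctx first lets the driver core's struct device_driver
 * pointer be converted back: driver_to_driver_context() recovers the
 * embedded driver_context, and since it sits at offset 0 the result
 * can simply be cast to struct blkvsc_driver_context (see
 * blkvsc_probe() and blkvsc_submit_request()).
 */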

/* Static decl */
static int blkvsc_probe(struct device *dev);
static int blkvsc_remove(struct device *device);
static void blkvsc_shutdown(struct device *device);

static int blkvsc_open(struct block_device *bdev, fmode_t mode);
static int blkvsc_release(struct gendisk *disk, fmode_t mode);
static int blkvsc_media_changed(struct gendisk *gd);
static int blkvsc_revalidate_disk(struct gendisk *gd);
static int blkvsc_getgeo(struct block_device *bd, struct hd_geometry *hg);
static int blkvsc_ioctl(struct block_device *bd, fmode_t mode,
			unsigned cmd, unsigned long argument);
static void blkvsc_request(struct request_queue *queue);
static void blkvsc_request_completion(STORVSC_REQUEST *request);
static int blkvsc_do_request(struct block_device_context *blkdev,
			     struct request *req);
static int blkvsc_submit_request(struct blkvsc_request *blkvsc_req,
				 void (*request_completion)(STORVSC_REQUEST *));
static void blkvsc_init_rw(struct blkvsc_request *blkvsc_req);
static void blkvsc_cmd_completion(STORVSC_REQUEST *request);
static int blkvsc_do_inquiry(struct block_device_context *blkdev);
static int blkvsc_do_read_capacity(struct block_device_context *blkdev);
static int blkvsc_do_read_capacity16(struct block_device_context *blkdev);
static int blkvsc_do_flush(struct block_device_context *blkdev);
static int blkvsc_cancel_pending_reqs(struct block_device_context *blkdev);
static int blkvsc_do_pending_reqs(struct block_device_context *blkdev);


static int blkvsc_ringbuffer_size = BLKVSC_RING_BUFFER_SIZE;

/* The one and only one */
static struct blkvsc_driver_context g_blkvsc_drv;


static struct block_device_operations block_ops = {
	.owner = THIS_MODULE,
	.open = blkvsc_open,
	.release = blkvsc_release,
	.media_changed = blkvsc_media_changed,
	.revalidate_disk = blkvsc_revalidate_disk,
	.getgeo = blkvsc_getgeo,
	.ioctl = blkvsc_ioctl,
};

/*++

Name:   blkvsc_drv_init()

Desc:   BlkVsc driver initialization.

--*/
int blkvsc_drv_init(PFN_DRIVERINITIALIZE pfn_drv_init)
{
	int ret = 0;
	STORVSC_DRIVER_OBJECT *storvsc_drv_obj = &g_blkvsc_drv.drv_obj;
	struct driver_context *drv_ctx = &g_blkvsc_drv.drv_ctx;

	DPRINT_ENTER(BLKVSC_DRV);

	vmbus_get_interface(&storvsc_drv_obj->Base.VmbusChannelInterface);

	storvsc_drv_obj->RingBufferSize = blkvsc_ringbuffer_size;

	/* Callback to client driver to complete the initialization */
	pfn_drv_init(&storvsc_drv_obj->Base);

	drv_ctx->driver.name = storvsc_drv_obj->Base.name;
	memcpy(&drv_ctx->class_id, &storvsc_drv_obj->Base.deviceType,
	       sizeof(GUID));

	drv_ctx->probe = blkvsc_probe;
	drv_ctx->remove = blkvsc_remove;
	drv_ctx->shutdown = blkvsc_shutdown;

	/* The driver belongs to vmbus */
	ret = vmbus_child_driver_register(drv_ctx);

	DPRINT_EXIT(BLKVSC_DRV);

	return ret;
}


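/*
 * driver_for_each_device() callback: a nonzero return value stops the
 * iteration and is propagated back as driver_for_each_device()'s own
 * return value, so stashing the first device and returning 1 both
 * terminates the walk and reports it to the caller.
 */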
static int blkvsc_drv_exit_cb(struct device *dev, void *data)
{
	struct device **curr = (struct device **)data;
	*curr = dev;
	return 1; /* stop iterating */
}

/*++

Name:   blkvsc_drv_exit()

Desc:   BlkVsc driver cleanup; unregister all child devices and the driver.

--*/
void blkvsc_drv_exit(void)
{
	STORVSC_DRIVER_OBJECT *storvsc_drv_obj = &g_blkvsc_drv.drv_obj;
	struct driver_context *drv_ctx = &g_blkvsc_drv.drv_ctx;
	struct device *current_dev = NULL;
	int ret;

	DPRINT_ENTER(BLKVSC_DRV);

	while (1) {
		current_dev = NULL;

		/* Get the device */
		ret = driver_for_each_device(&drv_ctx->driver, NULL,
					     (void *)&current_dev,
					     blkvsc_drv_exit_cb);

		if (ret)
			DPRINT_WARN(BLKVSC_DRV,
				    "driver_for_each_device returned %d", ret);

		if (current_dev == NULL)
			break;

		/* Initiate removal from the top-down */
		device_unregister(current_dev);
	}

	if (storvsc_drv_obj->Base.OnCleanup)
		storvsc_drv_obj->Base.OnCleanup(&storvsc_drv_obj->Base);

	vmbus_child_driver_unregister(drv_ctx);

	DPRINT_EXIT(BLKVSC_DRV);
}

/*++

Name:   blkvsc_probe()

Desc:   Add a new device for this driver.

--*/
static int blkvsc_probe(struct device *device)
{
	int ret = 0;

	struct driver_context *driver_ctx =
		driver_to_driver_context(device->driver);
	struct blkvsc_driver_context *blkvsc_drv_ctx =
		(struct blkvsc_driver_context *)driver_ctx;
	STORVSC_DRIVER_OBJECT *storvsc_drv_obj = &blkvsc_drv_ctx->drv_obj;

	struct device_context *device_ctx = device_to_device_context(device);
	struct hv_device *device_obj = &device_ctx->device_obj;

	struct block_device_context *blkdev = NULL;
	STORVSC_DEVICE_INFO device_info;
	int major = 0;
	int devnum = 0;

	static int ide0_registered;
	static int ide1_registered;

	DPRINT_ENTER(BLKVSC_DRV);

	DPRINT_DBG(BLKVSC_DRV, "blkvsc_probe - enter");

	if (!storvsc_drv_obj->Base.OnDeviceAdd) {
		DPRINT_ERR(BLKVSC_DRV, "OnDeviceAdd() not set");

		ret = -1;
		goto Cleanup;
	}

	blkdev = kzalloc(sizeof(struct block_device_context), GFP_KERNEL);
	if (!blkdev) {
		ret = -ENOMEM;
		goto Cleanup;
	}

	INIT_LIST_HEAD(&blkdev->pending_list);

	/* Initialize what we can here */
	spin_lock_init(&blkdev->lock);

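	/*
	 * Groups are carved out of the same request_pool as requests
	 * (see blkvsc_do_request()), so a group must fit in a request.
	 */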
	ASSERT(sizeof(struct blkvsc_request_group) <=
	       sizeof(struct blkvsc_request));

	blkdev->request_pool = kmem_cache_create(dev_name(&device_ctx->device),
		sizeof(struct blkvsc_request) + storvsc_drv_obj->RequestExtSize,
		0, SLAB_HWCACHE_ALIGN, NULL);
	if (!blkdev->request_pool) {
		ret = -ENOMEM;
		goto Cleanup;
	}


	/* Call to the vsc driver to add the device */
	ret = storvsc_drv_obj->Base.OnDeviceAdd(device_obj, &device_info);
	if (ret != 0) {
		DPRINT_ERR(BLKVSC_DRV, "unable to add blkvsc device");
		goto Cleanup;
	}

	blkdev->device_ctx = device_ctx;
	blkdev->target = device_info.TargetId; /* identifies the device, 0 or 1 */
	blkdev->path = device_info.PathId; /* identifies the ide ctrl, 0 or 1 */

	dev_set_drvdata(device, blkdev);

	/* Calculate the major and device num */
	if (blkdev->path == 0) {
		major = IDE0_MAJOR;
		devnum = blkdev->path + blkdev->target;		/* 0 or 1 */

		if (!ide0_registered) {
			ret = register_blkdev(major, "ide");
			if (ret != 0) {
				DPRINT_ERR(BLKVSC_DRV,
					   "register_blkdev() failed! ret %d",
					   ret);
				goto Remove;
			}

			ide0_registered = 1;
		}
	} else if (blkdev->path == 1) {
		major = IDE1_MAJOR;
		devnum = blkdev->path + blkdev->target + 1;	/* 2 or 3 */

		if (!ide1_registered) {
			ret = register_blkdev(major, "ide");
			if (ret != 0) {
				DPRINT_ERR(BLKVSC_DRV,
					   "register_blkdev() failed! ret %d",
					   ret);
				goto Remove;
			}

			ide1_registered = 1;
		}
	} else {
		DPRINT_ERR(BLKVSC_DRV, "invalid pathid");
		ret = -1;
		goto Cleanup;
	}

	DPRINT_INFO(BLKVSC_DRV, "blkvsc registered for major %d!!", major);

	blkdev->gd = alloc_disk(BLKVSC_MINORS);
	if (!blkdev->gd) {
		DPRINT_ERR(BLKVSC_DRV, "alloc_disk() failed!");
		ret = -1;
		goto Cleanup;
	}

	blkdev->gd->queue = blk_init_queue(blkvsc_request, &blkdev->lock);

	blk_queue_max_segment_size(blkdev->gd->queue, PAGE_SIZE);
	blk_queue_max_phys_segments(blkdev->gd->queue,
				    MAX_MULTIPAGE_BUFFER_COUNT);
	blk_queue_max_hw_segments(blkdev->gd->queue,
				  MAX_MULTIPAGE_BUFFER_COUNT);
	blk_queue_segment_boundary(blkdev->gd->queue, PAGE_SIZE - 1);
	blk_queue_bounce_limit(blkdev->gd->queue, BLK_BOUNCE_ANY);
	blk_queue_dma_alignment(blkdev->gd->queue, 511);

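	/*
	 * Each IDE major hosts two disks; the second disk on a
	 * controller (devnum 1 or 3) starts at minor BLKVSC_MINORS so
	 * the two do not collide in the same major's minor space.
	 */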
	blkdev->gd->major = major;
	if (devnum == 1 || devnum == 3)
		blkdev->gd->first_minor = BLKVSC_MINORS;
	else
		blkdev->gd->first_minor = 0;
	blkdev->gd->fops = &block_ops;
	blkdev->gd->private_data = blkdev;
	sprintf(blkdev->gd->disk_name, "hd%c", 'a' + devnum);

	blkvsc_do_inquiry(blkdev);
	if (blkdev->device_type == DVD_TYPE) {
		set_disk_ro(blkdev->gd, 1);
		blkdev->gd->flags |= GENHD_FL_REMOVABLE;
		blkvsc_do_read_capacity(blkdev);
	} else {
		blkvsc_do_read_capacity16(blkdev);
	}

	set_capacity(blkdev->gd, blkdev->capacity * (blkdev->sector_size / 512));
	blk_queue_logical_block_size(blkdev->gd->queue, blkdev->sector_size);
	/* go! */
	add_disk(blkdev->gd);

	DPRINT_INFO(BLKVSC_DRV, "%s added!! capacity %lu sector_size %d",
		    blkdev->gd->disk_name, (unsigned long)blkdev->capacity,
		    blkdev->sector_size);

	return ret;

Remove:
	storvsc_drv_obj->Base.OnDeviceRemove(device_obj);

Cleanup:
	if (blkdev) {
		if (blkdev->request_pool) {
			kmem_cache_destroy(blkdev->request_pool);
			blkdev->request_pool = NULL;
		}
		kfree(blkdev);
		blkdev = NULL;
	}

	DPRINT_EXIT(BLKVSC_DRV);

	return ret;
}

static void blkvsc_shutdown(struct device *device)
{
	struct block_device_context *blkdev = dev_get_drvdata(device);
	unsigned long flags;

	if (!blkdev)
		return;

	DPRINT_DBG(BLKVSC_DRV, "blkvsc_shutdown - users %d disk %s\n",
		   blkdev->users, blkdev->gd->disk_name);

	spin_lock_irqsave(&blkdev->lock, flags);

	blkdev->shutting_down = 1;

	blk_stop_queue(blkdev->gd->queue);

	spin_unlock_irqrestore(&blkdev->lock, flags);

	while (blkdev->num_outstanding_reqs) {
		DPRINT_INFO(STORVSC, "waiting for %d requests to complete...",
			    blkdev->num_outstanding_reqs);

		udelay(100);
	}

	blkvsc_do_flush(blkdev);

	spin_lock_irqsave(&blkdev->lock, flags);

	blkvsc_cancel_pending_reqs(blkdev);

	spin_unlock_irqrestore(&blkdev->lock, flags);
}

static int blkvsc_do_flush(struct block_device_context *blkdev)
{
	struct blkvsc_request *blkvsc_req = NULL;

	DPRINT_DBG(BLKVSC_DRV, "blkvsc_do_flush()\n");

	if (blkdev->device_type != HARDDISK_TYPE)
		return 0;

	blkvsc_req = kmem_cache_alloc(blkdev->request_pool, GFP_KERNEL);
	if (!blkvsc_req)
		return -ENOMEM;

	memset(blkvsc_req, 0, sizeof(struct blkvsc_request));
	init_waitqueue_head(&blkvsc_req->wevent);
	blkvsc_req->dev = blkdev;
	blkvsc_req->req = NULL;
	blkvsc_req->write = 0;

	blkvsc_req->request.DataBuffer.PfnArray[0] = 0;
	blkvsc_req->request.DataBuffer.Offset = 0;
	blkvsc_req->request.DataBuffer.Length = 0;
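	/*
	 * A SYNCHRONIZE CACHE(10) CDB with the LBA and block-count
	 * fields left zero (the request was memset above) asks the
	 * device to flush its entire cache.
	 */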
	blkvsc_req->cmnd[0] = SYNCHRONIZE_CACHE;
	blkvsc_req->cmd_len = 10;

	/*
	 * Set this here since the completion routine may be invoked
	 * and completed before we return
	 */
	blkvsc_req->cond = 0;
	blkvsc_submit_request(blkvsc_req, blkvsc_cmd_completion);

	wait_event_interruptible(blkvsc_req->wevent, blkvsc_req->cond);

	kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req);

	return 0;
}

/* Do a scsi INQUIRY cmd here to get the device type (ie disk or dvd) */
static int blkvsc_do_inquiry(struct block_device_context *blkdev)
{
	struct blkvsc_request *blkvsc_req = NULL;
	struct page *page_buf;
	unsigned char *buf;
	unsigned char device_type;

	DPRINT_DBG(BLKVSC_DRV, "blkvsc_do_inquiry()\n");

	blkvsc_req = kmem_cache_alloc(blkdev->request_pool, GFP_KERNEL);
	if (!blkvsc_req)
		return -ENOMEM;

	memset(blkvsc_req, 0, sizeof(struct blkvsc_request));
	page_buf = alloc_page(GFP_KERNEL);
	if (!page_buf) {
		kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req);
		return -ENOMEM;
	}

	init_waitqueue_head(&blkvsc_req->wevent);
	blkvsc_req->dev = blkdev;
	blkvsc_req->req = NULL;
	blkvsc_req->write = 0;

	blkvsc_req->request.DataBuffer.PfnArray[0] = page_to_pfn(page_buf);
	blkvsc_req->request.DataBuffer.Offset = 0;
	blkvsc_req->request.DataBuffer.Length = 64;

	blkvsc_req->cmnd[0] = INQUIRY;
	blkvsc_req->cmnd[1] = 0x1;	/* EVPD: request vital product data */
	blkvsc_req->cmnd[2] = 0x83;	/* VPD page 0x83: device identification */
	blkvsc_req->cmnd[4] = 64;	/* allocation length */
	blkvsc_req->cmd_len = 6;

	/*
	 * Set this here since the completion routine may be invoked
	 * and completed before we return
	 */
	blkvsc_req->cond = 0;

	blkvsc_submit_request(blkvsc_req, blkvsc_cmd_completion);

	DPRINT_DBG(BLKVSC_DRV, "waiting %p to complete - cond %d\n",
		   blkvsc_req, blkvsc_req->cond);

	wait_event_interruptible(blkvsc_req->wevent, blkvsc_req->cond);

	buf = kmap(page_buf);

	/* print_hex_dump_bytes("", DUMP_PREFIX_NONE, buf, 64); */

	/* the peripheral device type is in the low 5 bits of byte 0 */
	device_type = buf[0] & 0x1F;

	if (device_type == 0x0) {
		blkdev->device_type = HARDDISK_TYPE;
	} else if (device_type == 0x5) {
		blkdev->device_type = DVD_TYPE;
	} else {
		/* TODO: this is currently an unsupported device type */
		blkdev->device_type = UNKNOWN_DEV_TYPE;
	}

	DPRINT_DBG(BLKVSC_DRV, "device type %d\n", device_type);

	blkdev->device_id_len = buf[7];
	if (blkdev->device_id_len > 64)
		blkdev->device_id_len = 64;

	memcpy(blkdev->device_id, &buf[8], blkdev->device_id_len);
	/* print_hex_dump_bytes("", DUMP_PREFIX_NONE, blkdev->device_id,
	 * blkdev->device_id_len); */

	kunmap(page_buf);

	__free_page(page_buf);

	kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req);

	return 0;
}

/* Do a scsi READ_CAPACITY cmd here to get the size of the disk */
static int blkvsc_do_read_capacity(struct block_device_context *blkdev)
{
	struct blkvsc_request *blkvsc_req = NULL;
	struct page *page_buf;
	unsigned char *buf;
	struct scsi_sense_hdr sense_hdr;

	DPRINT_DBG(BLKVSC_DRV, "blkvsc_do_read_capacity()\n");

	blkdev->sector_size = 0;
	blkdev->capacity = 0;
	blkdev->media_not_present = 0; /* assume a disk is present */

	blkvsc_req = kmem_cache_alloc(blkdev->request_pool, GFP_KERNEL);
	if (!blkvsc_req)
		return -ENOMEM;

	memset(blkvsc_req, 0, sizeof(struct blkvsc_request));
	page_buf = alloc_page(GFP_KERNEL);
	if (!page_buf) {
		kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req);
		return -ENOMEM;
	}

	init_waitqueue_head(&blkvsc_req->wevent);
	blkvsc_req->dev = blkdev;
	blkvsc_req->req = NULL;
	blkvsc_req->write = 0;

	blkvsc_req->request.DataBuffer.PfnArray[0] = page_to_pfn(page_buf);
	blkvsc_req->request.DataBuffer.Offset = 0;
	blkvsc_req->request.DataBuffer.Length = 8;

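	/*
	 * READ CAPACITY(10) returns 8 big-endian bytes: the last
	 * addressable LBA (hence the +1 when computing capacity below)
	 * followed by the block length in bytes.
	 */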
	blkvsc_req->cmnd[0] = READ_CAPACITY;
	blkvsc_req->cmd_len = 16;

	/*
	 * Set this here since the completion routine may be invoked
	 * and completed before we return
	 */
	blkvsc_req->cond = 0;

	blkvsc_submit_request(blkvsc_req, blkvsc_cmd_completion);

	DPRINT_DBG(BLKVSC_DRV, "waiting %p to complete - cond %d\n",
		   blkvsc_req, blkvsc_req->cond);

	wait_event_interruptible(blkvsc_req->wevent, blkvsc_req->cond);

	/* check error */
	if (blkvsc_req->request.Status) {
		scsi_normalize_sense(blkvsc_req->sense_buffer,
				     SCSI_SENSE_BUFFERSIZE, &sense_hdr);

		if (sense_hdr.asc == 0x3A) /* Medium not present */
			blkdev->media_not_present = 1;

		__free_page(page_buf);
		kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req);
		return 0;
	}
	buf = kmap(page_buf);

	/* big-endian on the wire to host order */
	blkdev->capacity = ((buf[0] << 24) | (buf[1] << 16) |
			    (buf[2] << 8) | buf[3]) + 1;
	blkdev->sector_size = (buf[4] << 24) | (buf[5] << 16) |
			      (buf[6] << 8) | buf[7];

	kunmap(page_buf);

	__free_page(page_buf);

	kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req);

	return 0;
}


static int blkvsc_do_read_capacity16(struct block_device_context *blkdev)
{
	struct blkvsc_request *blkvsc_req = NULL;
	struct page *page_buf;
	unsigned char *buf;
	struct scsi_sense_hdr sense_hdr;

	DPRINT_DBG(BLKVSC_DRV, "blkvsc_do_read_capacity16()\n");

	blkdev->sector_size = 0;
	blkdev->capacity = 0;
	blkdev->media_not_present = 0; /* assume a disk is present */

	blkvsc_req = kmem_cache_alloc(blkdev->request_pool, GFP_KERNEL);
	if (!blkvsc_req)
		return -ENOMEM;

	memset(blkvsc_req, 0, sizeof(struct blkvsc_request));
	page_buf = alloc_page(GFP_KERNEL);
	if (!page_buf) {
		kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req);
		return -ENOMEM;
	}

	init_waitqueue_head(&blkvsc_req->wevent);
	blkvsc_req->dev = blkdev;
	blkvsc_req->req = NULL;
	blkvsc_req->write = 0;

	blkvsc_req->request.DataBuffer.PfnArray[0] = page_to_pfn(page_buf);
	blkvsc_req->request.DataBuffer.Offset = 0;
	blkvsc_req->request.DataBuffer.Length = 12;

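	/*
	 * Note: a standard READ CAPACITY(16) CDB also carries service
	 * action 0x10 in byte 1, which is left zero here (the memset
	 * above cleared it); the virtual storage backend apparently
	 * accepts the CDB as issued. The response is 8 big-endian
	 * bytes of last LBA followed by 4 bytes of block length.
	 */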
	blkvsc_req->cmnd[0] = 0x9E; /* READ_CAPACITY16 */
	blkvsc_req->cmd_len = 16;

	/*
	 * Set this here since the completion routine may be invoked
	 * and completed before we return
	 */
	blkvsc_req->cond = 0;

	blkvsc_submit_request(blkvsc_req, blkvsc_cmd_completion);

	DPRINT_DBG(BLKVSC_DRV, "waiting %p to complete - cond %d\n",
		   blkvsc_req, blkvsc_req->cond);

	wait_event_interruptible(blkvsc_req->wevent, blkvsc_req->cond);

	/* check error */
	if (blkvsc_req->request.Status) {
		scsi_normalize_sense(blkvsc_req->sense_buffer,
				     SCSI_SENSE_BUFFERSIZE, &sense_hdr);

		if (sense_hdr.asc == 0x3A) /* Medium not present */
			blkdev->media_not_present = 1;

		__free_page(page_buf);
		kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req);
		return 0;
	}
	buf = kmap(page_buf);

	/* big-endian on the wire to host order */
	blkdev->capacity = be64_to_cpu(*(unsigned long long *)&buf[0]) + 1;
	blkdev->sector_size = be32_to_cpu(*(unsigned int *)&buf[8]);

	kunmap(page_buf);

	__free_page(page_buf);

	kmem_cache_free(blkvsc_req->dev->request_pool, blkvsc_req);

	return 0;
}

/*++

Name:   blkvsc_remove()

Desc:   Callback when our device is removed

--*/
static int blkvsc_remove(struct device *device)
{
	int ret = 0;

	struct driver_context *driver_ctx =
		driver_to_driver_context(device->driver);
	struct blkvsc_driver_context *blkvsc_drv_ctx =
		(struct blkvsc_driver_context *)driver_ctx;
	STORVSC_DRIVER_OBJECT *storvsc_drv_obj = &blkvsc_drv_ctx->drv_obj;

	struct device_context *device_ctx = device_to_device_context(device);
	struct hv_device *device_obj = &device_ctx->device_obj;
	struct block_device_context *blkdev = dev_get_drvdata(device);
	unsigned long flags;

	DPRINT_ENTER(BLKVSC_DRV);

	DPRINT_DBG(BLKVSC_DRV, "blkvsc_remove()\n");

	if (!storvsc_drv_obj->Base.OnDeviceRemove) {
		DPRINT_EXIT(BLKVSC_DRV);
		return -1;
	}

	/* Call to the vsc driver to let it know that the device is being removed */
	ret = storvsc_drv_obj->Base.OnDeviceRemove(device_obj);
	if (ret != 0) {
		/* TODO: */
		DPRINT_ERR(BLKVSC_DRV,
			   "unable to remove blkvsc device (ret %d)", ret);
	}

	/* Get to a known state */
	spin_lock_irqsave(&blkdev->lock, flags);

	blkdev->shutting_down = 1;

	blk_stop_queue(blkdev->gd->queue);

	spin_unlock_irqrestore(&blkdev->lock, flags);

	while (blkdev->num_outstanding_reqs) {
		DPRINT_INFO(STORVSC, "waiting for %d requests to complete...",
			    blkdev->num_outstanding_reqs);

		udelay(100);
	}

	blkvsc_do_flush(blkdev);

	spin_lock_irqsave(&blkdev->lock, flags);

	blkvsc_cancel_pending_reqs(blkdev);

	spin_unlock_irqrestore(&blkdev->lock, flags);

	blk_cleanup_queue(blkdev->gd->queue);

	del_gendisk(blkdev->gd);

	kmem_cache_destroy(blkdev->request_pool);

	kfree(blkdev);

	DPRINT_EXIT(BLKVSC_DRV);

	return ret;
}

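/*
 * Build the SCSI CDB for a read/write: LBAs above 32 bits need the
 * 16-byte CDB; counts over 255 sectors or LBAs over 0x1fffff (the
 * 21-bit limit of the 6-byte CDB) need the 10-byte CDB; everything
 * else fits the 6-byte CDB.
 */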
static void blkvsc_init_rw(struct blkvsc_request *blkvsc_req)
{
	ASSERT(blkvsc_req->req);
	ASSERT(blkvsc_req->sector_count <= (MAX_MULTIPAGE_BUFFER_COUNT * 8));

	blkvsc_req->cmd_len = 16;

	if (blkvsc_req->sector_start > 0xffffffff) {
		if (rq_data_dir(blkvsc_req->req)) {
			blkvsc_req->write = 1;
			blkvsc_req->cmnd[0] = WRITE_16;
		} else {
			blkvsc_req->write = 0;
			blkvsc_req->cmnd[0] = READ_16;
		}

		blkvsc_req->cmnd[1] |= blk_fua_rq(blkvsc_req->req) ? 0x8 : 0;

		*(unsigned long long *)&blkvsc_req->cmnd[2] =
			cpu_to_be64(blkvsc_req->sector_start);
		*(unsigned int *)&blkvsc_req->cmnd[10] =
			cpu_to_be32(blkvsc_req->sector_count);
	} else if ((blkvsc_req->sector_count > 0xff) ||
		   (blkvsc_req->sector_start > 0x1fffff)) {
		if (rq_data_dir(blkvsc_req->req)) {
			blkvsc_req->write = 1;
			blkvsc_req->cmnd[0] = WRITE_10;
		} else {
			blkvsc_req->write = 0;
			blkvsc_req->cmnd[0] = READ_10;
		}

		blkvsc_req->cmnd[1] |= blk_fua_rq(blkvsc_req->req) ? 0x8 : 0;

		*(unsigned int *)&blkvsc_req->cmnd[2] =
			cpu_to_be32(blkvsc_req->sector_start);
		*(unsigned short *)&blkvsc_req->cmnd[7] =
			cpu_to_be16(blkvsc_req->sector_count);
	} else {
		if (rq_data_dir(blkvsc_req->req)) {
			blkvsc_req->write = 1;
			blkvsc_req->cmnd[0] = WRITE_6;
		} else {
			blkvsc_req->write = 0;
			blkvsc_req->cmnd[0] = READ_6;
		}

		*(unsigned int *)&blkvsc_req->cmnd[1] =
			cpu_to_be32(blkvsc_req->sector_start) >> 8;
		blkvsc_req->cmnd[1] &= 0x1f;
		blkvsc_req->cmnd[4] = (unsigned char)blkvsc_req->sector_count;
	}
}

static int blkvsc_submit_request(struct blkvsc_request *blkvsc_req,
				 void (*request_completion)(STORVSC_REQUEST *))
{
	struct block_device_context *blkdev = blkvsc_req->dev;
	struct device_context *device_ctx = blkdev->device_ctx;
	struct driver_context *driver_ctx =
		driver_to_driver_context(device_ctx->device.driver);
	struct blkvsc_driver_context *blkvsc_drv_ctx =
		(struct blkvsc_driver_context *)driver_ctx;
	STORVSC_DRIVER_OBJECT *storvsc_drv_obj = &blkvsc_drv_ctx->drv_obj;
	int ret = 0;

	STORVSC_REQUEST *storvsc_req;

	DPRINT_DBG(BLKVSC_DRV, "blkvsc_submit_request() - req %p type %s start_sector %lu count %ld offset %d len %d\n",
		   blkvsc_req,
		   (blkvsc_req->write) ? "WRITE" : "READ",
		   (unsigned long)blkvsc_req->sector_start,
		   blkvsc_req->sector_count,
		   blkvsc_req->request.DataBuffer.Offset,
		   blkvsc_req->request.DataBuffer.Length);

	/*for (i = 0; i < (blkvsc_req->request.DataBuffer.Length >> 12); i++) {
		DPRINT_DBG(BLKVSC_DRV, "blkvsc_submit_request() - req %p pfn[%d] %llx\n",
			   blkvsc_req, i,
			   blkvsc_req->request.DataBuffer.PfnArray[i]);
	}*/

	storvsc_req = &blkvsc_req->request;
	storvsc_req->Extension = (void *)((unsigned long)blkvsc_req +
					  sizeof(struct blkvsc_request));

	storvsc_req->Type = blkvsc_req->write ? WRITE_TYPE : READ_TYPE;

	storvsc_req->OnIOCompletion = request_completion;
	storvsc_req->Context = blkvsc_req;

	storvsc_req->Host = blkdev->port;
	storvsc_req->Bus = blkdev->path;
	storvsc_req->TargetId = blkdev->target;
	storvsc_req->LunId = 0;	 /* this is not really used at all */

	storvsc_req->CdbLen = blkvsc_req->cmd_len;
	storvsc_req->Cdb = blkvsc_req->cmnd;

	storvsc_req->SenseBuffer = blkvsc_req->sense_buffer;
	storvsc_req->SenseBufferSize = SCSI_SENSE_BUFFERSIZE;

	ret = storvsc_drv_obj->OnIORequest(&blkdev->device_ctx->device_obj,
					   &blkvsc_req->request);
	if (ret == 0)
		blkdev->num_outstanding_reqs++;

	return ret;
}


/*
 * We break the request into one or more blkvsc_requests and submit
 * them. If we can't submit them all, we put them on the
 * pending_list. The blkvsc_request() will work on the pending_list.
 */
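/*
 * A new blkvsc_request is started whenever there is no current one,
 * its PFN array is full (MAX_MULTIPAGE_BUFFER_COUNT pages), the next
 * bio segment does not start at page offset 0, or the previous
 * segment did not fill its page: the multipage data buffer can only
 * describe one run of whole pages with partial data at the two ends.
 */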

static int blkvsc_do_request(struct block_device_context *blkdev,
			     struct request *req)
{
	struct bio *bio = NULL;
	struct bio_vec *bvec = NULL;
	struct bio_vec *prev_bvec = NULL;

	struct blkvsc_request *blkvsc_req = NULL;
	struct blkvsc_request *tmp;
	int databuf_idx = 0;
	int seg_idx = 0;

	sector_t start_sector;
	unsigned long num_sectors = 0;
	int ret = 0;
	int pending = 0;
	struct blkvsc_request_group *group = NULL;

	DPRINT_DBG(BLKVSC_DRV, "blkdev %p req %p sect %lu\n", blkdev, req,
		   (unsigned long)blk_rq_pos(req));

	/* Create a group to tie req to list of blkvsc_reqs */
	group = (struct blkvsc_request_group *)
		kmem_cache_alloc(blkdev->request_pool, GFP_ATOMIC);
	if (!group)
		return -ENOMEM;

	INIT_LIST_HEAD(&group->blkvsc_req_list);
	group->outstanding = group->status = 0;

	start_sector = blk_rq_pos(req);

	/* foreach bio in the request */
	for (bio = req->bio; bio; bio = bio->bi_next) {
		/* Map this bio into an existing or new storvsc request */
		bio_for_each_segment(bvec, bio, seg_idx) {
			DPRINT_DBG(BLKVSC_DRV, "bio_for_each_segment() - req %p bio %p bvec %p seg_idx %d databuf_idx %d\n",
				   req, bio, bvec, seg_idx, databuf_idx);

			/* Get a new storvsc request */
			if ((!blkvsc_req) ||	/* first time */
			    (databuf_idx >= MAX_MULTIPAGE_BUFFER_COUNT) ||
			    (bvec->bv_offset != 0) ||	/* hole at the beginning of the page */
			    (prev_bvec &&
			     (prev_bvec->bv_len != PAGE_SIZE))) {	/* hole at the end of the page */
				/* submit the prev one */
				if (blkvsc_req) {
					blkvsc_req->sector_start = start_sector;
					sector_div(blkvsc_req->sector_start,
						   (blkdev->sector_size >> 9));

					blkvsc_req->sector_count = num_sectors /
						(blkdev->sector_size >> 9);

					blkvsc_init_rw(blkvsc_req);
				}

				/* Create new blkvsc_req to represent the current bvec */
				blkvsc_req = kmem_cache_alloc(blkdev->request_pool,
							      GFP_ATOMIC);
				if (!blkvsc_req) {
					/* free up everything */
					list_for_each_entry_safe(blkvsc_req, tmp,
								 &group->blkvsc_req_list,
								 req_entry) {
						list_del(&blkvsc_req->req_entry);
						kmem_cache_free(blkdev->request_pool,
								blkvsc_req);
					}

					kmem_cache_free(blkdev->request_pool,
							group);
					return -ENOMEM;
				}

				memset(blkvsc_req, 0,
				       sizeof(struct blkvsc_request));

				blkvsc_req->dev = blkdev;
				blkvsc_req->req = req;
				blkvsc_req->request.DataBuffer.Offset =
					bvec->bv_offset;
				blkvsc_req->request.DataBuffer.Length = 0;

				/* Add to the group */
				blkvsc_req->group = group;
				blkvsc_req->group->outstanding++;
				list_add_tail(&blkvsc_req->req_entry,
					      &blkvsc_req->group->blkvsc_req_list);

				start_sector += num_sectors;
				num_sectors = 0;
				databuf_idx = 0;
			}

			/* Add the curr bvec/segment to the curr blkvsc_req */
			blkvsc_req->request.DataBuffer.PfnArray[databuf_idx] =
				page_to_pfn(bvec->bv_page);
			blkvsc_req->request.DataBuffer.Length += bvec->bv_len;

			prev_bvec = bvec;

			databuf_idx++;
			num_sectors += bvec->bv_len >> 9;

		} /* bio_for_each_segment */

	} /* foreach bio */

	/* Handle the last one */
	if (blkvsc_req) {
		DPRINT_DBG(BLKVSC_DRV, "blkdev %p req %p group %p count %d\n",
			   blkdev, req, blkvsc_req->group,
			   blkvsc_req->group->outstanding);

		blkvsc_req->sector_start = start_sector;
		sector_div(blkvsc_req->sector_start,
			   (blkdev->sector_size >> 9));

		blkvsc_req->sector_count = num_sectors /
			(blkdev->sector_size >> 9);

		blkvsc_init_rw(blkvsc_req);
	}

	list_for_each_entry(blkvsc_req, &group->blkvsc_req_list, req_entry) {
		if (pending) {
			DPRINT_DBG(BLKVSC_DRV, "adding blkvsc_req to pending_list - blkvsc_req %p start_sect %lu sect_count %ld (%lu %ld)\n",
				   blkvsc_req,
				   (unsigned long)blkvsc_req->sector_start,
				   blkvsc_req->sector_count,
				   (unsigned long)start_sector,
				   (unsigned long)num_sectors);

			list_add_tail(&blkvsc_req->pend_entry,
				      &blkdev->pending_list);
		} else {
			ret = blkvsc_submit_request(blkvsc_req,
						    blkvsc_request_completion);
			if (ret == -1) {
				pending = 1;
				list_add_tail(&blkvsc_req->pend_entry,
					      &blkdev->pending_list);
			}

			DPRINT_DBG(BLKVSC_DRV, "submitted blkvsc_req %p start_sect %lu sect_count %ld (%lu %ld) ret %d\n",
				   blkvsc_req,
				   (unsigned long)blkvsc_req->sector_start,
				   blkvsc_req->sector_count,
				   (unsigned long)start_sector,
				   num_sectors, ret);
		}
	}

	return pending;
}

static void blkvsc_cmd_completion(STORVSC_REQUEST *request)
{
	struct blkvsc_request *blkvsc_req =
		(struct blkvsc_request *)request->Context;
	struct block_device_context *blkdev =
		(struct block_device_context *)blkvsc_req->dev;

	struct scsi_sense_hdr sense_hdr;

	DPRINT_DBG(BLKVSC_DRV, "blkvsc_cmd_completion() - req %p\n",
		   blkvsc_req);

	blkdev->num_outstanding_reqs--;

	if (blkvsc_req->request.Status) {
		if (scsi_normalize_sense(blkvsc_req->sense_buffer,
					 SCSI_SENSE_BUFFERSIZE, &sense_hdr))
			scsi_print_sense_hdr("blkvsc", &sense_hdr);
	}

	blkvsc_req->cond = 1;
	wake_up_interruptible(&blkvsc_req->wevent);
}

static void blkvsc_request_completion(STORVSC_REQUEST *request)
{
	struct blkvsc_request *blkvsc_req =
		(struct blkvsc_request *)request->Context;
	struct block_device_context *blkdev =
		(struct block_device_context *)blkvsc_req->dev;
	unsigned long flags;
	struct blkvsc_request *comp_req, *tmp;

	ASSERT(blkvsc_req->group);

	DPRINT_DBG(BLKVSC_DRV, "blkdev %p blkvsc_req %p group %p type %s sect_start %lu sect_count %ld len %d group outstd %d total outstd %d\n",
		   blkdev,
		   blkvsc_req,
		   blkvsc_req->group,
		   (blkvsc_req->write) ? "WRITE" : "READ",
		   (unsigned long)blkvsc_req->sector_start,
		   blkvsc_req->sector_count,
		   blkvsc_req->request.DataBuffer.Length,
		   blkvsc_req->group->outstanding,
		   blkdev->num_outstanding_reqs);

	spin_lock_irqsave(&blkdev->lock, flags);

	blkdev->num_outstanding_reqs--;
	blkvsc_req->group->outstanding--;

	/*
	 * Only start processing when all the blkvsc_reqs are
	 * completed. This guarantees no out-of-order blkvsc_req
	 * completion when calling end_that_request_first()
	 */
	if (blkvsc_req->group->outstanding == 0) {
		list_for_each_entry_safe(comp_req, tmp,
					 &blkvsc_req->group->blkvsc_req_list,
					 req_entry) {
			DPRINT_DBG(BLKVSC_DRV, "completing blkvsc_req %p sect_start %lu sect_count %ld\n",
				   comp_req,
				   (unsigned long)comp_req->sector_start,
				   comp_req->sector_count);

			list_del(&comp_req->req_entry);

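			/*
			 * __blk_end_request() returns nonzero while the
			 * struct request still has bytes outstanding; once
			 * it returns 0 the whole request is done and the
			 * group bookkeeping can be freed.
			 */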
			if (!__blk_end_request(comp_req->req,
					       (!comp_req->request.Status ? 0 : -EIO),
					       comp_req->sector_count * blkdev->sector_size)) {
				/* All the sectors have been xferred, i.e. the request is done */
				DPRINT_DBG(BLKVSC_DRV, "req %p COMPLETED\n",
					   comp_req->req);
				kmem_cache_free(blkdev->request_pool,
						comp_req->group);
			}

			kmem_cache_free(blkdev->request_pool, comp_req);
		}

		if (!blkdev->shutting_down) {
			blkvsc_do_pending_reqs(blkdev);
			blk_start_queue(blkdev->gd->queue);
			blkvsc_request(blkdev->gd->queue);
		}
	}

	spin_unlock_irqrestore(&blkdev->lock, flags);
}

static int blkvsc_cancel_pending_reqs(struct block_device_context *blkdev)
{
	struct blkvsc_request *pend_req, *tmp;
	struct blkvsc_request *comp_req, *tmp2;

	int ret = 0;

	DPRINT_DBG(BLKVSC_DRV, "blkvsc_cancel_pending_reqs()");

	/* Flush the pending list first */
	list_for_each_entry_safe(pend_req, tmp, &blkdev->pending_list,
				 pend_entry) {
		/*
		 * The pend_req could be part of a partially completed
		 * request. If so, complete those requests first until
		 * we hit the pend_req
		 */
		list_for_each_entry_safe(comp_req, tmp2,
					 &pend_req->group->blkvsc_req_list,
					 req_entry) {
			DPRINT_DBG(BLKVSC_DRV, "completing blkvsc_req %p sect_start %lu sect_count %ld\n",
				   comp_req,
				   (unsigned long)comp_req->sector_start,
				   comp_req->sector_count);

			if (comp_req == pend_req)
				break;

			list_del(&comp_req->req_entry);

			if (comp_req->req) {
				ret = __blk_end_request(comp_req->req,
							(!comp_req->request.Status ? 0 : -EIO),
							comp_req->sector_count *
							blkdev->sector_size);
				ASSERT(ret != 0);
			}

			kmem_cache_free(blkdev->request_pool, comp_req);
		}

		DPRINT_DBG(BLKVSC_DRV, "cancelling pending request - %p\n",
			   pend_req);

		list_del(&pend_req->pend_entry);

		list_del(&pend_req->req_entry);

		if (pend_req->req) {
			if (!__blk_end_request(pend_req->req, -EIO,
					       pend_req->sector_count *
					       blkdev->sector_size)) {
				/* All the sectors have been xferred, i.e. the request is done */
				DPRINT_DBG(BLKVSC_DRV, "blkvsc_cancel_pending_reqs() - req %p COMPLETED\n",
					   pend_req->req);
				kmem_cache_free(blkdev->request_pool,
						pend_req->group);
			}
		}

		kmem_cache_free(blkdev->request_pool, pend_req);
	}

	return ret;
}

static int blkvsc_do_pending_reqs(struct block_device_context *blkdev)
{
	struct blkvsc_request *pend_req, *tmp;
	int ret = 0;

	/* Flush the pending list first */
	list_for_each_entry_safe(pend_req, tmp, &blkdev->pending_list,
				 pend_entry) {
		DPRINT_DBG(BLKVSC_DRV, "working off pending_list - %p\n",
			   pend_req);

		ret = blkvsc_submit_request(pend_req,
					    blkvsc_request_completion);
		if (ret != 0)
			break;
		else
			list_del(&pend_req->pend_entry);
	}

	return ret;
}

static void blkvsc_request(struct request_queue *queue)
{
	struct block_device_context *blkdev = NULL;
	struct request *req;
	int ret = 0;

	DPRINT_DBG(BLKVSC_DRV, "- enter\n");
	while ((req = blk_peek_request(queue)) != NULL) {
		DPRINT_DBG(BLKVSC_DRV, "- req %p\n", req);

		blkdev = req->rq_disk->private_data;
		if (blkdev->shutting_down || !blk_fs_request(req) ||
		    blkdev->media_not_present) {
			__blk_end_request_cur(req, 0);
			continue;
		}

		ret = blkvsc_do_pending_reqs(blkdev);

		if (ret != 0) {
			DPRINT_DBG(BLKVSC_DRV,
				   "- stop queue - pending_list not empty\n");
			blk_stop_queue(queue);
			break;
		}

		blk_start_request(req);

		ret = blkvsc_do_request(blkdev, req);
		if (ret > 0) {
			DPRINT_DBG(BLKVSC_DRV, "- stop queue - no room\n");
			blk_stop_queue(queue);
			break;
		} else if (ret < 0) {
			DPRINT_DBG(BLKVSC_DRV, "- stop queue - no mem\n");
			blk_requeue_request(queue, req);
			blk_stop_queue(queue);
			break;
		}
	}
}

static int blkvsc_open(struct block_device *bdev, fmode_t mode)
{
	struct block_device_context *blkdev = bdev->bd_disk->private_data;

	DPRINT_DBG(BLKVSC_DRV, "- users %d disk %s\n", blkdev->users,
		   blkdev->gd->disk_name);

	spin_lock(&blkdev->lock);

	if (!blkdev->users && blkdev->device_type == DVD_TYPE) {
		spin_unlock(&blkdev->lock);
		check_disk_change(bdev);
		spin_lock(&blkdev->lock);
	}

	blkdev->users++;

	spin_unlock(&blkdev->lock);
	return 0;
}

static int blkvsc_release(struct gendisk *disk, fmode_t mode)
{
	struct block_device_context *blkdev = disk->private_data;

	DPRINT_DBG(BLKVSC_DRV, "- users %d disk %s\n", blkdev->users,
		   blkdev->gd->disk_name);

	spin_lock(&blkdev->lock);
	if (blkdev->users == 1) {
		spin_unlock(&blkdev->lock);
		blkvsc_do_flush(blkdev);
		spin_lock(&blkdev->lock);
	}

	blkdev->users--;

	spin_unlock(&blkdev->lock);
	return 0;
}

static int blkvsc_media_changed(struct gendisk *gd)
{
	DPRINT_DBG(BLKVSC_DRV, "- enter\n");

	return 1;
}

static int blkvsc_revalidate_disk(struct gendisk *gd)
{
	struct block_device_context *blkdev = gd->private_data;

	DPRINT_DBG(BLKVSC_DRV, "- enter\n");

	if (blkdev->device_type == DVD_TYPE) {
		blkvsc_do_read_capacity(blkdev);
		set_capacity(blkdev->gd,
			     blkdev->capacity * (blkdev->sector_size / 512));
		blk_queue_logical_block_size(gd->queue, blkdev->sector_size);
	}
	return 0;
}

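/*
 * Fake a CHS geometry for the virtual disk: clamp to the CHS maximum
 * of 65535 cylinders * 16 heads * 255 sectors, then pick
 * sectors-per-track and head counts that keep the cylinder count in
 * range where possible.
 */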
static int blkvsc_getgeo(struct block_device *bd, struct hd_geometry *hg)
{
	sector_t total_sectors = get_capacity(bd->bd_disk);
	sector_t cylinder_times_heads = 0;
	sector_t temp = 0;

	int sectors_per_track = 0;
	int heads = 0;
	int cylinders = 0;
	int rem = 0;

	if (total_sectors > (65535 * 16 * 255))
		total_sectors = (65535 * 16 * 255);

	if (total_sectors >= (65535 * 16 * 63)) {
		sectors_per_track = 255;
		heads = 16;

		cylinder_times_heads = total_sectors;
		/* sector_div() stores the quotient in cylinder_times_heads */
		rem = sector_div(cylinder_times_heads, sectors_per_track);
	} else {
		sectors_per_track = 17;

		cylinder_times_heads = total_sectors;
		/* sector_div() stores the quotient in cylinder_times_heads */
		rem = sector_div(cylinder_times_heads, sectors_per_track);

		temp = cylinder_times_heads + 1023;
		/* sector_div() stores the quotient in temp */
		rem = sector_div(temp, 1024);

		heads = temp;

		if (heads < 4)
			heads = 4;

		if (cylinder_times_heads >= (heads * 1024) || (heads > 16)) {
			sectors_per_track = 31;
			heads = 16;

			cylinder_times_heads = total_sectors;
			rem = sector_div(cylinder_times_heads,
					 sectors_per_track);
		}

		if (cylinder_times_heads >= (heads * 1024)) {
			sectors_per_track = 63;
			heads = 16;

			cylinder_times_heads = total_sectors;
			rem = sector_div(cylinder_times_heads,
					 sectors_per_track);
		}
	}

	temp = cylinder_times_heads;
	/* sector_div() stores the quotient in temp */
	rem = sector_div(temp, heads);
	cylinders = temp;

	hg->heads = heads;
	hg->sectors = sectors_per_track;
	hg->cylinders = cylinders;

	DPRINT_INFO(BLKVSC_DRV, "CHS (%d, %d, %d)", cylinders, heads,
		    sectors_per_track);

	return 0;
}

static int blkvsc_ioctl(struct block_device *bd, fmode_t mode,
			unsigned cmd, unsigned long argument)
{
	struct block_device_context *blkdev = bd->bd_disk->private_data;
	int ret = 0;

	switch (cmd) {
	/*
	 * TODO: I think there is a certain format for HDIO_GET_IDENTITY
	 * rather than just a GUID. Commented it out for now.
	 */
	/*case HDIO_GET_IDENTITY:
		DPRINT_INFO(BLKVSC_DRV, "HDIO_GET_IDENTITY\n");

		if (copy_to_user((void __user *)argument, blkdev->device_id,
				 blkdev->device_id_len))
			ret = -EFAULT;

		break;*/
	default:
		ret = -EINVAL;
		break;
	}

	return ret;
}


MODULE_LICENSE("GPL");

static int __init blkvsc_init(void)
{
	int ret;

	ASSERT(sizeof(sector_t) == 8); /* Make sure CONFIG_LBD is set */

	DPRINT_ENTER(BLKVSC_DRV);

	DPRINT_INFO(BLKVSC_DRV, "Blkvsc initializing....");

	ret = blkvsc_drv_init(BlkVscInitialize);

	DPRINT_EXIT(BLKVSC_DRV);

	return ret;
}

static void __exit blkvsc_exit(void)
{
	DPRINT_ENTER(BLKVSC_DRV);

	blkvsc_drv_exit();

	DPRINT_EXIT(BLKVSC_DRV);
}

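/* Ring buffer size is tunable at module load time and read-only afterwards */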
module_param(blkvsc_ringbuffer_size, int, S_IRUGO);

module_init(blkvsc_init);
module_exit(blkvsc_exit);

/* eof */