/******************************************************************************
 *
 * Back-end of the driver for virtual block devices. This portion of the
 * driver exports a 'unified' block-device interface that can be accessed
 * by any operating system that implements a compatible front end. A
 * reference front-end implementation can be found in:
 *  drivers/block/xen-blkfront.c
 *
 * Copyright (c) 2003-2004, Keir Fraser & Steve Hand
 * Copyright (c) 2005, Christopher Clark
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/bitmap.h>

#include <xen/events.h>
#include <xen/page.h>
#include <xen/xen.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
#include <xen/balloon.h>
#include "common.h"

/*
 * Maximum number of unused free pages to keep in the internal buffer.
 * Setting this too low reduces the memory used by each backend, but can
 * incur a performance penalty.
 *
 * A sane value is xen_blkif_reqs * BLKIF_MAX_SEGMENTS_PER_REQUEST; it can
 * be set lower, which may degrade performance on IO-intensive workloads.
 */

static int xen_blkif_max_buffer_pages = 1024;
module_param_named(max_buffer_pages, xen_blkif_max_buffer_pages, int, 0644);
MODULE_PARM_DESC(max_buffer_pages,
"Maximum number of free pages to keep in each block backend buffer");

/*
 * Maximum number of grants to map persistently in blkback. For maximum
 * performance this should be the total number of grants that can be used
 * to fill the ring, but since this might become too high, especially with
 * the use of indirect descriptors, we set it to a value that provides good
 * performance without using too much memory.
 *
 * When the list of persistent grants is full we clean it up using an LRU
 * algorithm.
 */

static int xen_blkif_max_pgrants = 1056;
module_param_named(max_persistent_grants, xen_blkif_max_pgrants, int, 0644);
MODULE_PARM_DESC(max_persistent_grants,
                 "Maximum number of grants to map persistently");

/*
 * The LRU mechanism to clean the lists of persistent grants needs to
 * be executed periodically. The time interval between consecutive executions
 * of the purge mechanism is set in ms.
 */
#define LRU_INTERVAL 100

/*
 * When the persistent grants list is full we will remove unused grants
 * from the list. This is the percentage of grants to be removed on each
 * LRU execution.
 */
#define LRU_PERCENT_CLEAN 5
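
/*
 * With the default module parameters, max_persistent_grants is 1056 and
 * LRU_PERCENT_CLEAN is 5, so each purge pass tries to reclaim
 * (1056 / 100) * 5 = 50 grants, plus whatever the list has grown beyond
 * xen_blkif_max_pgrants (see purge_persistent_gnt() below).
 */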

/* Run-time switchable: /sys/module/xen_blkback/parameters/ */
static int log_stats;
module_param(log_stats, int, 0644);

#define BLKBACK_INVALID_HANDLE (~0)

/* Number of free pages to remove on each call to free_xenballooned_pages */
#define NUM_BATCH_FREE_PAGES 10
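
/*
 * Handing pages back in batches keeps the on-stack array in
 * shrink_free_pagepool() small and lets it drop free_pages_lock around
 * each free_xenballooned_pages() call, which may sleep and therefore
 * must not run under the spinlock.
 */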

static inline int get_free_page(struct xen_blkif *blkif, struct page **page)
{
        unsigned long flags;

        spin_lock_irqsave(&blkif->free_pages_lock, flags);
        if (list_empty(&blkif->free_pages)) {
                BUG_ON(blkif->free_pages_num != 0);
                spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
                return alloc_xenballooned_pages(1, page, false);
        }
        BUG_ON(blkif->free_pages_num == 0);
        page[0] = list_first_entry(&blkif->free_pages, struct page, lru);
        list_del(&page[0]->lru);
        blkif->free_pages_num--;
        spin_unlock_irqrestore(&blkif->free_pages_lock, flags);

        return 0;
}

static inline void put_free_pages(struct xen_blkif *blkif, struct page **page,
                                  int num)
{
        unsigned long flags;
        int i;

        spin_lock_irqsave(&blkif->free_pages_lock, flags);
        for (i = 0; i < num; i++)
                list_add(&page[i]->lru, &blkif->free_pages);
        blkif->free_pages_num += num;
        spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
}

static inline void shrink_free_pagepool(struct xen_blkif *blkif, int num)
{
        /* Remove requested pages in batches of NUM_BATCH_FREE_PAGES */
        struct page *page[NUM_BATCH_FREE_PAGES];
        unsigned int num_pages = 0;
        unsigned long flags;

        spin_lock_irqsave(&blkif->free_pages_lock, flags);
        while (blkif->free_pages_num > num) {
                BUG_ON(list_empty(&blkif->free_pages));
                page[num_pages] = list_first_entry(&blkif->free_pages,
                                                   struct page, lru);
                list_del(&page[num_pages]->lru);
                blkif->free_pages_num--;
                if (++num_pages == NUM_BATCH_FREE_PAGES) {
                        spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
                        free_xenballooned_pages(num_pages, page);
                        spin_lock_irqsave(&blkif->free_pages_lock, flags);
                        num_pages = 0;
                }
        }
        spin_unlock_irqrestore(&blkif->free_pages_lock, flags);
        if (num_pages != 0)
                free_xenballooned_pages(num_pages, page);
}

#define vaddr(page) ((unsigned long)pfn_to_kaddr(page_to_pfn(page)))

static int do_block_io_op(struct xen_blkif *blkif);
static int dispatch_rw_block_io(struct xen_blkif *blkif,
                                struct blkif_request *req,
                                struct pending_req *pending_req);
static void make_response(struct xen_blkif *blkif, u64 id,
                          unsigned short op, int st);

#define foreach_grant_safe(pos, n, rbtree, node) \
        for ((pos) = container_of(rb_first((rbtree)), typeof(*(pos)), node), \
             (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL; \
             &(pos)->node != NULL; \
             (pos) = container_of(n, typeof(*(pos)), node), \
             (n) = (&(pos)->node != NULL) ? rb_next(&(pos)->node) : NULL)
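
/*
 * Termination note: rb_first()/rb_next() return NULL once the tree is
 * exhausted, and container_of() applied to NULL yields a pointer whose
 * ->node address is NULL again (the member offset is subtracted and then
 * re-added), which is exactly what the loop condition tests. Fetching
 * 'n' before the body runs makes it safe to rb_erase() and free 'pos'
 * while iterating.
 */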

/*
 * We don't need locking around the persistent grant helpers
 * because blkback uses a single thread for each backend, so we
 * can be sure that these functions will never be called concurrently.
 *
 * The only exception to that is put_persistent_gnt, which can be called
 * from interrupt context (by xen_blkbk_unmap), so we have to use atomic
 * bit operations to modify the flags of a persistent grant and to count
 * the number of used grants.
 */
static int add_persistent_gnt(struct xen_blkif *blkif,
                               struct persistent_gnt *persistent_gnt)
{
        struct rb_node **new = NULL, *parent = NULL;
        struct persistent_gnt *this;

        if (blkif->persistent_gnt_c >= xen_blkif_max_pgrants) {
                if (!blkif->vbd.overflow_max_grants)
                        blkif->vbd.overflow_max_grants = 1;
                return -EBUSY;
        }
        /* Figure out where to put new node */
        new = &blkif->persistent_gnts.rb_node;
        while (*new) {
                this = container_of(*new, struct persistent_gnt, node);

                parent = *new;
                if (persistent_gnt->gnt < this->gnt)
                        new = &((*new)->rb_left);
                else if (persistent_gnt->gnt > this->gnt)
                        new = &((*new)->rb_right);
                else {
                        pr_alert_ratelimited(DRV_PFX " trying to add a gref that's already in the tree\n");
                        return -EINVAL;
                }
        }

        bitmap_zero(persistent_gnt->flags, PERSISTENT_GNT_FLAGS_SIZE);
        set_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags);
        /* Add new node and rebalance tree. */
        rb_link_node(&(persistent_gnt->node), parent, new);
        rb_insert_color(&(persistent_gnt->node), &blkif->persistent_gnts);
        blkif->persistent_gnt_c++;
        atomic_inc(&blkif->persistent_gnt_in_use);
        return 0;
}

static struct persistent_gnt *get_persistent_gnt(struct xen_blkif *blkif,
                                                 grant_ref_t gref)
{
        struct persistent_gnt *data;
        struct rb_node *node = NULL;

        node = blkif->persistent_gnts.rb_node;
        while (node) {
                data = container_of(node, struct persistent_gnt, node);

                if (gref < data->gnt)
                        node = node->rb_left;
                else if (gref > data->gnt)
                        node = node->rb_right;
                else {
                        if (test_bit(PERSISTENT_GNT_ACTIVE, data->flags)) {
                                pr_alert_ratelimited(DRV_PFX " requesting a grant already in use\n");
                                return NULL;
                        }
                        set_bit(PERSISTENT_GNT_ACTIVE, data->flags);
                        atomic_inc(&blkif->persistent_gnt_in_use);
                        return data;
                }
        }
        return NULL;
}

static void put_persistent_gnt(struct xen_blkif *blkif,
                               struct persistent_gnt *persistent_gnt)
{
        if (!test_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags))
                pr_alert_ratelimited(DRV_PFX " freeing a grant already unused\n");
        set_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags);
        clear_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags);
        atomic_dec(&blkif->persistent_gnt_in_use);
}

static void free_persistent_gnts(struct xen_blkif *blkif, struct rb_root *root,
                                 unsigned int num)
{
        struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        struct persistent_gnt *persistent_gnt;
        struct rb_node *n;
        int ret = 0;
        int segs_to_unmap = 0;

        foreach_grant_safe(persistent_gnt, n, root, node) {
                BUG_ON(persistent_gnt->handle ==
                        BLKBACK_INVALID_HANDLE);
                gnttab_set_unmap_op(&unmap[segs_to_unmap],
                        (unsigned long) pfn_to_kaddr(page_to_pfn(
                                persistent_gnt->page)),
                        GNTMAP_host_map,
                        persistent_gnt->handle);

                pages[segs_to_unmap] = persistent_gnt->page;

                if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST ||
                        !rb_next(&persistent_gnt->node)) {
                        ret = gnttab_unmap_refs(unmap, NULL, pages,
                                segs_to_unmap);
                        BUG_ON(ret);
                        put_free_pages(blkif, pages, segs_to_unmap);
                        segs_to_unmap = 0;
                }

                rb_erase(&persistent_gnt->node, root);
                kfree(persistent_gnt);
                num--;
        }
        BUG_ON(num != 0);
}

static void unmap_purged_grants(struct work_struct *work)
{
        struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        struct persistent_gnt *persistent_gnt;
        int ret, segs_to_unmap = 0;
        struct xen_blkif *blkif = container_of(work, typeof(*blkif), persistent_purge_work);

        while (!list_empty(&blkif->persistent_purge_list)) {
                persistent_gnt = list_first_entry(&blkif->persistent_purge_list,
                                                  struct persistent_gnt,
                                                  remove_node);
                list_del(&persistent_gnt->remove_node);

                gnttab_set_unmap_op(&unmap[segs_to_unmap],
                        vaddr(persistent_gnt->page),
                        GNTMAP_host_map,
                        persistent_gnt->handle);

                pages[segs_to_unmap] = persistent_gnt->page;

                if (++segs_to_unmap == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
                        ret = gnttab_unmap_refs(unmap, NULL, pages,
                                segs_to_unmap);
                        BUG_ON(ret);
                        put_free_pages(blkif, pages, segs_to_unmap);
                        segs_to_unmap = 0;
                }
                kfree(persistent_gnt);
        }
        if (segs_to_unmap > 0) {
                ret = gnttab_unmap_refs(unmap, NULL, pages, segs_to_unmap);
                BUG_ON(ret);
                put_free_pages(blkif, pages, segs_to_unmap);
        }
}

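/*
 * Eviction policy: a first scan only removes grants that have not been
 * used since the previous purge (PERSISTENT_GNT_WAS_ACTIVE clear). If
 * that is not enough, a second scan (scan_used) also evicts grants that
 * were used recently but are idle right now. A final pass (clean_used)
 * clears WAS_ACTIVE on the survivors so usage is tracked afresh until
 * the next purge. The actual unmapping is deferred to a workqueue.
 */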
static void purge_persistent_gnt(struct xen_blkif *blkif)
{
        struct persistent_gnt *persistent_gnt;
        struct rb_node *n;
        unsigned int num_clean, total;
        bool scan_used = false, clean_used = false;
        struct rb_root *root;

        if (blkif->persistent_gnt_c < xen_blkif_max_pgrants ||
            (blkif->persistent_gnt_c == xen_blkif_max_pgrants &&
            !blkif->vbd.overflow_max_grants)) {
                return;
        }

        if (work_pending(&blkif->persistent_purge_work)) {
                pr_alert_ratelimited(DRV_PFX "Scheduled work from previous purge is still pending, cannot purge list\n");
                return;
        }

        num_clean = (xen_blkif_max_pgrants / 100) * LRU_PERCENT_CLEAN;
        num_clean = blkif->persistent_gnt_c - xen_blkif_max_pgrants + num_clean;
        num_clean = min(blkif->persistent_gnt_c, num_clean);
        if ((num_clean == 0) ||
            (num_clean > (blkif->persistent_gnt_c - atomic_read(&blkif->persistent_gnt_in_use))))
                return;

        /*
         * At this point, we can be sure that there will be no calls
         * to get_persistent_gnt (because we are executing this code from
         * xen_blkif_schedule), there can only be calls to put_persistent_gnt,
         * which means that the number of currently used grants will go down,
         * but never up, so we will always be able to remove the requested
         * number of grants.
         */

        total = num_clean;

        pr_debug(DRV_PFX "Going to purge %u persistent grants\n", num_clean);

        BUG_ON(!list_empty(&blkif->persistent_purge_list));
        root = &blkif->persistent_gnts;
purge_list:
        foreach_grant_safe(persistent_gnt, n, root, node) {
                BUG_ON(persistent_gnt->handle ==
                        BLKBACK_INVALID_HANDLE);

                if (clean_used) {
                        clear_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags);
                        continue;
                }

                if (test_bit(PERSISTENT_GNT_ACTIVE, persistent_gnt->flags))
                        continue;
                if (!scan_used &&
                    (test_bit(PERSISTENT_GNT_WAS_ACTIVE, persistent_gnt->flags)))
                        continue;

                rb_erase(&persistent_gnt->node, root);
                list_add(&persistent_gnt->remove_node,
                         &blkif->persistent_purge_list);
                if (--num_clean == 0)
                        goto finished;
        }
        /*
         * If we get here it means we also need to start cleaning
         * grants that were used since the last purge, in order to reach
         * the requested number.
         */
        if (!scan_used && !clean_used) {
                pr_debug(DRV_PFX "Still missing %u purged frames\n", num_clean);
                scan_used = true;
                goto purge_list;
        }
finished:
        if (!clean_used) {
                pr_debug(DRV_PFX "Finished scanning for grants to clean, removing used flag\n");
                clean_used = true;
                goto purge_list;
        }

        blkif->persistent_gnt_c -= (total - num_clean);
        blkif->vbd.overflow_max_grants = 0;

        /* We can defer this work */
        INIT_WORK(&blkif->persistent_purge_work, unmap_purged_grants);
        schedule_work(&blkif->persistent_purge_work);
        pr_debug(DRV_PFX "Purged %u/%u\n", (total - num_clean), total);
}

/*
 * Retrieve a free pending_req structure from the 'pending_free' list.
 */
static struct pending_req *alloc_req(struct xen_blkif *blkif)
{
        struct pending_req *req = NULL;
        unsigned long flags;

        spin_lock_irqsave(&blkif->pending_free_lock, flags);
        if (!list_empty(&blkif->pending_free)) {
                req = list_entry(blkif->pending_free.next, struct pending_req,
                                 free_list);
                list_del(&req->free_list);
        }
        spin_unlock_irqrestore(&blkif->pending_free_lock, flags);
        return req;
}

/*
 * Return the 'pending_req' structure back to the free pool. We also
 * wake up the thread if it was waiting for a free pending_req.
 */
static void free_req(struct xen_blkif *blkif, struct pending_req *req)
{
        unsigned long flags;
        int was_empty;

        spin_lock_irqsave(&blkif->pending_free_lock, flags);
        was_empty = list_empty(&blkif->pending_free);
        list_add(&req->free_list, &blkif->pending_free);
        spin_unlock_irqrestore(&blkif->pending_free_lock, flags);
        if (was_empty)
                wake_up(&blkif->pending_free_wq);
}

/*
 * Routines for managing virtual block devices (vbds).
 */
static int xen_vbd_translate(struct phys_req *req, struct xen_blkif *blkif,
                             int operation)
{
        struct xen_vbd *vbd = &blkif->vbd;
        int rc = -EACCES;

        if ((operation != READ) && vbd->readonly)
                goto out;

        if (likely(req->nr_sects)) {
                blkif_sector_t end = req->sector_number + req->nr_sects;

                if (unlikely(end < req->sector_number))
                        goto out;
                if (unlikely(end > vbd_sz(vbd)))
                        goto out;
        }

        req->dev  = vbd->pdevice;
        req->bdev = vbd->bdev;
        rc = 0;

 out:
        return rc;
}

static void xen_vbd_resize(struct xen_blkif *blkif)
{
        struct xen_vbd *vbd = &blkif->vbd;
        struct xenbus_transaction xbt;
        int err;
        struct xenbus_device *dev = xen_blkbk_xenbus(blkif->be);
        unsigned long long new_size = vbd_sz(vbd);

        pr_info(DRV_PFX "VBD Resize: Domid: %d, Device: (%d, %d)\n",
                blkif->domid, MAJOR(vbd->pdevice), MINOR(vbd->pdevice));
        pr_info(DRV_PFX "VBD Resize: new size %llu\n", new_size);
        vbd->size = new_size;
again:
        err = xenbus_transaction_start(&xbt);
        if (err) {
                pr_warn(DRV_PFX "Error starting transaction\n");
                return;
        }
        err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu",
                            (unsigned long long)vbd_sz(vbd));
        if (err) {
                pr_warn(DRV_PFX "Error writing new size\n");
                goto abort;
        }
        /*
         * Write the current state; we will use this to synchronize
         * the front-end. If the current state is "connected" the
         * front-end will get the new size information online.
         */
        err = xenbus_printf(xbt, dev->nodename, "state", "%d", dev->state);
        if (err) {
                pr_warn(DRV_PFX "Error writing the state\n");
                goto abort;
        }

        err = xenbus_transaction_end(xbt, 0);
        if (err == -EAGAIN)
                goto again;
        if (err)
                pr_warn(DRV_PFX "Error ending transaction\n");
        return;
abort:
        xenbus_transaction_end(xbt, 1);
}

/*
 * Notification from the guest OS.
 */
static void blkif_notify_work(struct xen_blkif *blkif)
{
        blkif->waiting_reqs = 1;
        wake_up(&blkif->wq);
}

irqreturn_t xen_blkif_be_int(int irq, void *dev_id)
{
        blkif_notify_work(dev_id);
        return IRQ_HANDLED;
}

/*
 * SCHEDULER FUNCTIONS
 */

static void print_stats(struct xen_blkif *blkif)
{
        pr_info("xen-blkback (%s): oo %3llu  |  rd %4llu  |  wr %4llu  |  f %4llu"
                 "  |  ds %4llu | pg: %4u/%4d\n",
                 current->comm, blkif->st_oo_req,
                 blkif->st_rd_req, blkif->st_wr_req,
                 blkif->st_f_req, blkif->st_ds_req,
                 blkif->persistent_gnt_c,
                 xen_blkif_max_pgrants);
        blkif->st_print = jiffies + msecs_to_jiffies(10 * 1000);
        blkif->st_rd_req = 0;
        blkif->st_wr_req = 0;
        blkif->st_oo_req = 0;
        blkif->st_ds_req = 0;
}

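/*
 * Main per-backend thread. Each loop iteration waits twice: first for a
 * ring kick (or for the LRU interval to elapse) and then for a free
 * pending_req, before draining the ring via do_block_io_op(). Running
 * the persistent-grant purge and the free-page pool shrink from this
 * thread is what lets purge_persistent_gnt() assume there are no
 * concurrent get_persistent_gnt() calls.
 */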
int xen_blkif_schedule(void *arg)
{
        struct xen_blkif *blkif = arg;
        struct xen_vbd *vbd = &blkif->vbd;
        unsigned long timeout;
        int ret;

        xen_blkif_get(blkif);

        while (!kthread_should_stop()) {
                if (try_to_freeze())
                        continue;
                if (unlikely(vbd->size != vbd_sz(vbd)))
                        xen_vbd_resize(blkif);

                timeout = msecs_to_jiffies(LRU_INTERVAL);

                timeout = wait_event_interruptible_timeout(
                        blkif->wq,
                        blkif->waiting_reqs || kthread_should_stop(),
                        timeout);
                if (timeout == 0)
                        goto purge_gnt_list;
                timeout = wait_event_interruptible_timeout(
                        blkif->pending_free_wq,
                        !list_empty(&blkif->pending_free) ||
                        kthread_should_stop(),
                        timeout);
                if (timeout == 0)
                        goto purge_gnt_list;

                blkif->waiting_reqs = 0;
                smp_mb(); /* clear flag *before* checking for work */

                ret = do_block_io_op(blkif);
                if (ret > 0)
                        blkif->waiting_reqs = 1;
                if (ret == -EACCES)
                        wait_event_interruptible(blkif->shutdown_wq,
                                                 kthread_should_stop());

purge_gnt_list:
                if (blkif->vbd.feature_gnt_persistent &&
                    time_after(jiffies, blkif->next_lru)) {
                        purge_persistent_gnt(blkif);
                        blkif->next_lru = jiffies + msecs_to_jiffies(LRU_INTERVAL);
                }

                /* Shrink if we have more than xen_blkif_max_buffer_pages */
                shrink_free_pagepool(blkif, xen_blkif_max_buffer_pages);

                if (log_stats && time_after(jiffies, blkif->st_print))
                        print_stats(blkif);
        }

        /* Drain pending purge work */
        flush_work(&blkif->persistent_purge_work);

        if (log_stats)
                print_stats(blkif);

        blkif->xenblkd = NULL;
        xen_blkif_put(blkif);

        return 0;
}

/*
 * Remove persistent grants and empty the pool of free pages
 */
void xen_blkbk_free_caches(struct xen_blkif *blkif)
{
        /* Free all persistent grant pages */
        if (!RB_EMPTY_ROOT(&blkif->persistent_gnts))
                free_persistent_gnts(blkif, &blkif->persistent_gnts,
                        blkif->persistent_gnt_c);

        BUG_ON(!RB_EMPTY_ROOT(&blkif->persistent_gnts));
        blkif->persistent_gnt_c = 0;

        /* Since we are shutting down remove all pages from the buffer */
        shrink_free_pagepool(blkif, 0 /* All */);
}

/*
 * Unmap the grant references, and also remove the M2P over-rides
 * used in the 'pending_req'.
 */
static void xen_blkbk_unmap(struct xen_blkif *blkif,
                            struct grant_page *pages[],
                            int num)
{
        struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        struct page *unmap_pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        unsigned int i, invcount = 0;
        int ret;

        for (i = 0; i < num; i++) {
                if (pages[i]->persistent_gnt != NULL) {
                        put_persistent_gnt(blkif, pages[i]->persistent_gnt);
                        continue;
                }
                if (pages[i]->handle == BLKBACK_INVALID_HANDLE)
                        continue;
                unmap_pages[invcount] = pages[i]->page;
                gnttab_set_unmap_op(&unmap[invcount], vaddr(pages[i]->page),
                                    GNTMAP_host_map, pages[i]->handle);
                pages[i]->handle = BLKBACK_INVALID_HANDLE;
                if (++invcount == BLKIF_MAX_SEGMENTS_PER_REQUEST) {
                        ret = gnttab_unmap_refs(unmap, NULL, unmap_pages,
                                                invcount);
                        BUG_ON(ret);
                        put_free_pages(blkif, unmap_pages, invcount);
                        invcount = 0;
                }
        }
        if (invcount) {
                ret = gnttab_unmap_refs(unmap, NULL, unmap_pages, invcount);
                BUG_ON(ret);
                put_free_pages(blkif, unmap_pages, invcount);
        }
}

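/*
 * Map the frontend's grants, at most BLKIF_MAX_SEGMENTS_PER_REQUEST per
 * batch. Grants that are already mapped persistently are reused as-is;
 * the rest are batched into a single gnttab_map_refs() call and, when
 * persistent grants are enabled and there is still room below
 * xen_blkif_max_pgrants, promoted into the red-black tree so later
 * requests can skip the map hypercall entirely.
 */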
static int xen_blkbk_map(struct xen_blkif *blkif,
                         struct grant_page *pages[],
                         int num, bool ro)
{
        struct gnttab_map_grant_ref map[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        struct page *pages_to_gnt[BLKIF_MAX_SEGMENTS_PER_REQUEST];
        struct persistent_gnt *persistent_gnt = NULL;
        phys_addr_t addr = 0;
        int i, seg_idx, new_map_idx;
        int segs_to_map = 0;
        int ret = 0;
        int last_map = 0, map_until = 0;
        int use_persistent_gnts;

        use_persistent_gnts = (blkif->vbd.feature_gnt_persistent);

        /*
         * Fill out preq.nr_sects with the proper number of sectors, and set
         * up map[..] with the PFN of the page in our domain and the
         * corresponding grant reference for each page.
         */
again:
        for (i = map_until; i < num; i++) {
                uint32_t flags;

                if (use_persistent_gnts)
                        persistent_gnt = get_persistent_gnt(
                                blkif,
                                pages[i]->gref);

                if (persistent_gnt) {
                        /*
                         * We are using persistent grants and
                         * the grant is already mapped
                         */
                        pages[i]->page = persistent_gnt->page;
                        pages[i]->persistent_gnt = persistent_gnt;
                } else {
                        if (get_free_page(blkif, &pages[i]->page))
                                goto out_of_memory;
                        addr = vaddr(pages[i]->page);
                        pages_to_gnt[segs_to_map] = pages[i]->page;
                        pages[i]->persistent_gnt = NULL;
                        flags = GNTMAP_host_map;
                        if (!use_persistent_gnts && ro)
                                flags |= GNTMAP_readonly;
                        gnttab_set_map_op(&map[segs_to_map++], addr,
                                          flags, pages[i]->gref,
                                          blkif->domid);
                }
                map_until = i + 1;
                if (segs_to_map == BLKIF_MAX_SEGMENTS_PER_REQUEST)
                        break;
        }

        if (segs_to_map) {
                ret = gnttab_map_refs(map, NULL, pages_to_gnt, segs_to_map);
                BUG_ON(ret);
        }

        /*
         * Now swizzle the MFN in our domain with the MFN from the other
         * domain so that when we access the page it has the contents of
         * the page from the other domain.
         */
        for (seg_idx = last_map, new_map_idx = 0; seg_idx < map_until; seg_idx++) {
                if (!pages[seg_idx]->persistent_gnt) {
                        /* This is a newly mapped grant */
                        BUG_ON(new_map_idx >= segs_to_map);
                        if (unlikely(map[new_map_idx].status != 0)) {
                                pr_debug(DRV_PFX "invalid buffer -- could not remap it\n");
                                pages[seg_idx]->handle = BLKBACK_INVALID_HANDLE;
                                ret |= 1;
                                goto next;
                        }
                        pages[seg_idx]->handle = map[new_map_idx].handle;
                } else {
                        continue;
                }
                if (use_persistent_gnts &&
                    blkif->persistent_gnt_c < xen_blkif_max_pgrants) {
                        /*
                         * We are using persistent grants, the grant is
                         * not mapped but we might have room for it.
                         */
                        persistent_gnt = kmalloc(sizeof(struct persistent_gnt),
                                                 GFP_KERNEL);
                        if (!persistent_gnt) {
                                /*
                                 * If we don't have enough memory to
                                 * allocate the persistent_gnt struct
                                 * map this grant non-persistently
                                 */
                                goto next;
                        }
                        persistent_gnt->gnt = map[new_map_idx].ref;
                        persistent_gnt->handle = map[new_map_idx].handle;
                        persistent_gnt->page = pages[seg_idx]->page;
                        if (add_persistent_gnt(blkif,
                                               persistent_gnt)) {
                                kfree(persistent_gnt);
                                persistent_gnt = NULL;
                                goto next;
                        }
                        pages[seg_idx]->persistent_gnt = persistent_gnt;
                        pr_debug(DRV_PFX " grant %u added to the tree of persistent grants, using %u/%u\n",
                                 persistent_gnt->gnt, blkif->persistent_gnt_c,
                                 xen_blkif_max_pgrants);
                        goto next;
                }
                if (use_persistent_gnts && !blkif->vbd.overflow_max_grants) {
                        blkif->vbd.overflow_max_grants = 1;
                        pr_debug(DRV_PFX " domain %u, device %#x is using maximum number of persistent grants\n",
                                 blkif->domid, blkif->vbd.handle);
                }
                /*
                 * We could not map this grant persistently, so use it as
                 * a non-persistent grant.
                 */
next:
                new_map_idx++;
        }
        segs_to_map = 0;
        last_map = map_until;
        if (map_until != num)
                goto again;

        return ret;

out_of_memory:
        pr_alert(DRV_PFX "%s: out of memory\n", __func__);
        put_free_pages(blkif, pages_to_gnt, segs_to_map);
        return -ENOMEM;
}

static int xen_blkbk_map_seg(struct pending_req *pending_req)
{
        int rc;

        rc = xen_blkbk_map(pending_req->blkif, pending_req->segments,
                           pending_req->nr_pages,
                           (pending_req->operation != BLKIF_OP_READ));

        return rc;
}

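/*
 * Indirect requests do not carry their segments inline: the ring entry
 * holds grant references to whole pages that the frontend fills with
 * arrays of struct blkif_request_segment. Those indirect pages are
 * mapped read-only, parsed into the usual seg[] layout, and unmapped
 * again before the data pages themselves are mapped.
 */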
static int xen_blkbk_parse_indirect(struct blkif_request *req,
                                    struct pending_req *pending_req,
                                    struct seg_buf seg[],
                                    struct phys_req *preq)
{
        struct grant_page **pages = pending_req->indirect_pages;
        struct xen_blkif *blkif = pending_req->blkif;
        int indirect_grefs, rc, n, nseg, i;
        struct blkif_request_segment *segments = NULL;

        nseg = pending_req->nr_pages;
        indirect_grefs = INDIRECT_PAGES(nseg);
        BUG_ON(indirect_grefs > BLKIF_MAX_INDIRECT_PAGES_PER_REQUEST);

        for (i = 0; i < indirect_grefs; i++)
                pages[i]->gref = req->u.indirect.indirect_grefs[i];

        rc = xen_blkbk_map(blkif, pages, indirect_grefs, true);
        if (rc)
                goto unmap;

        for (n = 0, i = 0; n < nseg; n++) {
                if ((n % SEGS_PER_INDIRECT_FRAME) == 0) {
                        /* Map indirect segments */
                        if (segments)
                                kunmap_atomic(segments);
                        segments = kmap_atomic(pages[n/SEGS_PER_INDIRECT_FRAME]->page);
                }
                i = n % SEGS_PER_INDIRECT_FRAME;
                pending_req->segments[n]->gref = segments[i].gref;
                seg[n].nsec = segments[i].last_sect -
                        segments[i].first_sect + 1;
                seg[n].offset = (segments[i].first_sect << 9);
                if ((segments[i].last_sect >= (PAGE_SIZE >> 9)) ||
                    (segments[i].last_sect < segments[i].first_sect)) {
                        rc = -EINVAL;
                        goto unmap;
                }
                preq->nr_sects += seg[n].nsec;
        }

unmap:
        if (segments)
                kunmap_atomic(segments);
        xen_blkbk_unmap(blkif, pages, indirect_grefs);
        return rc;
}

static int dispatch_discard_io(struct xen_blkif *blkif,
                                struct blkif_request *req)
{
        int err = 0;
        int status = BLKIF_RSP_OKAY;
        struct block_device *bdev = blkif->vbd.bdev;
        unsigned long secure;
        struct phys_req preq;

        xen_blkif_get(blkif);

        preq.sector_number = req->u.discard.sector_number;
        preq.nr_sects      = req->u.discard.nr_sectors;

        err = xen_vbd_translate(&preq, blkif, WRITE);
        if (err) {
                pr_warn(DRV_PFX "access denied: DISCARD [%llu->%llu] on dev=%04x\n",
                        preq.sector_number,
                        preq.sector_number + preq.nr_sects, blkif->vbd.pdevice);
                goto fail_response;
        }
        blkif->st_ds_req++;

        secure = (blkif->vbd.discard_secure &&
                 (req->u.discard.flag & BLKIF_DISCARD_SECURE)) ?
                 BLKDEV_DISCARD_SECURE : 0;

        err = blkdev_issue_discard(bdev, req->u.discard.sector_number,
                                   req->u.discard.nr_sectors,
                                   GFP_KERNEL, secure);
fail_response:
        if (err == -EOPNOTSUPP) {
                pr_debug(DRV_PFX "discard op failed, not supported\n");
                status = BLKIF_RSP_EOPNOTSUPP;
        } else if (err)
                status = BLKIF_RSP_ERROR;

        make_response(blkif, req->u.discard.id, req->operation, status);
        xen_blkif_put(blkif);
        return err;
}

static int dispatch_other_io(struct xen_blkif *blkif,
                             struct blkif_request *req,
                             struct pending_req *pending_req)
{
        free_req(blkif, pending_req);
        make_response(blkif, req->u.other.id, req->operation,
                      BLKIF_RSP_EOPNOTSUPP);
        return -EIO;
}

static void xen_blk_drain_io(struct xen_blkif *blkif)
{
        atomic_set(&blkif->drain, 1);
        do {
                if (atomic_read(&blkif->inflight) == 0)
                        break;
                wait_for_completion_interruptible_timeout(
                                &blkif->drain_complete, HZ);

                if (!atomic_read(&blkif->drain))
                        break;
        } while (!kthread_should_stop());
        atomic_set(&blkif->drain, 0);
}

/*
 * Completion callback for the bios. Called as bio->bi_end_io().
 */

static void __end_block_io_op(struct pending_req *pending_req, int error)
{
        /* An error fails the entire request. */
        if ((pending_req->operation == BLKIF_OP_FLUSH_DISKCACHE) &&
            (error == -EOPNOTSUPP)) {
                pr_debug(DRV_PFX "flush diskcache op failed, not supported\n");
                xen_blkbk_flush_diskcache(XBT_NIL, pending_req->blkif->be, 0);
                pending_req->status = BLKIF_RSP_EOPNOTSUPP;
        } else if ((pending_req->operation == BLKIF_OP_WRITE_BARRIER) &&
                    (error == -EOPNOTSUPP)) {
                pr_debug(DRV_PFX "write barrier op failed, not supported\n");
                xen_blkbk_barrier(XBT_NIL, pending_req->blkif->be, 0);
                pending_req->status = BLKIF_RSP_EOPNOTSUPP;
        } else if (error) {
                pr_debug(DRV_PFX "Buffer not up-to-date at end of operation,"
                         " error=%d\n", error);
                pending_req->status = BLKIF_RSP_ERROR;
        }

        /*
         * If all of the bios have completed it is time to unmap
         * the grant references associated with 'request' and provide
         * the proper response on the ring.
         */
        if (atomic_dec_and_test(&pending_req->pendcnt)) {
                struct xen_blkif *blkif = pending_req->blkif;

                xen_blkbk_unmap(blkif,
                                pending_req->segments,
                                pending_req->nr_pages);
                make_response(blkif, pending_req->id,
                              pending_req->operation, pending_req->status);
                free_req(blkif, pending_req);
                /*
                 * Make sure the request is freed before releasing blkif,
                 * or there could be a race between free_req and the
                 * cleanup done in xen_blkif_free during shutdown.
                 *
                 * NB: The fact that we might try to wake up pending_free_wq
                 * before drain_complete (in case there's a drain going on)
                 * is not a problem with our current implementation
                 * because we can be sure there's no thread waiting on
                 * pending_free_wq if there's a drain going on, but it has
                 * to be taken into account if the current model is changed.
                 */
                if (atomic_dec_and_test(&blkif->inflight) && atomic_read(&blkif->drain)) {
                        complete(&blkif->drain_complete);
                }
                xen_blkif_put(blkif);
        }
}

/*
 * bio callback.
 */
static void end_block_io_op(struct bio *bio, int error)
{
        __end_block_io_op(bio->bi_private, error);
        bio_put(bio);
}

/*
 * Copy a 'struct blkif_request' from the ring buffer (it carries the
 * sectors we want, the number of them, grant references, etc.) and
 * hand it over, via the block API, to the proper block device.
 */
static int
__do_block_io_op(struct xen_blkif *blkif)
{
        union blkif_back_rings *blk_rings = &blkif->blk_rings;
        struct blkif_request req;
        struct pending_req *pending_req;
        RING_IDX rc, rp;
        int more_to_do = 0;

        rc = blk_rings->common.req_cons;
        rp = blk_rings->common.sring->req_prod;
        rmb(); /* Ensure we see queued requests up to 'rp'. */

        if (RING_REQUEST_PROD_OVERFLOW(&blk_rings->common, rp)) {
                rc = blk_rings->common.rsp_prod_pvt;
                pr_warn(DRV_PFX "Frontend provided bogus ring requests (%d - %d = %d). Halting ring processing on dev=%04x\n",
                        rp, rc, rp - rc, blkif->vbd.pdevice);
                return -EACCES;
        }
        while (rc != rp) {

                if (RING_REQUEST_CONS_OVERFLOW(&blk_rings->common, rc))
                        break;

                if (kthread_should_stop()) {
                        more_to_do = 1;
                        break;
                }

                pending_req = alloc_req(blkif);
                if (NULL == pending_req) {
                        blkif->st_oo_req++;
                        more_to_do = 1;
                        break;
                }

                switch (blkif->blk_protocol) {
                case BLKIF_PROTOCOL_NATIVE:
                        memcpy(&req, RING_GET_REQUEST(&blk_rings->native, rc), sizeof(req));
                        break;
                case BLKIF_PROTOCOL_X86_32:
                        blkif_get_x86_32_req(&req, RING_GET_REQUEST(&blk_rings->x86_32, rc));
                        break;
                case BLKIF_PROTOCOL_X86_64:
                        blkif_get_x86_64_req(&req, RING_GET_REQUEST(&blk_rings->x86_64, rc));
                        break;
                default:
                        BUG();
                }
                blk_rings->common.req_cons = ++rc; /* before make_response() */

                /* Apply all sanity checks to /private copy/ of request. */
                barrier();

                switch (req.operation) {
                case BLKIF_OP_READ:
                case BLKIF_OP_WRITE:
                case BLKIF_OP_WRITE_BARRIER:
                case BLKIF_OP_FLUSH_DISKCACHE:
                case BLKIF_OP_INDIRECT:
                        if (dispatch_rw_block_io(blkif, &req, pending_req))
                                goto done;
                        break;
                case BLKIF_OP_DISCARD:
                        free_req(blkif, pending_req);
                        if (dispatch_discard_io(blkif, &req))
                                goto done;
                        break;
                default:
                        if (dispatch_other_io(blkif, &req, pending_req))
                                goto done;
                        break;
                }

                /* Yield point for this unbounded loop. */
                cond_resched();
        }
done:
        return more_to_do;
}

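/*
 * RING_FINAL_CHECK_FOR_REQUESTS() re-enables notifications and then
 * re-checks the producer index, closing the race where the frontend
 * queues a request after __do_block_io_op() drained the ring but before
 * notifications were re-enabled.
 */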
static int
do_block_io_op(struct xen_blkif *blkif)
{
        union blkif_back_rings *blk_rings = &blkif->blk_rings;
        int more_to_do;

        do {
                more_to_do = __do_block_io_op(blkif);
                if (more_to_do)
                        break;

                RING_FINAL_CHECK_FOR_REQUESTS(&blk_rings->common, more_to_do);
        } while (more_to_do);

        return more_to_do;
}

/*
 * Transmute the 'struct blkif_request' into a proper 'struct bio'
 * and call 'submit_bio' to pass it to the underlying storage.
 */
static int dispatch_rw_block_io(struct xen_blkif *blkif,
                                struct blkif_request *req,
                                struct pending_req *pending_req)
{
        struct phys_req preq;
        struct seg_buf *seg = pending_req->seg;
        unsigned int nseg;
        struct bio *bio = NULL;
        struct bio **biolist = pending_req->biolist;
        int i, nbio = 0;
        int operation;
        struct blk_plug plug;
        bool drain = false;
        struct grant_page **pages = pending_req->segments;
        unsigned short req_operation;

        req_operation = req->operation == BLKIF_OP_INDIRECT ?
                        req->u.indirect.indirect_op : req->operation;
        if ((req->operation == BLKIF_OP_INDIRECT) &&
            (req_operation != BLKIF_OP_READ) &&
            (req_operation != BLKIF_OP_WRITE)) {
                pr_debug(DRV_PFX "Invalid indirect operation (%u)\n",
                         req_operation);
                goto fail_response;
        }

        switch (req_operation) {
        case BLKIF_OP_READ:
                blkif->st_rd_req++;
                operation = READ;
                break;
        case BLKIF_OP_WRITE:
                blkif->st_wr_req++;
                operation = WRITE_ODIRECT;
                break;
        case BLKIF_OP_WRITE_BARRIER:
                drain = true;
                /* fall through */
        case BLKIF_OP_FLUSH_DISKCACHE:
                blkif->st_f_req++;
                operation = WRITE_FLUSH;
                break;
        default:
                operation = 0; /* make gcc happy */
                goto fail_response;
                break;
        }

        /* Check that the number of segments is sane. */
        nseg = req->operation == BLKIF_OP_INDIRECT ?
               req->u.indirect.nr_segments : req->u.rw.nr_segments;

        if (unlikely(nseg == 0 && operation != WRITE_FLUSH) ||
            unlikely((req->operation != BLKIF_OP_INDIRECT) &&
                     (nseg > BLKIF_MAX_SEGMENTS_PER_REQUEST)) ||
            unlikely((req->operation == BLKIF_OP_INDIRECT) &&
                     (nseg > MAX_INDIRECT_SEGMENTS))) {
                pr_debug(DRV_PFX "Bad number of segments in request (%d)\n",
                         nseg);
                /* Haven't submitted any bio's yet. */
                goto fail_response;
        }

        preq.nr_sects      = 0;

        pending_req->blkif     = blkif;
        pending_req->id        = req->u.rw.id;
        pending_req->operation = req_operation;
        pending_req->status    = BLKIF_RSP_OKAY;
        pending_req->nr_pages  = nseg;

        if (req->operation != BLKIF_OP_INDIRECT) {
                preq.dev               = req->u.rw.handle;
                preq.sector_number     = req->u.rw.sector_number;
                for (i = 0; i < nseg; i++) {
                        pages[i]->gref = req->u.rw.seg[i].gref;
                        seg[i].nsec = req->u.rw.seg[i].last_sect -
                                req->u.rw.seg[i].first_sect + 1;
                        seg[i].offset = (req->u.rw.seg[i].first_sect << 9);
                        if ((req->u.rw.seg[i].last_sect >= (PAGE_SIZE >> 9)) ||
                            (req->u.rw.seg[i].last_sect <
                             req->u.rw.seg[i].first_sect))
                                goto fail_response;
                        preq.nr_sects += seg[i].nsec;
                }
        } else {
                preq.dev               = req->u.indirect.handle;
                preq.sector_number     = req->u.indirect.sector_number;
                if (xen_blkbk_parse_indirect(req, pending_req, seg, &preq))
                        goto fail_response;
        }

        if (xen_vbd_translate(&preq, blkif, operation) != 0) {
                pr_debug(DRV_PFX "access denied: %s of [%llu,%llu] on dev=%04x\n",
                         operation == READ ? "read" : "write",
                         preq.sector_number,
                         preq.sector_number + preq.nr_sects,
                         blkif->vbd.pdevice);
                goto fail_response;
        }

        /*
         * This check _MUST_ be done after xen_vbd_translate as the preq.bdev
         * is set there.
         */
        for (i = 0; i < nseg; i++) {
                if (((int)preq.sector_number|(int)seg[i].nsec) &
                    ((bdev_logical_block_size(preq.bdev) >> 9) - 1)) {
                        pr_debug(DRV_PFX "Misaligned I/O request from domain %d",
                                 blkif->domid);
                        goto fail_response;
                }
        }

        /*
         * Wait on all outstanding I/O's and once that has been completed
         * issue the WRITE_FLUSH.
         */
        if (drain)
                xen_blk_drain_io(pending_req->blkif);

        /*
         * If we have failed at this point, we need to undo the M2P override,
         * set gnttab_set_unmap_op on all of the grant references and perform
         * the hypercall to unmap the grants - that is all done in
         * xen_blkbk_unmap.
         */
        if (xen_blkbk_map_seg(pending_req))
                goto fail_flush;

        /*
         * This corresponding xen_blkif_put is done in __end_block_io_op, or
         * below (in "!bio") if we are handling a BLKIF_OP_DISCARD.
         */
        xen_blkif_get(blkif);
        atomic_inc(&blkif->inflight);

        for (i = 0; i < nseg; i++) {
                while ((bio == NULL) ||
                       (bio_add_page(bio,
                                     pages[i]->page,
                                     seg[i].nsec << 9,
                                     seg[i].offset) == 0)) {

                        int nr_iovecs = min_t(int, (nseg-i), BIO_MAX_PAGES);
                        bio = bio_alloc(GFP_KERNEL, nr_iovecs);
                        if (unlikely(bio == NULL))
                                goto fail_put_bio;

                        biolist[nbio++] = bio;
                        bio->bi_bdev    = preq.bdev;
                        bio->bi_private = pending_req;
                        bio->bi_end_io  = end_block_io_op;
                        bio->bi_sector  = preq.sector_number;
                }

                preq.sector_number += seg[i].nsec;
        }

        /* This will be hit if the operation was a flush or discard. */
        if (!bio) {
                BUG_ON(operation != WRITE_FLUSH);

                bio = bio_alloc(GFP_KERNEL, 0);
                if (unlikely(bio == NULL))
                        goto fail_put_bio;

                biolist[nbio++] = bio;
                bio->bi_bdev    = preq.bdev;
                bio->bi_private = pending_req;
                bio->bi_end_io  = end_block_io_op;
        }

        atomic_set(&pending_req->pendcnt, nbio);
        blk_start_plug(&plug);

        for (i = 0; i < nbio; i++)
                submit_bio(operation, biolist[i]);

        /* Let the I/Os go.. */
        blk_finish_plug(&plug);

        if (operation == READ)
                blkif->st_rd_sect += preq.nr_sects;
        else if (operation & WRITE)
                blkif->st_wr_sect += preq.nr_sects;

        return 0;

 fail_flush:
        xen_blkbk_unmap(blkif, pending_req->segments,
                        pending_req->nr_pages);
 fail_response:
        /* Haven't submitted any bio's yet. */
        make_response(blkif, req->u.rw.id, req_operation, BLKIF_RSP_ERROR);
        free_req(blkif, pending_req);
        msleep(1); /* back off a bit */
        return -EIO;

 fail_put_bio:
        for (i = 0; i < nbio; i++)
                bio_put(biolist[i]);
        atomic_set(&pending_req->pendcnt, 1);
        __end_block_io_op(pending_req, -EINVAL);
        msleep(1); /* back off a bit */
        return -EIO;
}

/*
 * Put a response on the ring on how the operation fared.
 */
static void make_response(struct xen_blkif *blkif, u64 id,
                          unsigned short op, int st)
{
        struct blkif_response  resp;
        unsigned long     flags;
        union blkif_back_rings *blk_rings = &blkif->blk_rings;
        int notify;

        resp.id        = id;
        resp.operation = op;
        resp.status    = st;

        spin_lock_irqsave(&blkif->blk_ring_lock, flags);
        /* Place on the response ring for the relevant domain. */
        switch (blkif->blk_protocol) {
        case BLKIF_PROTOCOL_NATIVE:
                memcpy(RING_GET_RESPONSE(&blk_rings->native, blk_rings->native.rsp_prod_pvt),
                       &resp, sizeof(resp));
                break;
        case BLKIF_PROTOCOL_X86_32:
                memcpy(RING_GET_RESPONSE(&blk_rings->x86_32, blk_rings->x86_32.rsp_prod_pvt),
                       &resp, sizeof(resp));
                break;
        case BLKIF_PROTOCOL_X86_64:
                memcpy(RING_GET_RESPONSE(&blk_rings->x86_64, blk_rings->x86_64.rsp_prod_pvt),
                       &resp, sizeof(resp));
                break;
        default:
                BUG();
        }
        blk_rings->common.rsp_prod_pvt++;
        RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blk_rings->common, notify);
        spin_unlock_irqrestore(&blkif->blk_ring_lock, flags);
        if (notify)
                notify_remote_via_irq(blkif->irq);
}

static int __init xen_blkif_init(void)
{
        int rc = 0;

        if (!xen_domain())
                return -ENODEV;

        rc = xen_blkif_interface_init();
        if (rc)
                goto failed_init;

        rc = xen_blkif_xenbus_init();
        if (rc)
                goto failed_init;

 failed_init:
        return rc;
}

module_init(xen_blkif_init);

MODULE_LICENSE("Dual BSD/GPL");
MODULE_ALIAS("xen-backend:vbd");