
/*
   rbd.c -- Export ceph rados objects as a Linux block device


   based on drivers/block/osdblk.c:

   Copyright 2009 Red Hat, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.



   For usage instructions, please refer to:

                 Documentation/ABI/testing/sysfs-bus-rbd

 */

#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/mon_client.h>
#include <linux/ceph/decode.h>
#include <linux/parser.h>

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/blkdev.h>

#include "rbd_types.h"

#define RBD_DEBUG       /* Activate rbd_assert() calls */

/*
 * The basic unit of block I/O is a sector.  It is interpreted in a
 * number of contexts in Linux (blk, bio, genhd), but the default is
 * universally 512 bytes.  These symbols are just slightly more
 * meaningful than the bare numbers they represent.
 */
#define SECTOR_SHIFT    9
#define SECTOR_SIZE     (1ULL << SECTOR_SHIFT)

#define RBD_DRV_NAME "rbd"
#define RBD_DRV_NAME_LONG "rbd (rados block device)"

#define RBD_MINORS_PER_MAJOR    256             /* max minors per blkdev */

#define RBD_SNAP_DEV_NAME_PREFIX        "snap_"
#define RBD_MAX_SNAP_NAME_LEN   \
                        (NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))

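/*
 * Illustrative arithmetic (an editorial note, not from the original
 * source): a snapshot context is dominated by its array of u64
 * snapshot ids, and 510 * 8 = 4080 bytes of ids leaves room for the
 * context's sequence number and count within a single 4 KB page.
 */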
#define RBD_MAX_SNAP_COUNT      510     /* allows max snapc to fit in 4KB */

#define RBD_SNAP_HEAD_NAME      "-"

/* This allows a single page to hold an image name sent by an OSD */
#define RBD_IMAGE_NAME_LEN_MAX  (PAGE_SIZE - sizeof (__le32) - 1)
#define RBD_IMAGE_ID_LEN_MAX    64

#define RBD_OBJ_PREFIX_LEN_MAX  64

/* Feature bits */

#define RBD_FEATURE_LAYERING    (1<<0)
#define RBD_FEATURE_STRIPINGV2  (1<<1)
#define RBD_FEATURES_ALL \
            (RBD_FEATURE_LAYERING | RBD_FEATURE_STRIPINGV2)

/* Features supported by this (client software) implementation. */

#define RBD_FEATURES_SUPPORTED  (RBD_FEATURES_ALL)

/*
 * An RBD device name will be "rbd#", where the "rbd" comes from
 * RBD_DRV_NAME above, and # is a unique integer identifier.
 * MAX_INT_FORMAT_WIDTH is used to ensure DEV_NAME_LEN is big
 * enough to hold all possible device names.
 */
#define DEV_NAME_LEN            32
#define MAX_INT_FORMAT_WIDTH    ((5 * sizeof (int)) / 2 + 1)

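/*
 * Illustrative arithmetic (not from the original source): an n-byte
 * integer needs at most ceil(8n * log10(2)) ~= ceil(2.41n) decimal
 * digits, and (5n)/2 = 2.5n over-approximates that; the +1 leaves
 * room for a sign.  For a 4-byte int this gives 11 characters,
 * enough for "-2147483648".
 */
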
/*
 * block device image metadata (in-memory version)
 */
struct rbd_image_header {
        /* These fields never change for a given rbd image */
        char *object_prefix;
        u64 features;
        __u8 obj_order;
        __u8 crypt_type;
        __u8 comp_type;

        /* The remaining fields need to be updated occasionally */
        u64 image_size;
        struct ceph_snap_context *snapc;
        char *snap_names;
        u64 *snap_sizes;

        u64 stripe_unit;
        u64 stripe_count;
};

/*
 * An rbd image specification.
 *
 * The tuple (pool_id, image_id, snap_id) is sufficient to uniquely
 * identify an image.  Each rbd_dev structure includes a pointer to
 * an rbd_spec structure that encapsulates this identity.
 *
 * Each of the id's in an rbd_spec has an associated name.  For a
 * user-mapped image, the names are supplied and the id's associated
 * with them are looked up.  For a layered image, a parent image is
 * defined by the tuple, and the names are looked up.
 *
 * An rbd_dev structure contains a parent_spec pointer which is
 * non-null if the image it represents is a child in a layered
 * image.  This pointer will refer to the rbd_spec structure used
 * by the parent rbd_dev for its own identity (i.e., the structure
 * is shared between the parent and child).
 *
 * Since these structures are populated once, during the discovery
 * phase of image construction, they are effectively immutable so
 * we make no effort to synchronize access to them.
 *
 * Note that code herein does not assume the image name is known (it
 * could be a null pointer).
 */
struct rbd_spec {
        u64             pool_id;
        const char      *pool_name;

        const char      *image_id;
        const char      *image_name;

        u64             snap_id;
        const char      *snap_name;

        struct kref     kref;
};

/*
 * an instance of the client.  multiple devices may share an rbd client.
 */
struct rbd_client {
        struct ceph_client      *client;
        struct kref             kref;
        struct list_head        node;
};

struct rbd_img_request;
typedef void (*rbd_img_callback_t)(struct rbd_img_request *);

#define BAD_WHICH       U32_MAX         /* Good which or bad which, which? */

struct rbd_obj_request;
typedef void (*rbd_obj_callback_t)(struct rbd_obj_request *);

enum obj_request_type {
        OBJ_REQUEST_NODATA, OBJ_REQUEST_BIO, OBJ_REQUEST_PAGES
};

enum obj_req_flags {
        OBJ_REQ_DONE,           /* completion flag: not done = 0, done = 1 */
        OBJ_REQ_IMG_DATA,       /* object usage: standalone = 0, image = 1 */
        OBJ_REQ_KNOWN,          /* EXISTS flag valid: no = 0, yes = 1 */
        OBJ_REQ_EXISTS,         /* target exists: no = 0, yes = 1 */
};

struct rbd_obj_request {
        const char              *object_name;
        u64                     offset;         /* object start byte */
        u64                     length;         /* bytes from offset */
        unsigned long           flags;

        /*
         * An object request associated with an image will have its
         * img_data flag set; a standalone object request will not.
         *
         * A standalone object request will have which == BAD_WHICH
         * and a null obj_request pointer.
         *
         * An object request initiated in support of a layered image
         * object (to check for its existence before a write) will
         * have which == BAD_WHICH and a non-null obj_request pointer.
         *
         * Finally, an object request for rbd image data will have
         * which != BAD_WHICH, and will have a non-null img_request
         * pointer.  The value of which will be in the range
         * 0..(img_request->obj_request_count-1).
         */
        union {
                struct rbd_obj_request  *obj_request;   /* STAT op */
                struct {
                        struct rbd_img_request  *img_request;
                        u64                     img_offset;
                        /* links for img_request->obj_requests list */
                        struct list_head        links;
                };
        };
        u32                     which;          /* posn in image request list */

        enum obj_request_type   type;
        union {
                struct bio      *bio_list;
                struct {
                        struct page     **pages;
                        u32             page_count;
                };
        };
        struct page             **copyup_pages;

        struct ceph_osd_request *osd_req;

        u64                     xferred;        /* bytes transferred */
        int                     result;

        rbd_obj_callback_t      callback;
        struct completion       completion;

        struct kref             kref;
};

enum img_req_flags {
        IMG_REQ_WRITE,          /* I/O direction: read = 0, write = 1 */
        IMG_REQ_CHILD,          /* initiator: block = 0, child image = 1 */
        IMG_REQ_LAYERED,        /* ENOENT handling: normal = 0, layered = 1 */
};

struct rbd_img_request {
        struct rbd_device       *rbd_dev;
        u64                     offset; /* starting image byte offset */
        u64                     length; /* byte count from offset */
        unsigned long           flags;
        union {
                u64                     snap_id;        /* for reads */
                struct ceph_snap_context *snapc;        /* for writes */
        };
        union {
                struct request          *rq;            /* block request */
                struct rbd_obj_request  *obj_request;   /* obj req initiator */
        };
        struct page             **copyup_pages;
        spinlock_t              completion_lock;/* protects next_completion */
        u32                     next_completion;
        rbd_img_callback_t      callback;
        u64                     xferred;/* aggregate bytes transferred */
        int                     result; /* first nonzero obj_request result */

        u32                     obj_request_count;
        struct list_head        obj_requests;   /* rbd_obj_request structs */

        struct kref             kref;
};

#define for_each_obj_request(ireq, oreq) \
        list_for_each_entry(oreq, &(ireq)->obj_requests, links)
#define for_each_obj_request_from(ireq, oreq) \
        list_for_each_entry_from(oreq, &(ireq)->obj_requests, links)
#define for_each_obj_request_safe(ireq, oreq, n) \
        list_for_each_entry_safe_reverse(oreq, n, &(ireq)->obj_requests, links)

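/*
 * Usage sketch (an illustration, not code from elsewhere in this
 * file): walking an image request's object requests looks like
 *
 *      struct rbd_obj_request *obj_request;
 *
 *      for_each_obj_request(img_request, obj_request)
 *              handle(obj_request);
 *
 * where handle() stands in for whatever per-object work the caller
 * does; the _safe variant must be used if requests are removed from
 * the list while iterating.
 */
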
struct rbd_snap {
        const char              *name;
        u64                     size;
        struct list_head        node;
        u64                     id;
        u64                     features;
};

struct rbd_mapping {
        u64                     size;
        u64                     features;
        bool                    read_only;
};

/*
 * a single device
 */
struct rbd_device {
        int                     dev_id;         /* blkdev unique id */

        int                     major;          /* blkdev assigned major */
        struct gendisk          *disk;          /* blkdev's gendisk and rq */

        u32                     image_format;   /* Either 1 or 2 */
        struct rbd_client       *rbd_client;

        char                    name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */

        spinlock_t              lock;           /* queue, flags, open_count */

        struct rbd_image_header header;
        unsigned long           flags;          /* possibly lock protected */
        struct rbd_spec         *spec;

        char                    *header_name;

        struct ceph_file_layout layout;

        struct ceph_osd_event   *watch_event;
        struct rbd_obj_request  *watch_request;

        struct rbd_spec         *parent_spec;
        u64                     parent_overlap;
        struct rbd_device       *parent;

        /* protects updating the header */
        struct rw_semaphore     header_rwsem;

        struct rbd_mapping      mapping;

        struct list_head        node;

        /* list of snapshots */
        struct list_head        snaps;

        /* sysfs related */
        struct device           dev;
        unsigned long           open_count;     /* protected by lock */
};

/*
 * Flag bits for rbd_dev->flags.  If atomicity is required,
 * rbd_dev->lock is used to protect access.
 *
 * Currently, only the "removing" flag (which is coupled with the
 * "open_count" field) requires atomic access.
 */
enum rbd_dev_flags {
        RBD_DEV_FLAG_EXISTS,    /* mapped snapshot has not been deleted */
        RBD_DEV_FLAG_REMOVING,  /* this mapping is being removed */
};

static DEFINE_MUTEX(ctl_mutex);   /* Serialize open/close/setup/teardown */

static LIST_HEAD(rbd_dev_list);    /* devices */
static DEFINE_SPINLOCK(rbd_dev_list_lock);

static LIST_HEAD(rbd_client_list);              /* clients */
static DEFINE_SPINLOCK(rbd_client_list_lock);

static int rbd_img_request_submit(struct rbd_img_request *img_request);

static int rbd_dev_snaps_update(struct rbd_device *rbd_dev);

static void rbd_dev_device_release(struct device *dev);
static void rbd_snap_destroy(struct rbd_snap *snap);

static ssize_t rbd_add(struct bus_type *bus, const char *buf,
                       size_t count);
static ssize_t rbd_remove(struct bus_type *bus, const char *buf,
                          size_t count);
static int rbd_dev_image_probe(struct rbd_device *rbd_dev);

static struct bus_attribute rbd_bus_attrs[] = {
        __ATTR(add, S_IWUSR, NULL, rbd_add),
        __ATTR(remove, S_IWUSR, NULL, rbd_remove),
        __ATTR_NULL
};

static struct bus_type rbd_bus_type = {
        .name           = "rbd",
        .bus_attrs      = rbd_bus_attrs,
};

static void rbd_root_dev_release(struct device *dev)
{
}

static struct device rbd_root_dev = {
        .init_name =    "rbd",
        .release =      rbd_root_dev_release,
};

static __printf(2, 3)
void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
{
        struct va_format vaf;
        va_list args;

        va_start(args, fmt);
        vaf.fmt = fmt;
        vaf.va = &args;

        if (!rbd_dev)
                printk(KERN_WARNING "%s: %pV\n", RBD_DRV_NAME, &vaf);
        else if (rbd_dev->disk)
                printk(KERN_WARNING "%s: %s: %pV\n",
                        RBD_DRV_NAME, rbd_dev->disk->disk_name, &vaf);
        else if (rbd_dev->spec && rbd_dev->spec->image_name)
                printk(KERN_WARNING "%s: image %s: %pV\n",
                        RBD_DRV_NAME, rbd_dev->spec->image_name, &vaf);
        else if (rbd_dev->spec && rbd_dev->spec->image_id)
                printk(KERN_WARNING "%s: id %s: %pV\n",
                        RBD_DRV_NAME, rbd_dev->spec->image_id, &vaf);
        else    /* punt */
                printk(KERN_WARNING "%s: rbd_dev %p: %pV\n",
                        RBD_DRV_NAME, rbd_dev, &vaf);
        va_end(args);
}

#ifdef RBD_DEBUG
#define rbd_assert(expr)                                                \
                if (unlikely(!(expr))) {                                \
                        printk(KERN_ERR "\nAssertion failure in %s() "  \
                                                "at line %d:\n\n"       \
                                        "\trbd_assert(%s);\n\n",        \
                                        __func__, __LINE__, #expr);     \
                        BUG();                                          \
                }
#else /* !RBD_DEBUG */
#  define rbd_assert(expr)      ((void) 0)
#endif /* !RBD_DEBUG */

static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request);
static void rbd_img_parent_read(struct rbd_obj_request *obj_request);
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);

static int rbd_dev_refresh(struct rbd_device *rbd_dev);
static int rbd_dev_v2_refresh(struct rbd_device *rbd_dev);

static int rbd_open(struct block_device *bdev, fmode_t mode)
{
        struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
        bool removing = false;

        if ((mode & FMODE_WRITE) && rbd_dev->mapping.read_only)
                return -EROFS;

        spin_lock_irq(&rbd_dev->lock);
        if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
                removing = true;
        else
                rbd_dev->open_count++;
        spin_unlock_irq(&rbd_dev->lock);
        if (removing)
                return -ENOENT;

        mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
        (void) get_device(&rbd_dev->dev);
        set_device_ro(bdev, rbd_dev->mapping.read_only);
        mutex_unlock(&ctl_mutex);

        return 0;
}

static int rbd_release(struct gendisk *disk, fmode_t mode)
{
        struct rbd_device *rbd_dev = disk->private_data;
        unsigned long open_count_before;

        spin_lock_irq(&rbd_dev->lock);
        open_count_before = rbd_dev->open_count--;
        spin_unlock_irq(&rbd_dev->lock);
        rbd_assert(open_count_before > 0);

        mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
        put_device(&rbd_dev->dev);
        mutex_unlock(&ctl_mutex);

        return 0;
}

static const struct block_device_operations rbd_bd_ops = {
        .owner                  = THIS_MODULE,
        .open                   = rbd_open,
        .release                = rbd_release,
};

/*
 * Initialize an rbd client instance.
 * We own *ceph_opts.
 */
static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
{
        struct rbd_client *rbdc;
        int ret = -ENOMEM;

        dout("%s:\n", __func__);
        rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL);
        if (!rbdc)
                goto out_opt;

        kref_init(&rbdc->kref);
        INIT_LIST_HEAD(&rbdc->node);

        mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);

        rbdc->client = ceph_create_client(ceph_opts, rbdc, 0, 0);
        if (IS_ERR(rbdc->client))
                goto out_mutex;
        ceph_opts = NULL; /* Now rbdc->client is responsible for ceph_opts */

        ret = ceph_open_session(rbdc->client);
        if (ret < 0)
                goto out_err;

        spin_lock(&rbd_client_list_lock);
        list_add_tail(&rbdc->node, &rbd_client_list);
        spin_unlock(&rbd_client_list_lock);

        mutex_unlock(&ctl_mutex);
        dout("%s: rbdc %p\n", __func__, rbdc);

        return rbdc;

out_err:
        ceph_destroy_client(rbdc->client);
out_mutex:
        mutex_unlock(&ctl_mutex);
        kfree(rbdc);
out_opt:
        if (ceph_opts)
                ceph_destroy_options(ceph_opts);
        dout("%s: error %d\n", __func__, ret);

        return ERR_PTR(ret);
}

static struct rbd_client *__rbd_get_client(struct rbd_client *rbdc)
{
        kref_get(&rbdc->kref);

        return rbdc;
}

/*
 * Find a ceph client with specific addr and configuration.  If
 * found, bump its reference count.
 */
static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
{
        struct rbd_client *client_node;
        bool found = false;

        if (ceph_opts->flags & CEPH_OPT_NOSHARE)
                return NULL;

        spin_lock(&rbd_client_list_lock);
        list_for_each_entry(client_node, &rbd_client_list, node) {
                if (!ceph_compare_options(ceph_opts, client_node->client)) {
                        __rbd_get_client(client_node);

                        found = true;
                        break;
                }
        }
        spin_unlock(&rbd_client_list_lock);

        return found ? client_node : NULL;
}

/*
 * mount options
 */
enum {
        Opt_last_int,
        /* int args above */
        Opt_last_string,
        /* string args above */
        Opt_read_only,
        Opt_read_write,
        /* Boolean args above */
        Opt_last_bool,
};

static match_table_t rbd_opts_tokens = {
        /* int args above */
        /* string args above */
        {Opt_read_only, "read_only"},
        {Opt_read_only, "ro"},          /* Alternate spelling */
        {Opt_read_write, "read_write"},
        {Opt_read_write, "rw"},         /* Alternate spelling */
        /* Boolean args above */
        {-1, NULL}
};

struct rbd_options {
        bool    read_only;
};

#define RBD_READ_ONLY_DEFAULT   false

static int parse_rbd_opts_token(char *c, void *private)
{
        struct rbd_options *rbd_opts = private;
        substring_t argstr[MAX_OPT_ARGS];
        int token, intval, ret;

        token = match_token(c, rbd_opts_tokens, argstr);
        if (token < 0)
                return -EINVAL;

        if (token < Opt_last_int) {
                ret = match_int(&argstr[0], &intval);
                if (ret < 0) {
                        pr_err("bad mount option arg (not int) "
                               "at '%s'\n", c);
                        return ret;
                }
                dout("got int token %d val %d\n", token, intval);
        } else if (token > Opt_last_int && token < Opt_last_string) {
                dout("got string token %d val %s\n", token,
                     argstr[0].from);
        } else if (token > Opt_last_string && token < Opt_last_bool) {
                dout("got Boolean token %d\n", token);
        } else {
                dout("got token %d\n", token);
        }

        switch (token) {
        case Opt_read_only:
                rbd_opts->read_only = true;
                break;
        case Opt_read_write:
                rbd_opts->read_only = false;
                break;
        default:
                rbd_assert(false);
                break;
        }
        return 0;
}

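/*
 * Illustrative example (an assumed invocation, not from the source):
 * a map request passing the option string "ro" would hand that token
 * to parse_rbd_opts_token(); it matches the Opt_read_only entry in
 * rbd_opts_tokens, so rbd_opts->read_only is set to true.
 */
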
/*
 * Get a ceph client with specific addr and configuration; if one
 * does not exist, create it.
 */
static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
{
        struct rbd_client *rbdc;

        rbdc = rbd_client_find(ceph_opts);
        if (rbdc)       /* using an existing client */
                ceph_destroy_options(ceph_opts);
        else
                rbdc = rbd_client_create(ceph_opts);

        return rbdc;
}

/*
 * Destroy ceph client
 *
 * Takes rbd_client_list_lock itself to remove the client from the
 * client list, so the caller must not hold it.
 */
static void rbd_client_release(struct kref *kref)
{
        struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);

        dout("%s: rbdc %p\n", __func__, rbdc);
        spin_lock(&rbd_client_list_lock);
        list_del(&rbdc->node);
        spin_unlock(&rbd_client_list_lock);

        ceph_destroy_client(rbdc->client);
        kfree(rbdc);
}

/*
 * Drop reference to ceph client node. If it's not referenced anymore, release
 * it.
 */
static void rbd_put_client(struct rbd_client *rbdc)
{
        if (rbdc)
                kref_put(&rbdc->kref, rbd_client_release);
}

static bool rbd_image_format_valid(u32 image_format)
{
        return image_format == 1 || image_format == 2;
}

static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
{
        size_t size;
        u32 snap_count;

        /* The header has to start with the magic rbd header text */
        if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)))
                return false;

        /* The bio layer requires at least sector-sized I/O */

        if (ondisk->options.order < SECTOR_SHIFT)
                return false;

        /* If we use u64 in a few spots we may be able to loosen this */

        if (ondisk->options.order > 8 * sizeof (int) - 1)
                return false;

        /*
         * The size of a snapshot header has to fit in a size_t, and
         * that limits the number of snapshots.
         */
        snap_count = le32_to_cpu(ondisk->snap_count);
        size = SIZE_MAX - sizeof (struct ceph_snap_context);
        if (snap_count > size / sizeof (__le64))
                return false;

        /*
         * Not only that, but the size of the entire snapshot
         * header must also be representable in a size_t.
         */
        size -= snap_count * sizeof (__le64);
        if ((u64) size < le64_to_cpu(ondisk->snap_names_len))
                return false;

        return true;
}

/*
 * Create a new header structure, translate header format from the on-disk
 * header.
 */
static int rbd_header_from_disk(struct rbd_image_header *header,
                                 struct rbd_image_header_ondisk *ondisk)
{
        u32 snap_count;
        size_t len;
        size_t size;
        u32 i;

        memset(header, 0, sizeof (*header));

        snap_count = le32_to_cpu(ondisk->snap_count);

        len = strnlen(ondisk->object_prefix, sizeof (ondisk->object_prefix));
        header->object_prefix = kmalloc(len + 1, GFP_KERNEL);
        if (!header->object_prefix)
                return -ENOMEM;
        memcpy(header->object_prefix, ondisk->object_prefix, len);
        header->object_prefix[len] = '\0';

        if (snap_count) {
                u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);

                /* Save a copy of the snapshot names */

                if (snap_names_len > (u64) SIZE_MAX)
                        return -EIO;
                header->snap_names = kmalloc(snap_names_len, GFP_KERNEL);
                if (!header->snap_names)
                        goto out_err;
                /*
                 * Note that rbd_dev_v1_header_read() guarantees
                 * the ondisk buffer we're working with has
                 * snap_names_len bytes beyond the end of the
                 * snapshot id array, so this memcpy() is safe.
                 */
                memcpy(header->snap_names, &ondisk->snaps[snap_count],
                        snap_names_len);

                /* Record each snapshot's size */

                size = snap_count * sizeof (*header->snap_sizes);
                header->snap_sizes = kmalloc(size, GFP_KERNEL);
                if (!header->snap_sizes)
                        goto out_err;
                for (i = 0; i < snap_count; i++)
                        header->snap_sizes[i] =
                                le64_to_cpu(ondisk->snaps[i].image_size);
        } else {
                header->snap_names = NULL;
                header->snap_sizes = NULL;
        }

        header->features = 0;   /* No features support in v1 images */
        header->obj_order = ondisk->options.order;
        header->crypt_type = ondisk->options.crypt_type;
        header->comp_type = ondisk->options.comp_type;

        /* Allocate and fill in the snapshot context */

        header->image_size = le64_to_cpu(ondisk->image_size);

        header->snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
        if (!header->snapc)
                goto out_err;
        header->snapc->seq = le64_to_cpu(ondisk->snap_seq);
        for (i = 0; i < snap_count; i++)
                header->snapc->snaps[i] = le64_to_cpu(ondisk->snaps[i].id);

        return 0;

out_err:
        kfree(header->snap_sizes);
        header->snap_sizes = NULL;
        kfree(header->snap_names);
        header->snap_names = NULL;
        kfree(header->object_prefix);
        header->object_prefix = NULL;

        return -ENOMEM;
}

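/*
 * Illustrative layout (an assumption about the v1 on-disk format as
 * consumed above, not authoritative): for snap_count == 2 the ondisk
 * buffer ends with
 *
 *      snaps[0]  { id, image_size }
 *      snaps[1]  { id, image_size }
 *      "first\0second\0"            <- snap_names_len bytes
 *
 * which is why the names are copied from &ondisk->snaps[snap_count].
 */
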
static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
{
        struct rbd_snap *snap;

        if (snap_id == CEPH_NOSNAP)
                return RBD_SNAP_HEAD_NAME;

        list_for_each_entry(snap, &rbd_dev->snaps, node)
                if (snap_id == snap->id)
                        return snap->name;

        return NULL;
}

static struct rbd_snap *snap_by_name(struct rbd_device *rbd_dev,
                                        const char *snap_name)
{
        struct rbd_snap *snap;

        list_for_each_entry(snap, &rbd_dev->snaps, node)
                if (!strcmp(snap_name, snap->name))
                        return snap;

        return NULL;
}

static int rbd_dev_mapping_set(struct rbd_device *rbd_dev)
{
        if (!memcmp(rbd_dev->spec->snap_name, RBD_SNAP_HEAD_NAME,
                    sizeof (RBD_SNAP_HEAD_NAME))) {
                rbd_dev->mapping.size = rbd_dev->header.image_size;
                rbd_dev->mapping.features = rbd_dev->header.features;
        } else {
                struct rbd_snap *snap;

                snap = snap_by_name(rbd_dev, rbd_dev->spec->snap_name);
                if (!snap)
                        return -ENOENT;
                rbd_dev->mapping.size = snap->size;
                rbd_dev->mapping.features = snap->features;
                rbd_dev->mapping.read_only = true;
        }

        return 0;
}

static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev)
{
        rbd_dev->mapping.size = 0;
        rbd_dev->mapping.features = 0;
        rbd_dev->mapping.read_only = true;
}

static void rbd_dev_clear_mapping(struct rbd_device *rbd_dev)
{
        rbd_dev->mapping.size = 0;
        rbd_dev->mapping.features = 0;
        rbd_dev->mapping.read_only = true;
}

static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
{
        char *name;
        u64 segment;
        int ret;

        name = kmalloc(MAX_OBJ_NAME_SIZE + 1, GFP_NOIO);
        if (!name)
                return NULL;
        segment = offset >> rbd_dev->header.obj_order;
        ret = snprintf(name, MAX_OBJ_NAME_SIZE + 1, "%s.%012llx",
                        rbd_dev->header.object_prefix, segment);
        if (ret < 0 || ret > MAX_OBJ_NAME_SIZE) {
                pr_err("error formatting segment name for #%llu (%d)\n",
                        segment, ret);
                kfree(name);
                name = NULL;
        }

        return name;
}

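/*
 * Worked example (illustrative values, not from the source): with
 * obj_order 22 (4 MiB objects) and an object_prefix of "rb.0.1",
 * image offset 0x1234567 falls in segment 0x1234567 >> 22 == 4, so
 * the object name is formatted as "rb.0.1.000000000004".
 */
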
static u64 rbd_segment_offset(struct rbd_device *rbd_dev, u64 offset)
{
        u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;

        return offset & (segment_size - 1);
}

static u64 rbd_segment_length(struct rbd_device *rbd_dev,
                                u64 offset, u64 length)
{
        u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;

        offset &= segment_size - 1;

        rbd_assert(length <= U64_MAX - offset);
        if (offset + length > segment_size)
                length = segment_size - offset;

        return length;
}

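/*
 * Worked example (illustrative, assuming obj_order 22): a request of
 * length 1024 starting 512 bytes before a 4194304-byte segment
 * boundary has an in-segment offset of 4193792, so the returned
 * length is clamped to 4194304 - 4193792 = 512; the remaining 512
 * bytes belong to the next object.
 */
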
/*
 * returns the size of an object in the image
 */
static u64 rbd_obj_bytes(struct rbd_image_header *header)
{
        return 1 << header->obj_order;
}

/*
 * bio helpers
 */

static void bio_chain_put(struct bio *chain)
{
        struct bio *tmp;

        while (chain) {
                tmp = chain;
                chain = chain->bi_next;
                bio_put(tmp);
        }
}

/*
 * zeros a bio chain, starting at a specific offset
 */
static void zero_bio_chain(struct bio *chain, int start_ofs)
{
        struct bio_vec *bv;
        unsigned long flags;
        void *buf;
        int i;
        int pos = 0;

        while (chain) {
                bio_for_each_segment(bv, chain, i) {
                        if (pos + bv->bv_len > start_ofs) {
                                int remainder = max(start_ofs - pos, 0);
                                buf = bvec_kmap_irq(bv, &flags);
                                memset(buf + remainder, 0,
                                       bv->bv_len - remainder);
                                bvec_kunmap_irq(buf, &flags);
                        }
                        pos += bv->bv_len;
                }

                chain = chain->bi_next;
        }
}

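/*
 * Worked example (illustrative): for a chain of 512-byte bio_vecs
 * and start_ofs == 1024, the first two vecs are left intact; the
 * third vec satisfies pos + bv_len > start_ofs with remainder 0, so
 * it and every later vec are zeroed in full.
 */
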
/*
 * similar to zero_bio_chain(), zeros data defined by a page array,
 * starting at the given byte offset from the start of the array and
 * continuing up to the given end offset.  The pages array is
 * assumed to be big enough to hold all bytes up to the end.
 */
static void zero_pages(struct page **pages, u64 offset, u64 end)
{
        struct page **page = &pages[offset >> PAGE_SHIFT];

        rbd_assert(end > offset);
        rbd_assert(end - offset <= (u64)SIZE_MAX);
        while (offset < end) {
                size_t page_offset;
                size_t length;
                unsigned long flags;
                void *kaddr;

                page_offset = (size_t)(offset & ~PAGE_MASK);
                length = min(PAGE_SIZE - page_offset, (size_t)(end - offset));
                local_irq_save(flags);
                kaddr = kmap_atomic(*page);
                memset(kaddr + page_offset, 0, length);
                kunmap_atomic(kaddr);
                local_irq_restore(flags);

                offset += length;
                page++;
        }
}

/*
 * Clone a portion of a bio, starting at the given byte offset
 * and continuing for the number of bytes indicated.
 */
static struct bio *bio_clone_range(struct bio *bio_src,
                                        unsigned int offset,
                                        unsigned int len,
                                        gfp_t gfpmask)
{
        struct bio_vec *bv;
        unsigned int resid;
        unsigned short idx;
        unsigned int voff;
        unsigned short end_idx;
        unsigned short vcnt;
        struct bio *bio;

        /* Handle the easy case for the caller */

        if (!offset && len == bio_src->bi_size)
                return bio_clone(bio_src, gfpmask);

        if (WARN_ON_ONCE(!len))
                return NULL;
        if (WARN_ON_ONCE(len > bio_src->bi_size))
                return NULL;
        if (WARN_ON_ONCE(offset > bio_src->bi_size - len))
                return NULL;

        /* Find first affected segment... */

        resid = offset;
        __bio_for_each_segment(bv, bio_src, idx, 0) {
                if (resid < bv->bv_len)
                        break;
                resid -= bv->bv_len;
        }
        voff = resid;

        /* ...and the last affected segment */

        resid += len;
        __bio_for_each_segment(bv, bio_src, end_idx, idx) {
                if (resid <= bv->bv_len)
                        break;
                resid -= bv->bv_len;
        }
        vcnt = end_idx - idx + 1;

        /* Build the clone */

        bio = bio_alloc(gfpmask, (unsigned int) vcnt);
        if (!bio)
                return NULL;    /* ENOMEM */

        bio->bi_bdev = bio_src->bi_bdev;
        bio->bi_sector = bio_src->bi_sector + (offset >> SECTOR_SHIFT);
        bio->bi_rw = bio_src->bi_rw;
        bio->bi_flags |= 1 << BIO_CLONED;

        /*
         * Copy over our part of the bio_vec, then update the first
         * and last (or only) entries.
         */
        memcpy(&bio->bi_io_vec[0], &bio_src->bi_io_vec[idx],
                        vcnt * sizeof (struct bio_vec));
        bio->bi_io_vec[0].bv_offset += voff;
        if (vcnt > 1) {
                bio->bi_io_vec[0].bv_len -= voff;
                bio->bi_io_vec[vcnt - 1].bv_len = resid;
        } else {
                bio->bi_io_vec[0].bv_len = len;
        }

        bio->bi_vcnt = vcnt;
        bio->bi_size = len;
        bio->bi_idx = 0;

        return bio;
}

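/*
 * Worked example (illustrative): cloning offset 600, len 1000 from a
 * bio of two 1024-byte vecs breaks out of the first loop at idx 0
 * (600 < 1024, so voff 600), ends in vec 1 (resid 1600 > 1024, then
 * 576 <= 1024), and produces a two-vec clone whose first vec is
 * trimmed to 1024 - 600 = 424 bytes and whose last vec to 576.
 */
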
/*
 * Clone a portion of a bio chain, starting at the given byte offset
 * into the first bio in the source chain and continuing for the
 * number of bytes indicated.  The result is another bio chain of
 * exactly the given length, or a null pointer on error.
 *
 * The bio_src and offset parameters are both in-out.  On entry they
 * refer to the first source bio and the offset into that bio where
 * the start of data to be cloned is located.
 *
 * On return, bio_src is updated to refer to the bio in the source
 * chain that contains the first un-cloned byte, and *offset will
 * contain the offset of that byte within that bio.
 */
static struct bio *bio_chain_clone_range(struct bio **bio_src,
                                        unsigned int *offset,
                                        unsigned int len,
                                        gfp_t gfpmask)
{
        struct bio *bi = *bio_src;
        unsigned int off = *offset;
        struct bio *chain = NULL;
        struct bio **end;

        /* Build up a chain of clone bios up to the limit */

        if (!bi || off >= bi->bi_size || !len)
                return NULL;            /* Nothing to clone */

        end = &chain;
        while (len) {
                unsigned int bi_size;
                struct bio *bio;

                if (!bi) {
                        rbd_warn(NULL, "bio_chain exhausted with %u left", len);
                        goto out_err;   /* EINVAL; ran out of bio's */
                }
                bi_size = min_t(unsigned int, bi->bi_size - off, len);
                bio = bio_clone_range(bi, off, bi_size, gfpmask);
                if (!bio)
                        goto out_err;   /* ENOMEM */

                *end = bio;
                end = &bio->bi_next;

                off += bi_size;
                if (off == bi->bi_size) {
                        bi = bi->bi_next;
                        off = 0;
                }
                len -= bi_size;
        }
        *bio_src = bi;
        *offset = off;

        return chain;
out_err:
        bio_chain_put(chain);

        return NULL;
}

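/*
 * Usage sketch (an illustration, not code from elsewhere in this
 * file): a caller splitting a block request across object boundaries
 * would clone one segment's worth at a time,
 *
 *      bio = bio_chain_clone_range(&bio_list, &bio_offset,
 *                                  segment_length, GFP_ATOMIC);
 *
 * and on each call bio_list/bio_offset advance to the first
 * un-cloned byte, ready for the next segment.
 */
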
/*
 * The default/initial value for all object request flags is 0.  For
 * each flag, once its value is set to 1 it is never reset to 0
 * again.
 */
static void obj_request_img_data_set(struct rbd_obj_request *obj_request)
{
        if (test_and_set_bit(OBJ_REQ_IMG_DATA, &obj_request->flags)) {
                struct rbd_device *rbd_dev;

                rbd_dev = obj_request->img_request->rbd_dev;
                rbd_warn(rbd_dev, "obj_request %p already marked img_data\n",
                        obj_request);
        }
}

static bool obj_request_img_data_test(struct rbd_obj_request *obj_request)
{
        smp_mb();
        return test_bit(OBJ_REQ_IMG_DATA, &obj_request->flags) != 0;
}

static void obj_request_done_set(struct rbd_obj_request *obj_request)
{
        if (test_and_set_bit(OBJ_REQ_DONE, &obj_request->flags)) {
                struct rbd_device *rbd_dev = NULL;

                if (obj_request_img_data_test(obj_request))
                        rbd_dev = obj_request->img_request->rbd_dev;
                rbd_warn(rbd_dev, "obj_request %p already marked done\n",
                        obj_request);
        }
}

static bool obj_request_done_test(struct rbd_obj_request *obj_request)
{
        smp_mb();
        return test_bit(OBJ_REQ_DONE, &obj_request->flags) != 0;
}

/*
 * This sets the KNOWN flag after (possibly) setting the EXISTS
 * flag.  The latter is set based on the "exists" value provided.
 *
 * Note that for our purposes once an object exists it never goes
 * away again.  It's possible that the responses from two existence
 * checks are separated by the creation of the target object, and
 * the first ("doesn't exist") response arrives *after* the second
 * ("does exist").  In that case we ignore the second one.
 */
static void obj_request_existence_set(struct rbd_obj_request *obj_request,
                                bool exists)
{
        if (exists)
                set_bit(OBJ_REQ_EXISTS, &obj_request->flags);
        set_bit(OBJ_REQ_KNOWN, &obj_request->flags);
        smp_mb();
}

static bool obj_request_known_test(struct rbd_obj_request *obj_request)
{
        smp_mb();
        return test_bit(OBJ_REQ_KNOWN, &obj_request->flags) != 0;
}

static bool obj_request_exists_test(struct rbd_obj_request *obj_request)
{
        smp_mb();
        return test_bit(OBJ_REQ_EXISTS, &obj_request->flags) != 0;
}

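/*
 * Note on the barriers above (an editorial observation, not from the
 * original source): the smp_mb() after each setter orders the bit
 * update before the setter's later memory accesses, and the smp_mb()
 * in each tester orders the read after the caller's earlier
 * accesses.  Because every flag only ever transitions from 0 to 1, a
 * racing reader that still sees 0 is merely conservative.
 */
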
static void rbd_obj_request_get(struct rbd_obj_request *obj_request)
{
        dout("%s: obj %p (was %d)\n", __func__, obj_request,
                atomic_read(&obj_request->kref.refcount));
        kref_get(&obj_request->kref);
}

static void rbd_obj_request_destroy(struct kref *kref);
static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
{
        rbd_assert(obj_request != NULL);
        dout("%s: obj %p (was %d)\n", __func__, obj_request,
                atomic_read(&obj_request->kref.refcount));
        kref_put(&obj_request->kref, rbd_obj_request_destroy);
}

static void rbd_img_request_get(struct rbd_img_request *img_request)
{
        dout("%s: img %p (was %d)\n", __func__, img_request,
                atomic_read(&img_request->kref.refcount));
        kref_get(&img_request->kref);
}

static void rbd_img_request_destroy(struct kref *kref);
static void rbd_img_request_put(struct rbd_img_request *img_request)
{
        rbd_assert(img_request != NULL);
        dout("%s: img %p (was %d)\n", __func__, img_request,
                atomic_read(&img_request->kref.refcount));
        kref_put(&img_request->kref, rbd_img_request_destroy);
}

static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
                                        struct rbd_obj_request *obj_request)
{
        rbd_assert(obj_request->img_request == NULL);

        /* Image request now owns object's original reference */
        obj_request->img_request = img_request;
        obj_request->which = img_request->obj_request_count;
        rbd_assert(!obj_request_img_data_test(obj_request));
        obj_request_img_data_set(obj_request);
        rbd_assert(obj_request->which != BAD_WHICH);
        img_request->obj_request_count++;
        list_add_tail(&obj_request->links, &img_request->obj_requests);
        dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
                obj_request->which);
}

static inline void rbd_img_obj_request_del(struct rbd_img_request *img_request,
                                        struct rbd_obj_request *obj_request)
{
        rbd_assert(obj_request->which != BAD_WHICH);

        dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
                obj_request->which);
        list_del(&obj_request->links);
        rbd_assert(img_request->obj_request_count > 0);
        img_request->obj_request_count--;
        rbd_assert(obj_request->which == img_request->obj_request_count);
        obj_request->which = BAD_WHICH;
        rbd_assert(obj_request_img_data_test(obj_request));
        rbd_assert(obj_request->img_request == img_request);
        obj_request->img_request = NULL;
        obj_request->callback = NULL;
        rbd_obj_request_put(obj_request);
}

static bool obj_request_type_valid(enum obj_request_type type)
{
        switch (type) {
        case OBJ_REQUEST_NODATA:
        case OBJ_REQUEST_BIO:
        case OBJ_REQUEST_PAGES:
                return true;
        default:
                return false;
        }
}

static int rbd_obj_request_submit(struct ceph_osd_client *osdc,
                                struct rbd_obj_request *obj_request)
{
        dout("%s: osdc %p obj %p\n", __func__, osdc, obj_request);

        return ceph_osdc_start_request(osdc, obj_request->osd_req, false);
}

static void rbd_img_request_complete(struct rbd_img_request *img_request)
{
        dout("%s: img %p\n", __func__, img_request);

        /*
         * If no error occurred, compute the aggregate transfer
         * count for the image request.  We could instead use
         * atomic64_cmpxchg() to update it as each object request
         * completes; not clear which way is better off hand.
         */
        if (!img_request->result) {
                struct rbd_obj_request *obj_request;
                u64 xferred = 0;

                for_each_obj_request(img_request, obj_request)
                        xferred += obj_request->xferred;
                img_request->xferred = xferred;
        }

        if (img_request->callback)
                img_request->callback(img_request);
        else
                rbd_img_request_put(img_request);
}

/* Caller is responsible for rbd_obj_request_destroy(obj_request) */

static int rbd_obj_request_wait(struct rbd_obj_request *obj_request)
{
        dout("%s: obj %p\n", __func__, obj_request);

        return wait_for_completion_interruptible(&obj_request->completion);
}

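/*
 * Usage sketch (an illustration, not code from elsewhere in this
 * file): a synchronous caller with no callback set would pair the
 * two helpers above,
 *
 *      ret = rbd_obj_request_submit(osdc, obj_request);
 *      if (!ret)
 *              ret = rbd_obj_request_wait(obj_request);
 *
 * relying on rbd_obj_request_complete() to fire the completion when
 * no per-request callback was registered.
 */
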
/*
 * The default/initial value for all image request flags is 0.  Each
 * is conditionally set to 1 at image request initialization time
 * and currently never changes thereafter.
 */
static void img_request_write_set(struct rbd_img_request *img_request)
{
        set_bit(IMG_REQ_WRITE, &img_request->flags);
        smp_mb();
}

static bool img_request_write_test(struct rbd_img_request *img_request)
{
        smp_mb();
        return test_bit(IMG_REQ_WRITE, &img_request->flags) != 0;
}

static void img_request_child_set(struct rbd_img_request *img_request)
{
        set_bit(IMG_REQ_CHILD, &img_request->flags);
        smp_mb();
}

static bool img_request_child_test(struct rbd_img_request *img_request)
{
        smp_mb();
        return test_bit(IMG_REQ_CHILD, &img_request->flags) != 0;
}

static void img_request_layered_set(struct rbd_img_request *img_request)
{
        set_bit(IMG_REQ_LAYERED, &img_request->flags);
        smp_mb();
}

static bool img_request_layered_test(struct rbd_img_request *img_request)
{
        smp_mb();
        return test_bit(IMG_REQ_LAYERED, &img_request->flags) != 0;
}

static void
rbd_img_obj_request_read_callback(struct rbd_obj_request *obj_request)
{
        u64 xferred = obj_request->xferred;
        u64 length = obj_request->length;

        dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
                obj_request, obj_request->img_request, obj_request->result,
                xferred, length);
        /*
         * ENOENT means a hole in the image.  We zero-fill the
         * entire length of the request.  A short read also implies
         * zero-fill to the end of the request.  Either way we
         * update the xferred count to indicate the whole request
         * was satisfied.
         */
        rbd_assert(obj_request->type != OBJ_REQUEST_NODATA);
        if (obj_request->result == -ENOENT) {
                if (obj_request->type == OBJ_REQUEST_BIO)
                        zero_bio_chain(obj_request->bio_list, 0);
                else
                        zero_pages(obj_request->pages, 0, length);
                obj_request->result = 0;
                obj_request->xferred = length;
        } else if (xferred < length && !obj_request->result) {
                if (obj_request->type == OBJ_REQUEST_BIO)
                        zero_bio_chain(obj_request->bio_list, xferred);
                else
                        zero_pages(obj_request->pages, xferred, length);
                obj_request->xferred = length;
        }
        obj_request_done_set(obj_request);
}

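/*
 * Worked example (illustrative): a 4194304-byte object read that
 * returns only 1048576 bytes takes the short-read branch above, so
 * bytes 1048576..4194303 are zero-filled and xferred is reported as
 * the full 4194304; an -ENOENT (hole) zero-fills the whole range
 * instead and clears the error.
 */
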
static void rbd_obj_request_complete(struct rbd_obj_request *obj_request)
{
        dout("%s: obj %p cb %p\n", __func__, obj_request,
                obj_request->callback);
        if (obj_request->callback)
                obj_request->callback(obj_request);
        else
                complete_all(&obj_request->completion);
}

static void rbd_osd_trivial_callback(struct rbd_obj_request *obj_request)
{
        dout("%s: obj %p\n", __func__, obj_request);
        obj_request_done_set(obj_request);
}

static void rbd_osd_read_callback(struct rbd_obj_request *obj_request)
{
        struct rbd_img_request *img_request = NULL;
        struct rbd_device *rbd_dev = NULL;
        bool layered = false;

        if (obj_request_img_data_test(obj_request)) {
                img_request = obj_request->img_request;
                layered = img_request && img_request_layered_test(img_request);
                rbd_dev = img_request->rbd_dev;
        }

        dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
                obj_request, img_request, obj_request->result,
                obj_request->xferred, obj_request->length);
        if (layered && obj_request->result == -ENOENT &&
                        obj_request->img_offset < rbd_dev->parent_overlap)
                rbd_img_parent_read(obj_request);
        else if (img_request)
                rbd_img_obj_request_read_callback(obj_request);
        else
                obj_request_done_set(obj_request);
}

static void rbd_osd_write_callback(struct rbd_obj_request *obj_request)
{
        dout("%s: obj %p result %d %llu\n", __func__, obj_request,
                obj_request->result, obj_request->length);
        /*
         * There is no such thing as a successful short write.  Set
         * it to our originally-requested length.
         */
        obj_request->xferred = obj_request->length;
        obj_request_done_set(obj_request);
}

/*
 * For a simple stat call there's nothing to do.  We'll do more if
 * this is part of a write sequence for a layered image.
 */
static void rbd_osd_stat_callback(struct rbd_obj_request *obj_request)
{
        dout("%s: obj %p\n", __func__, obj_request);
        obj_request_done_set(obj_request);
}

static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
                                struct ceph_msg *msg)
{
        struct rbd_obj_request *obj_request = osd_req->r_priv;
        u16 opcode;

        dout("%s: osd_req %p msg %p\n", __func__, osd_req, msg);
        rbd_assert(osd_req == obj_request->osd_req);
        if (obj_request_img_data_test(obj_request)) {
                rbd_assert(obj_request->img_request);
                rbd_assert(obj_request->which != BAD_WHICH);
        } else {
                rbd_assert(obj_request->which == BAD_WHICH);
        }

        if (osd_req->r_result < 0)
                obj_request->result = osd_req->r_result;

        BUG_ON(osd_req->r_num_ops > 2);

        /*
         * We support a 64-bit length, but ultimately it has to be
         * passed to blk_end_request(), which takes an unsigned int.
         */
        obj_request->xferred = osd_req->r_reply_op_len[0];
        rbd_assert(obj_request->xferred < (u64)UINT_MAX);
        opcode = osd_req->r_ops[0].op;
        switch (opcode) {
        case CEPH_OSD_OP_READ:
                rbd_osd_read_callback(obj_request);
                break;
        case CEPH_OSD_OP_WRITE:
                rbd_osd_write_callback(obj_request);
                break;
        case CEPH_OSD_OP_STAT:
                rbd_osd_stat_callback(obj_request);
                break;
        case CEPH_OSD_OP_CALL:
        case CEPH_OSD_OP_NOTIFY_ACK:
        case CEPH_OSD_OP_WATCH:
                rbd_osd_trivial_callback(obj_request);
                break;
        default:
                rbd_warn(NULL, "%s: unsupported op %hu\n",
                        obj_request->object_name, (unsigned short) opcode);
                break;
        }

        if (obj_request_done_test(obj_request))
                rbd_obj_request_complete(obj_request);
}

static void rbd_osd_req_format_read(struct rbd_obj_request *obj_request)
{
        struct rbd_img_request *img_request = obj_request->img_request;
        struct ceph_osd_request *osd_req = obj_request->osd_req;
        u64 snap_id;

        rbd_assert(osd_req != NULL);

        snap_id = img_request ? img_request->snap_id : CEPH_NOSNAP;
        ceph_osdc_build_request(osd_req, obj_request->offset,
                        NULL, snap_id, NULL);
}

static void rbd_osd_req_format_write(struct rbd_obj_request *obj_request)
{
        struct rbd_img_request *img_request = obj_request->img_request;
        struct ceph_osd_request *osd_req = obj_request->osd_req;
        struct ceph_snap_context *snapc;
        struct timespec mtime = CURRENT_TIME;

        rbd_assert(osd_req != NULL);

        snapc = img_request ? img_request->snapc : NULL;
        ceph_osdc_build_request(osd_req, obj_request->offset,
                        snapc, CEPH_NOSNAP, &mtime);
}

static struct ceph_osd_request *rbd_osd_req_create(
                                        struct rbd_device *rbd_dev,
                                        bool write_request,
                                        struct rbd_obj_request *obj_request)
{
        struct ceph_snap_context *snapc = NULL;
        struct ceph_osd_client *osdc;
        struct ceph_osd_request *osd_req;

        if (obj_request_img_data_test(obj_request)) {
                struct rbd_img_request *img_request = obj_request->img_request;

                rbd_assert(write_request ==
                                img_request_write_test(img_request));
                if (write_request)
                        snapc = img_request->snapc;
        }

        /* Allocate and initialize the request, for the single op */

        osdc = &rbd_dev->rbd_client->client->osdc;
        osd_req = ceph_osdc_alloc_request(osdc, snapc, 1, false, GFP_ATOMIC);
        if (!osd_req)
                return NULL;    /* ENOMEM */

        if (write_request)
                osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
        else
                osd_req->r_flags = CEPH_OSD_FLAG_READ;

        osd_req->r_callback = rbd_osd_req_callback;
        osd_req->r_priv = obj_request;

        osd_req->r_oid_len = strlen(obj_request->object_name);
        rbd_assert(osd_req->r_oid_len < sizeof (osd_req->r_oid));
        memcpy(osd_req->r_oid, obj_request->object_name, osd_req->r_oid_len);

        osd_req->r_file_layout = rbd_dev->layout;       /* struct */

        return osd_req;
}

1592 /*
1593  * Create a copyup osd request based on the information in the
1594  * object request supplied.  A copyup request has two osd ops:
1595  * a copyup method call and a "normal" write request.
1596  */
1597 static struct ceph_osd_request *
1598 rbd_osd_req_create_copyup(struct rbd_obj_request *obj_request)
1599 {
1600         struct rbd_img_request *img_request;
1601         struct ceph_snap_context *snapc;
1602         struct rbd_device *rbd_dev;
1603         struct ceph_osd_client *osdc;
1604         struct ceph_osd_request *osd_req;
1605
1606         rbd_assert(obj_request_img_data_test(obj_request));
1607         img_request = obj_request->img_request;
1608         rbd_assert(img_request);
1609         rbd_assert(img_request_write_test(img_request));
1610
1611         /* Allocate and initialize the request, for the two ops */
1612
1613         snapc = img_request->snapc;
1614         rbd_dev = img_request->rbd_dev;
1615         osdc = &rbd_dev->rbd_client->client->osdc;
1616         osd_req = ceph_osdc_alloc_request(osdc, snapc, 2, false, GFP_ATOMIC);
1617         if (!osd_req)
1618                 return NULL;    /* ENOMEM */
1619
1620         osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
1621         osd_req->r_callback = rbd_osd_req_callback;
1622         osd_req->r_priv = obj_request;
1623
1624         osd_req->r_oid_len = strlen(obj_request->object_name);
1625         rbd_assert(osd_req->r_oid_len < sizeof (osd_req->r_oid));
1626         memcpy(osd_req->r_oid, obj_request->object_name, osd_req->r_oid_len);
1627
1628         osd_req->r_file_layout = rbd_dev->layout;       /* struct */
1629
1630         return osd_req;
1631 }
1632
1633
1634 static void rbd_osd_req_destroy(struct ceph_osd_request *osd_req)
1635 {
1636         ceph_osdc_put_request(osd_req);
1637 }
1638
1639 /* object_name is assumed to be a non-null pointer and NUL-terminated */
1640
1641 static struct rbd_obj_request *rbd_obj_request_create(const char *object_name,
1642                                                 u64 offset, u64 length,
1643                                                 enum obj_request_type type)
1644 {
1645         struct rbd_obj_request *obj_request;
1646         size_t size;
1647         char *name;
1648
1649         rbd_assert(obj_request_type_valid(type));
1650
1651         size = strlen(object_name) + 1;
1652         obj_request = kzalloc(sizeof (*obj_request) + size, GFP_KERNEL);
1653         if (!obj_request)
1654                 return NULL;
1655
1656         name = (char *)(obj_request + 1);
1657         obj_request->object_name = memcpy(name, object_name, size);
1658         obj_request->offset = offset;
1659         obj_request->length = length;
1660         obj_request->flags = 0;
1661         obj_request->which = BAD_WHICH;
1662         obj_request->type = type;
1663         INIT_LIST_HEAD(&obj_request->links);
1664         init_completion(&obj_request->completion);
1665         kref_init(&obj_request->kref);
1666
1667         dout("%s: \"%s\" %llu/%llu %d -> obj %p\n", __func__, object_name,
1668                 offset, length, (int)type, obj_request);
1669
1670         return obj_request;
1671 }
1672
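/*
 * Final teardown of an object request, called when its last kref
 * is dropped.  Releases the osd request and whatever data
 * container (bio chain or page vector) the request owns.
 */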
1673 static void rbd_obj_request_destroy(struct kref *kref)
1674 {
1675         struct rbd_obj_request *obj_request;
1676
1677         obj_request = container_of(kref, struct rbd_obj_request, kref);
1678
1679         dout("%s: obj %p\n", __func__, obj_request);
1680
1681         rbd_assert(obj_request->img_request == NULL);
1682         rbd_assert(obj_request->which == BAD_WHICH);
1683
1684         if (obj_request->osd_req)
1685                 rbd_osd_req_destroy(obj_request->osd_req);
1686
1687         rbd_assert(obj_request_type_valid(obj_request->type));
1688         switch (obj_request->type) {
1689         case OBJ_REQUEST_NODATA:
1690                 break;          /* Nothing to do */
1691         case OBJ_REQUEST_BIO:
1692                 if (obj_request->bio_list)
1693                         bio_chain_put(obj_request->bio_list);
1694                 break;
1695         case OBJ_REQUEST_PAGES:
1696                 if (obj_request->pages)
1697                         ceph_release_page_vector(obj_request->pages,
1698                                                 obj_request->page_count);
1699                 break;
1700         }
1701
1702         kfree(obj_request);
1703 }
1704
1705 /*
1706  * Caller is responsible for filling in the list of object requests
1707  * that comprises the image request, and the Linux request pointer
1708  * (if there is one).
1709  */
1710 static struct rbd_img_request *rbd_img_request_create(
1711                                         struct rbd_device *rbd_dev,
1712                                         u64 offset, u64 length,
1713                                         bool write_request,
1714                                         bool child_request)
1715 {
1716         struct rbd_img_request *img_request;
1717
1718         img_request = kmalloc(sizeof (*img_request), GFP_ATOMIC);
1719         if (!img_request)
1720                 return NULL;
1721
1722         if (write_request) {
1723                 down_read(&rbd_dev->header_rwsem);
1724                 ceph_get_snap_context(rbd_dev->header.snapc);
1725                 up_read(&rbd_dev->header_rwsem);
1726         }
1727
1728         img_request->rq = NULL;
1729         img_request->rbd_dev = rbd_dev;
1730         img_request->offset = offset;
1731         img_request->length = length;
1732         img_request->flags = 0;
1733         if (write_request) {
1734                 img_request_write_set(img_request);
1735                 img_request->snapc = rbd_dev->header.snapc;
1736         } else {
1737                 img_request->snap_id = rbd_dev->spec->snap_id;
1738         }
1739         if (child_request)
1740                 img_request_child_set(img_request);
1741         if (rbd_dev->parent_spec)
1742                 img_request_layered_set(img_request);
1743         spin_lock_init(&img_request->completion_lock);
1744         img_request->next_completion = 0;
1745         img_request->callback = NULL;
1746         img_request->result = 0;
1747         img_request->obj_request_count = 0;
1748         INIT_LIST_HEAD(&img_request->obj_requests);
1749         kref_init(&img_request->kref);
1750
1751         rbd_img_request_get(img_request);       /* Avoid a warning */
1752         rbd_img_request_put(img_request);       /* TEMPORARY */
1753
1754         dout("%s: rbd_dev %p %s %llu/%llu -> img %p\n", __func__, rbd_dev,
1755                 write_request ? "write" : "read", offset, length,
1756                 img_request);
1757
1758         return img_request;
1759 }
1760
1761 static void rbd_img_request_destroy(struct kref *kref)
1762 {
1763         struct rbd_img_request *img_request;
1764         struct rbd_obj_request *obj_request;
1765         struct rbd_obj_request *next_obj_request;
1766
1767         img_request = container_of(kref, struct rbd_img_request, kref);
1768
1769         dout("%s: img %p\n", __func__, img_request);
1770
1771         for_each_obj_request_safe(img_request, obj_request, next_obj_request)
1772                 rbd_img_obj_request_del(img_request, obj_request);
1773         rbd_assert(img_request->obj_request_count == 0);
1774
1775         if (img_request_write_test(img_request))
1776                 ceph_put_snap_context(img_request->snapc);
1777
1778         if (img_request_child_test(img_request))
1779                 rbd_obj_request_put(img_request->obj_request);
1780
1781         kfree(img_request);
1782 }
1783
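/*
 * Finish one object request belonging to an image request:
 * record the first failure seen, and for a request driven by the
 * block layer report the bytes transferred.  Returns true if the
 * enclosing image request still has more work outstanding.
 */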
1784 static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request)
1785 {
1786         struct rbd_img_request *img_request;
1787         unsigned int xferred;
1788         int result;
1789         bool more;
1790
1791         rbd_assert(obj_request_img_data_test(obj_request));
1792         img_request = obj_request->img_request;
1793
1794         rbd_assert(obj_request->xferred <= (u64)UINT_MAX);
1795         xferred = (unsigned int)obj_request->xferred;
1796         result = obj_request->result;
1797         if (result) {
1798                 struct rbd_device *rbd_dev = img_request->rbd_dev;
1799
1800                 rbd_warn(rbd_dev, "%s %llx at %llx (%llx)\n",
1801                         img_request_write_test(img_request) ? "write" : "read",
1802                         obj_request->length, obj_request->img_offset,
1803                         obj_request->offset);
1804                 rbd_warn(rbd_dev, "  result %d xferred %x\n",
1805                         result, xferred);
1806                 if (!img_request->result)
1807                         img_request->result = result;
1808         }
1809
1810         /* Image object requests don't own their page array */
1811
1812         if (obj_request->type == OBJ_REQUEST_PAGES) {
1813                 obj_request->pages = NULL;
1814                 obj_request->page_count = 0;
1815         }
1816
1817         if (img_request_child_test(img_request)) {
1818                 rbd_assert(img_request->obj_request != NULL);
1819                 more = obj_request->which < img_request->obj_request_count - 1;
1820         } else {
1821                 rbd_assert(img_request->rq != NULL);
1822                 more = blk_end_request(img_request->rq, result, xferred);
1823         }
1824
1825         return more;
1826 }
1827
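/*
 * Completion callback for an image object request.  Object
 * requests may finish in any order, but they must be completed
 * toward the block layer in order.  Complete every consecutive
 * finished request starting from the next expected one, and
 * complete the whole image request once the last one is done.
 */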
1828 static void rbd_img_obj_callback(struct rbd_obj_request *obj_request)
1829 {
1830         struct rbd_img_request *img_request;
1831         u32 which = obj_request->which;
1832         bool more = true;
1833
1834         rbd_assert(obj_request_img_data_test(obj_request));
1835         img_request = obj_request->img_request;
1836
1837         dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
1838         rbd_assert(img_request != NULL);
1839         rbd_assert(img_request->obj_request_count > 0);
1840         rbd_assert(which != BAD_WHICH);
1841         rbd_assert(which < img_request->obj_request_count);
1842         rbd_assert(which >= img_request->next_completion);
1843
1844         spin_lock_irq(&img_request->completion_lock);
1845         if (which != img_request->next_completion)
1846                 goto out;
1847
1848         for_each_obj_request_from(img_request, obj_request) {
1849                 rbd_assert(more);
1850                 rbd_assert(which < img_request->obj_request_count);
1851
1852                 if (!obj_request_done_test(obj_request))
1853                         break;
1854                 more = rbd_img_obj_end_request(obj_request);
1855                 which++;
1856         }
1857
1858         rbd_assert(more ^ (which == img_request->obj_request_count));
1859         img_request->next_completion = which;
1860 out:
1861         spin_unlock_irq(&img_request->completion_lock);
1862
1863         if (!more)
1864                 rbd_img_request_complete(img_request);
1865 }
1866
1867 /*
1868  * Split up an image request into one or more object requests, each
1869  * to a different object.  The "type" parameter indicates whether
1870  * "data_desc" is the pointer to the head of a list of bio
1871  * structures, or the base of a page array.  In either case this
1872  * function assumes data_desc describes memory sufficient to hold
1873  * all data described by the image request.
1874  */
1875 static int rbd_img_request_fill(struct rbd_img_request *img_request,
1876                                         enum obj_request_type type,
1877                                         void *data_desc)
1878 {
1879         struct rbd_device *rbd_dev = img_request->rbd_dev;
1880         struct rbd_obj_request *obj_request = NULL;
1881         struct rbd_obj_request *next_obj_request;
1882         bool write_request = img_request_write_test(img_request);
1883         struct bio *bio_list;
1884         unsigned int bio_offset = 0;
1885         struct page **pages;
1886         u64 img_offset;
1887         u64 resid;
1888         u16 opcode;
1889
1890         dout("%s: img %p type %d data_desc %p\n", __func__, img_request,
1891                 (int)type, data_desc);
1892
1893         opcode = write_request ? CEPH_OSD_OP_WRITE : CEPH_OSD_OP_READ;
1894         img_offset = img_request->offset;
1895         resid = img_request->length;
1896         rbd_assert(resid > 0);
1897
1898         if (type == OBJ_REQUEST_BIO) {
1899                 bio_list = data_desc;
1900                 rbd_assert(img_offset == bio_list->bi_sector << SECTOR_SHIFT);
1901         } else {
1902                 rbd_assert(type == OBJ_REQUEST_PAGES);
1903                 pages = data_desc;
1904         }
1905
1906         while (resid) {
1907                 struct ceph_osd_request *osd_req;
1908                 const char *object_name;
1909                 u64 offset;
1910                 u64 length;
1911
1912                 object_name = rbd_segment_name(rbd_dev, img_offset);
1913                 if (!object_name)
1914                         goto out_unwind;
1915                 offset = rbd_segment_offset(rbd_dev, img_offset);
1916                 length = rbd_segment_length(rbd_dev, img_offset, resid);
1917                 obj_request = rbd_obj_request_create(object_name,
1918                                                 offset, length, type);
1919                 kfree(object_name);     /* object request has its own copy */
1920                 if (!obj_request)
1921                         goto out_unwind;
1922
1923                 if (type == OBJ_REQUEST_BIO) {
1924                         unsigned int clone_size;
1925
1926                         rbd_assert(length <= (u64)UINT_MAX);
1927                         clone_size = (unsigned int)length;
1928                         obj_request->bio_list =
1929                                         bio_chain_clone_range(&bio_list,
1930                                                                 &bio_offset,
1931                                                                 clone_size,
1932                                                                 GFP_ATOMIC);
1933                         if (!obj_request->bio_list)
1934                                 goto out_partial;
1935                 } else {
1936                         unsigned int page_count;
1937
1938                         obj_request->pages = pages;
1939                         page_count = (u32)calc_pages_for(offset, length);
1940                         obj_request->page_count = page_count;
1941                         if ((offset + length) & ~PAGE_MASK)
1942                                 page_count--;   /* more on last page */
1943                         pages += page_count;
1944                 }
1945
1946                 osd_req = rbd_osd_req_create(rbd_dev, write_request,
1947                                                 obj_request);
1948                 if (!osd_req)
1949                         goto out_partial;
1950                 obj_request->osd_req = osd_req;
1951                 obj_request->callback = rbd_img_obj_callback;
1952
1953                 osd_req_op_extent_init(osd_req, 0, opcode, offset, length,
1954                                                 0, 0);
1955                 if (type == OBJ_REQUEST_BIO)
1956                         osd_req_op_extent_osd_data_bio(osd_req, 0,
1957                                         obj_request->bio_list, length);
1958                 else
1959                         osd_req_op_extent_osd_data_pages(osd_req, 0,
1960                                         obj_request->pages, length,
1961                                         offset & ~PAGE_MASK, false, false);
1962
1963                 if (write_request)
1964                         rbd_osd_req_format_write(obj_request);
1965                 else
1966                         rbd_osd_req_format_read(obj_request);
1967
1968                 obj_request->img_offset = img_offset;
1969                 rbd_img_obj_request_add(img_request, obj_request);
1970
1971                 img_offset += length;
1972                 resid -= length;
1973         }
1974
1975         return 0;
1976
1977 out_partial:
1978         rbd_obj_request_put(obj_request);
1979 out_unwind:
1980         for_each_obj_request_safe(img_request, obj_request, next_obj_request)
1981                 rbd_obj_request_put(obj_request);
1982
1983         return -ENOMEM;
1984 }
1985
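/*
 * Completion callback for a copyup request.  Release the pages
 * that carried the parent data, and make the transfer count
 * reflect the original write before finishing up with the normal
 * image object callback.
 */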
1986 static void
1987 rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request)
1988 {
1989         struct rbd_img_request *img_request;
1990         struct rbd_device *rbd_dev;
1991         u64 length;
1992         u32 page_count;
1993
1994         rbd_assert(obj_request->type == OBJ_REQUEST_BIO);
1995         rbd_assert(obj_request_img_data_test(obj_request));
1996         img_request = obj_request->img_request;
1997         rbd_assert(img_request);
1998
1999         rbd_dev = img_request->rbd_dev;
2000         rbd_assert(rbd_dev);
2001         length = (u64)1 << rbd_dev->header.obj_order;
2002         page_count = (u32)calc_pages_for(0, length);
2003
2004         rbd_assert(obj_request->copyup_pages);
2005         ceph_release_page_vector(obj_request->copyup_pages, page_count);
2006         obj_request->copyup_pages = NULL;
2007
2008         /*
2009          * We want the transfer count to reflect the size of the
2010          * original write request.  There is no such thing as a
2011          * successful short write, so if the request was successful
2012          * we can just set it to the originally-requested length.
2013          */
2014         if (!obj_request->result)
2015                 obj_request->xferred = obj_request->length;
2016
2017         /* Finish up with the normal image object callback */
2018
2019         rbd_img_obj_callback(obj_request);
2020 }
2021
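/*
 * Called when the read of parent data for a copyup completes.
 * On success, build a two-op osd request for the original object
 * request--the copyup method call followed by the original
 * write--and submit it.  On failure, complete the original
 * request with the error.
 */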
2022 static void
2023 rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)
2024 {
2025         struct rbd_obj_request *orig_request;
2026         struct ceph_osd_request *osd_req;
2027         struct ceph_osd_client *osdc;
2028         struct rbd_device *rbd_dev;
2029         struct page **pages;
2030         int result;
2031         u64 obj_size;
2032         u64 xferred;
2033
2034         rbd_assert(img_request_child_test(img_request));
2035
2036         /* First get what we need from the image request */
2037
2038         pages = img_request->copyup_pages;
2039         rbd_assert(pages != NULL);
2040         img_request->copyup_pages = NULL;
2041
2042         orig_request = img_request->obj_request;
2043         rbd_assert(orig_request != NULL);
2044         rbd_assert(orig_request->type == OBJ_REQUEST_BIO);
2045         result = img_request->result;
2046         obj_size = img_request->length;
2047         xferred = img_request->xferred;
2048
2049         rbd_dev = img_request->rbd_dev;
2050         rbd_assert(rbd_dev);
2051         rbd_assert(obj_size == (u64)1 << rbd_dev->header.obj_order);
2052
2053         rbd_img_request_put(img_request);
2054
2055         if (result)
2056                 goto out_err;
2057
2058         /* Allocate the new copyup osd request for the original request */
2059
2060         result = -ENOMEM;
2061         rbd_assert(!orig_request->osd_req);
2062         osd_req = rbd_osd_req_create_copyup(orig_request);
2063         if (!osd_req)
2064                 goto out_err;
2065         orig_request->osd_req = osd_req;
2066         orig_request->copyup_pages = pages;
2067
2068         /* Initialize the copyup op */
2069
2070         osd_req_op_cls_init(osd_req, 0, CEPH_OSD_OP_CALL, "rbd", "copyup");
2071         osd_req_op_cls_request_data_pages(osd_req, 0, pages, obj_size, 0,
2072                                                 false, false);
2073
2074         /* Then the original write request op */
2075
2076         osd_req_op_extent_init(osd_req, 1, CEPH_OSD_OP_WRITE,
2077                                         orig_request->offset,
2078                                         orig_request->length, 0, 0);
2079         osd_req_op_extent_osd_data_bio(osd_req, 1, orig_request->bio_list,
2080                                         orig_request->length);
2081
2082         rbd_osd_req_format_write(orig_request);
2083
2084         /* All set, send it off. */
2085
2086         orig_request->callback = rbd_img_obj_copyup_callback;
2087         osdc = &rbd_dev->rbd_client->client->osdc;
2088         result = rbd_obj_request_submit(osdc, orig_request);
2089         if (!result)
2090                 return;
2091 out_err:
2092         /* Record the error code and complete the request */
2093
2094         orig_request->result = result;
2095         orig_request->xferred = 0;
2096         obj_request_done_set(orig_request);
2097         rbd_obj_request_complete(orig_request);
2098 }
2099
2100 /*
2101  * Read from the parent image the range of data that covers the
2102  * entire target of the given object request.  This is used for
2103  * satisfying a layered image write request when the target of an
2104  * object request from the image request does not exist.
2105  *
2106  * A page array big enough to hold the returned data is allocated
2107  * and supplied to rbd_img_request_fill() as the "data descriptor."
2108  * When the read completes, this page array will be transferred to
2109  * the original object request for the copyup operation.
2110  *
2111  * If an error occurs, record it as the result of the original
2112  * object request and mark it done so it gets completed.
2113  */
2114 static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request)
2115 {
2116         struct rbd_img_request *img_request = NULL;
2117         struct rbd_img_request *parent_request = NULL;
2118         struct rbd_device *rbd_dev;
2119         u64 img_offset;
2120         u64 length;
2121         struct page **pages = NULL;
2122         u32 page_count;
2123         int result;
2124
2125         rbd_assert(obj_request_img_data_test(obj_request));
2126         rbd_assert(obj_request->type == OBJ_REQUEST_BIO);
2127
2128         img_request = obj_request->img_request;
2129         rbd_assert(img_request != NULL);
2130         rbd_dev = img_request->rbd_dev;
2131         rbd_assert(rbd_dev->parent != NULL);
2132
2133         /*
2134          * First things first.  The original osd request is of no
2135          * use to us any more; we'll need a new one that can hold
2136          * the two ops in a copyup request.  We'll get that later,
2137          * but for now we can release the old one.
2138          */
2139         rbd_osd_req_destroy(obj_request->osd_req);
2140         obj_request->osd_req = NULL;
2141
2142         /*
2143          * Determine the byte range covered by the object in the
2144          * child image to which the original request was to be sent.
2145          */
2146         img_offset = obj_request->img_offset - obj_request->offset;
2147         length = (u64)1 << rbd_dev->header.obj_order;
2148
2149         /*
2150          * There is no defined parent data beyond the parent
2151          * overlap, so limit what we read at that boundary if
2152          * necessary.
2153          */
2154         if (img_offset + length > rbd_dev->parent_overlap) {
2155                 rbd_assert(img_offset < rbd_dev->parent_overlap);
2156                 length = rbd_dev->parent_overlap - img_offset;
2157         }
2158
2159         /*
2160          * Allocate a page array big enough to receive the data read
2161          * from the parent.
2162          */
2163         page_count = (u32)calc_pages_for(0, length);
2164         pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2165         if (IS_ERR(pages)) {
2166                 result = PTR_ERR(pages);
2167                 pages = NULL;
2168                 goto out_err;
2169         }
2170
2171         result = -ENOMEM;
2172         parent_request = rbd_img_request_create(rbd_dev->parent,
2173                                                 img_offset, length,
2174                                                 false, true);
2175         if (!parent_request)
2176                 goto out_err;
2177         rbd_obj_request_get(obj_request);
2178         parent_request->obj_request = obj_request;
2179
2180         result = rbd_img_request_fill(parent_request, OBJ_REQUEST_PAGES, pages);
2181         if (result)
2182                 goto out_err;
2183         parent_request->copyup_pages = pages;
2184
2185         parent_request->callback = rbd_img_obj_parent_read_full_callback;
2186         result = rbd_img_request_submit(parent_request);
2187         if (!result)
2188                 return 0;
2189
2190         parent_request->copyup_pages = NULL;
2191         parent_request->obj_request = NULL;
2192         rbd_obj_request_put(obj_request);
2193 out_err:
2194         if (pages)
2195                 ceph_release_page_vector(pages, page_count);
2196         if (parent_request)
2197                 rbd_img_request_put(parent_request);
2198         obj_request->result = result;
2199         obj_request->xferred = 0;
2200         obj_request_done_set(obj_request);
2201
2202         return result;
2203 }
2204
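/*
 * Completion callback for the STAT request issued by
 * rbd_img_obj_exists_submit().  Record whether the target object
 * exists (-ENOENT means it does not), then resubmit the original
 * object request; any other error completes it immediately.
 */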
2205 static void rbd_img_obj_exists_callback(struct rbd_obj_request *obj_request)
2206 {
2207         struct rbd_obj_request *orig_request;
2208         int result;
2209
2210         rbd_assert(!obj_request_img_data_test(obj_request));
2211
2212         /*
2213          * All we need from the object request is the original
2214          * request and the result of the STAT op.  Grab those, then
2215          * we're done with the request.
2216          */
2217         orig_request = obj_request->obj_request;
2218         obj_request->obj_request = NULL;
2219         rbd_assert(orig_request);
2220         rbd_assert(orig_request->img_request);
2221
2222         result = obj_request->result;
2223         obj_request->result = 0;
2224
2225         dout("%s: obj %p for obj %p result %d %llu/%llu\n", __func__,
2226                 obj_request, orig_request, result,
2227                 obj_request->xferred, obj_request->length);
2228         rbd_obj_request_put(obj_request);
2229
2233         /*
2234          * Our only purpose here is to determine whether the object
2235          * exists, and we don't want to treat the non-existence as
2236          * an error.  If something else comes back, transfer the
2237          * error to the original request and complete it now.
2238          */
2239         if (!result) {
2240                 obj_request_existence_set(orig_request, true);
2241         } else if (result == -ENOENT) {
2242                 obj_request_existence_set(orig_request, false);
2243         } else if (result) {
2244                 orig_request->result = result;
2245                 goto out;
2246         }
2247
2248         /*
2249          * Resubmit the original request now that we have recorded
2250          * whether the target object exists.
2251          */
2252         orig_request->result = rbd_img_obj_request_submit(orig_request);
2253 out:
2254         if (orig_request->result)
2255                 rbd_obj_request_complete(orig_request);
2256         rbd_obj_request_put(orig_request);
2257 }
2258
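/*
 * Issue a STAT request for the target object of a layered write
 * to find out whether it exists.  The result is picked up by
 * rbd_img_obj_exists_callback(), which resubmits the original
 * object request.
 */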
2259 static int rbd_img_obj_exists_submit(struct rbd_obj_request *obj_request)
2260 {
2261         struct rbd_obj_request *stat_request;
2262         struct rbd_device *rbd_dev;
2263         struct ceph_osd_client *osdc;
2264         struct page **pages = NULL;
2265         u32 page_count;
2266         size_t size;
2267         int ret;
2268
2269         /*
2270          * The response data for a STAT call consists of:
2271          *     le64 length;
2272          *     struct {
2273          *         le32 tv_sec;
2274          *         le32 tv_nsec;
2275          *     } mtime;
2276          */
2277         size = sizeof (__le64) + sizeof (__le32) + sizeof (__le32);
2278         page_count = (u32)calc_pages_for(0, size);
2279         pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2280         if (IS_ERR(pages))
2281                 return PTR_ERR(pages);
2282
2283         ret = -ENOMEM;
2284         stat_request = rbd_obj_request_create(obj_request->object_name, 0, 0,
2285                                                         OBJ_REQUEST_PAGES);
2286         if (!stat_request)
2287                 goto out;
2288
2289         rbd_obj_request_get(obj_request);
2290         stat_request->obj_request = obj_request;
2291         stat_request->pages = pages;
2292         stat_request->page_count = page_count;
2293
2294         rbd_assert(obj_request->img_request);
2295         rbd_dev = obj_request->img_request->rbd_dev;
2296         stat_request->osd_req = rbd_osd_req_create(rbd_dev, false,
2297                                                 stat_request);
2298         if (!stat_request->osd_req)
2299                 goto out;
2300         stat_request->callback = rbd_img_obj_exists_callback;
2301
2302         osd_req_op_init(stat_request->osd_req, 0, CEPH_OSD_OP_STAT);
2303         osd_req_op_raw_data_in_pages(stat_request->osd_req, 0, pages, size, 0,
2304                                         false, false);
2305         rbd_osd_req_format_read(stat_request);
2306
2307         osdc = &rbd_dev->rbd_client->client->osdc;
2308         ret = rbd_obj_request_submit(osdc, stat_request);
2309 out:
2310         if (ret)
2311                 rbd_obj_request_put(obj_request);
2312
2313         return ret;
2314 }
2315
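/*
 * Submit a single object request on behalf of an image request.
 * Most requests go straight to the osd; layered writes may first
 * require an existence check and a copyup, as explained below.
 */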
2316 static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request)
2317 {
2318         struct rbd_img_request *img_request;
2319         struct rbd_device *rbd_dev;
2320         bool known;
2321
2322         rbd_assert(obj_request_img_data_test(obj_request));
2323
2324         img_request = obj_request->img_request;
2325         rbd_assert(img_request);
2326         rbd_dev = img_request->rbd_dev;
2327
2328         /*
2329          * Only writes to layered images need special handling.
2330          * Reads and non-layered writes are simple object requests.
2331          * Layered writes that start beyond the end of the overlap
2332          * with the parent have no parent data, so they too are
2333          * simple object requests.  Finally, if the target object is
2334          * known to already exist, its parent data has already been
2335          * copied, so a write to the object can also be handled as a
2336          * simple object request.
2337          */
2338         if (!img_request_write_test(img_request) ||
2339                 !img_request_layered_test(img_request) ||
2340                 rbd_dev->parent_overlap <= obj_request->img_offset ||
2341                 ((known = obj_request_known_test(obj_request)) &&
2342                         obj_request_exists_test(obj_request))) {
2343
2344                 struct rbd_device *rbd_dev;
2345                 struct ceph_osd_client *osdc;
2346
2347                 rbd_dev = obj_request->img_request->rbd_dev;
2348                 osdc = &rbd_dev->rbd_client->client->osdc;
2349
2350                 return rbd_obj_request_submit(osdc, obj_request);
2351         }
2352
2353         /*
2354          * It's a layered write.  The target object might exist but
2355          * we may not know that yet.  If we know it doesn't exist,
2356          * start by reading the data for the full target object from
2357          * the parent so we can use it for a copyup to the target.
2358          */
2359         if (known)
2360                 return rbd_img_obj_parent_read_full(obj_request);
2361
2362         /* We don't know whether the target exists.  Go find out. */
2363
2364         return rbd_img_obj_exists_submit(obj_request);
2365 }
2366
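/*
 * Submit each object request that makes up an image request,
 * stopping on the first submission failure.
 */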
2367 static int rbd_img_request_submit(struct rbd_img_request *img_request)
2368 {
2369         struct rbd_obj_request *obj_request;
2370         struct rbd_obj_request *next_obj_request;
2371
2372         dout("%s: img %p\n", __func__, img_request);
2373         for_each_obj_request_safe(img_request, obj_request, next_obj_request) {
2374                 int ret;
2375
2376                 ret = rbd_img_obj_request_submit(obj_request);
2377                 if (ret)
2378                         return ret;
2379         }
2380
2381         return 0;
2382 }
2383
2384 static void rbd_img_parent_read_callback(struct rbd_img_request *img_request)
2385 {
2386         struct rbd_obj_request *obj_request;
2387         struct rbd_device *rbd_dev;
2388         u64 obj_end;
2389
2390         rbd_assert(img_request_child_test(img_request));
2391
2392         obj_request = img_request->obj_request;
2393         rbd_assert(obj_request);
2394         rbd_assert(obj_request->img_request);
2395
2396         obj_request->result = img_request->result;
2397         if (obj_request->result)
2398                 goto out;
2399
2400         /*
2401          * We need to zero anything beyond the parent overlap
2402          * boundary.  Since rbd_img_obj_request_read_callback()
2403          * will zero anything beyond the end of a short read, an
2404          * easy way to do this is to pretend the data from the
2405          * parent came up short--ending at the overlap boundary.
2406          */
2407         rbd_assert(obj_request->img_offset < U64_MAX - obj_request->length);
2408         obj_end = obj_request->img_offset + obj_request->length;
2409         rbd_dev = obj_request->img_request->rbd_dev;
2410         if (obj_end > rbd_dev->parent_overlap) {
2411                 u64 xferred = 0;
2412
2413                 if (obj_request->img_offset < rbd_dev->parent_overlap)
2414                         xferred = rbd_dev->parent_overlap -
2415                                         obj_request->img_offset;
2416
2417                 obj_request->xferred = min(img_request->xferred, xferred);
2418         } else {
2419                 obj_request->xferred = img_request->xferred;
2420         }
2421 out:
2422         rbd_img_obj_request_read_callback(obj_request);
2423         rbd_obj_request_complete(obj_request);
2424 }
2425
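/*
 * A read found no backing object in a layered image (-ENOENT).
 * Create a child image request to read the same range from the
 * parent image instead; rbd_img_parent_read_callback() completes
 * the original object request when the parent read finishes.
 */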
2426 static void rbd_img_parent_read(struct rbd_obj_request *obj_request)
2427 {
2428         struct rbd_device *rbd_dev;
2429         struct rbd_img_request *img_request;
2430         int result;
2431
2432         rbd_assert(obj_request_img_data_test(obj_request));
2433         rbd_assert(obj_request->img_request != NULL);
2434         rbd_assert(obj_request->result == (s32) -ENOENT);
2435         rbd_assert(obj_request->type == OBJ_REQUEST_BIO);
2436
2437         rbd_dev = obj_request->img_request->rbd_dev;
2438         rbd_assert(rbd_dev->parent != NULL);
2439         /* rbd_read_finish(obj_request, obj_request->length); */
2440         img_request = rbd_img_request_create(rbd_dev->parent,
2441                                                 obj_request->img_offset,
2442                                                 obj_request->length,
2443                                                 false, true);
2444         result = -ENOMEM;
2445         if (!img_request)
2446                 goto out_err;
2447
2448         rbd_obj_request_get(obj_request);
2449         img_request->obj_request = obj_request;
2450
2451         result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
2452                                         obj_request->bio_list);
2453         if (result)
2454                 goto out_err;
2455
2456         img_request->callback = rbd_img_parent_read_callback;
2457         result = rbd_img_request_submit(img_request);
2458         if (result)
2459                 goto out_err;
2460
2461         return;
2462 out_err:
2463         if (img_request)
2464                 rbd_img_request_put(img_request);
2465         obj_request->result = result;
2466         obj_request->xferred = 0;
2467         obj_request_done_set(obj_request);
2468 }
2469
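/*
 * Send an acknowledgement for a notification received on the
 * header object, so the osd knows this client has seen it.
 */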
2470 static int rbd_obj_notify_ack(struct rbd_device *rbd_dev, u64 notify_id)
2471 {
2472         struct rbd_obj_request *obj_request;
2473         struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2474         int ret;
2475
2476         obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
2477                                                         OBJ_REQUEST_NODATA);
2478         if (!obj_request)
2479                 return -ENOMEM;
2480
2481         ret = -ENOMEM;
2482         obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, obj_request);
2483         if (!obj_request->osd_req)
2484                 goto out;
2485         obj_request->callback = rbd_obj_request_put;
2486
2487         osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_NOTIFY_ACK,
2488                                         notify_id, 0, 0);
2489         rbd_osd_req_format_read(obj_request);
2490
2491         ret = rbd_obj_request_submit(osdc, obj_request);
2492 out:
2493         if (ret)
2494                 rbd_obj_request_put(obj_request);
2495
2496         return ret;
2497 }
2498
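/*
 * Called when a notification arrives on the header object,
 * indicating the image header may have changed.  Refresh the
 * in-core header, then acknowledge the notification.
 */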
2499 static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
2500 {
2501         struct rbd_device *rbd_dev = (struct rbd_device *)data;
2502
2503         if (!rbd_dev)
2504                 return;
2505
2506         dout("%s: \"%s\" notify_id %llu opcode %u\n", __func__,
2507                 rbd_dev->header_name, (unsigned long long)notify_id,
2508                 (unsigned int)opcode);
2509         (void)rbd_dev_refresh(rbd_dev);
2510
2511         rbd_obj_notify_ack(rbd_dev, notify_id);
2512 }
2513
2514 /*
2515  * Request sync osd watch/unwatch.  The value of "start" determines
2516  * whether a watch request is being initiated or torn down.
2517  */
2518 static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev, int start)
2519 {
2520         struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2521         struct rbd_obj_request *obj_request;
2522         int ret;
2523
2524         rbd_assert(start ^ !!rbd_dev->watch_event);
2525         rbd_assert(start ^ !!rbd_dev->watch_request);
2526
2527         if (start) {
2528                 ret = ceph_osdc_create_event(osdc, rbd_watch_cb, rbd_dev,
2529                                                 &rbd_dev->watch_event);
2530                 if (ret < 0)
2531                         return ret;
2532                 rbd_assert(rbd_dev->watch_event != NULL);
2533         }
2534
2535         ret = -ENOMEM;
2536         obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
2537                                                         OBJ_REQUEST_NODATA);
2538         if (!obj_request)
2539                 goto out_cancel;
2540
2541         obj_request->osd_req = rbd_osd_req_create(rbd_dev, true, obj_request);
2542         if (!obj_request->osd_req)
2543                 goto out_cancel;
2544
2545         if (start)
2546                 ceph_osdc_set_request_linger(osdc, obj_request->osd_req);
2547         else
2548                 ceph_osdc_unregister_linger_request(osdc,
2549                                         rbd_dev->watch_request->osd_req);
2550
2551         osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_WATCH,
2552                                 rbd_dev->watch_event->cookie, 0, start);
2553         rbd_osd_req_format_write(obj_request);
2554
2555         ret = rbd_obj_request_submit(osdc, obj_request);
2556         if (ret)
2557                 goto out_cancel;
2558         ret = rbd_obj_request_wait(obj_request);
2559         if (ret)
2560                 goto out_cancel;
2561         ret = obj_request->result;
2562         if (ret)
2563                 goto out_cancel;
2564
2565         /*
2566          * A watch request is set to linger, so the underlying osd
2567          * request won't go away until we unregister it.  We retain
2568          * a pointer to the object request during that time (in
2569          * rbd_dev->watch_request), so we'll keep a reference to
2570          * it.  We'll drop that reference (below) after we've
2571          * unregistered it.
2572          */
2573         if (start) {
2574                 rbd_dev->watch_request = obj_request;
2575
2576                 return 0;
2577         }
2578
2579         /* We have successfully torn down the watch request */
2580
2581         rbd_obj_request_put(rbd_dev->watch_request);
2582         rbd_dev->watch_request = NULL;
2583 out_cancel:
2584         /* Cancel the event if we're tearing down, or on error */
2585         ceph_osdc_cancel_event(rbd_dev->watch_event);
2586         rbd_dev->watch_event = NULL;
2587         if (obj_request)
2588                 rbd_obj_request_put(obj_request);
2589
2590         return ret;
2591 }
2592
2593 /*
2594  * Synchronous osd object method call.  Returns the number of bytes
2595  * returned in the inbound buffer, or a negative error code.
2596  */
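/*
 * An illustrative sketch of a caller (hypothetical, not taken
 * from this file): fetching an image's object prefix via the
 * "rbd" object class might look like
 *
 *	char buf[RBD_OBJ_PREFIX_LEN_MAX];
 *	int ret;
 *
 *	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
 *				  "rbd", "get_object_prefix",
 *				  NULL, 0, buf, sizeof (buf));
 *
 * where a negative ret is an error and a non-negative ret is the
 * number of bytes placed into buf.
 */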
2597 static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
2598                              const char *object_name,
2599                              const char *class_name,
2600                              const char *method_name,
2601                              const void *outbound,
2602                              size_t outbound_size,
2603                              void *inbound,
2604                              size_t inbound_size)
2605 {
2606         struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2607         struct rbd_obj_request *obj_request;
2608         struct page **pages;
2609         u32 page_count;
2610         int ret;
2611
2612         /*
2613          * Method calls are ultimately read operations.  The result
2614          * should be placed into the inbound buffer provided.  They
2615          * may also supply outbound data--parameters for the object
2616          * method.  Currently if this is present it will be a
2617          * snapshot id.
2618          */
2619         page_count = (u32)calc_pages_for(0, inbound_size);
2620         pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2621         if (IS_ERR(pages))
2622                 return PTR_ERR(pages);
2623
2624         ret = -ENOMEM;
2625         obj_request = rbd_obj_request_create(object_name, 0, inbound_size,
2626                                                         OBJ_REQUEST_PAGES);
2627         if (!obj_request)
2628                 goto out;
2629
2630         obj_request->pages = pages;
2631         obj_request->page_count = page_count;
2632
2633         obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, obj_request);
2634         if (!obj_request->osd_req)
2635                 goto out;
2636
2637         osd_req_op_cls_init(obj_request->osd_req, 0, CEPH_OSD_OP_CALL,
2638                                         class_name, method_name);
2639         if (outbound_size) {
2640                 struct ceph_pagelist *pagelist;
2641
2642                 pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS);
2643                 if (!pagelist)
2644                         goto out;
2645
2646                 ceph_pagelist_init(pagelist);
2647                 ceph_pagelist_append(pagelist, outbound, outbound_size);
2648                 osd_req_op_cls_request_data_pagelist(obj_request->osd_req, 0,
2649                                                 pagelist);
2650         }
2651         osd_req_op_cls_response_data_pages(obj_request->osd_req, 0,
2652                                         obj_request->pages, inbound_size,
2653                                         0, false, false);
2654         rbd_osd_req_format_read(obj_request);
2655
2656         ret = rbd_obj_request_submit(osdc, obj_request);
2657         if (ret)
2658                 goto out;
2659         ret = rbd_obj_request_wait(obj_request);
2660         if (ret)
2661                 goto out;
2662
2663         ret = obj_request->result;
2664         if (ret < 0)
2665                 goto out;
2666
2667         rbd_assert(obj_request->xferred < (u64)INT_MAX);
2668         ret = (int)obj_request->xferred;
2669         ceph_copy_from_page_vector(pages, inbound, 0, obj_request->xferred);
2670 out:
2671         if (obj_request)
2672                 rbd_obj_request_put(obj_request);
2673         else
2674                 ceph_release_page_vector(pages, page_count);
2675
2676         return ret;
2677 }
2678
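/*
 * The block layer's request_fn.  Drains the request queue,
 * converting each valid filesystem request into an image request
 * that is filled from the request's bio chain and submitted.
 * The queue lock is dropped while each request is processed.
 */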
2679 static void rbd_request_fn(struct request_queue *q)
2680                 __releases(q->queue_lock) __acquires(q->queue_lock)
2681 {
2682         struct rbd_device *rbd_dev = q->queuedata;
2683         bool read_only = rbd_dev->mapping.read_only;
2684         struct request *rq;
2685         int result;
2686
2687         while ((rq = blk_fetch_request(q))) {
2688                 bool write_request = rq_data_dir(rq) == WRITE;
2689                 struct rbd_img_request *img_request;
2690                 u64 offset;
2691                 u64 length;
2692
2693                 /* Ignore any non-FS requests that filter through. */
2694
2695                 if (rq->cmd_type != REQ_TYPE_FS) {
2696                         dout("%s: non-fs request type %d\n", __func__,
2697                                 (int) rq->cmd_type);
2698                         __blk_end_request_all(rq, 0);
2699                         continue;
2700                 }
2701
2702                 /* Ignore/skip any zero-length requests */
2703
2704                 offset = (u64) blk_rq_pos(rq) << SECTOR_SHIFT;
2705                 length = (u64) blk_rq_bytes(rq);
2706
2707                 if (!length) {
2708                         dout("%s: zero-length request\n", __func__);
2709                         __blk_end_request_all(rq, 0);
2710                         continue;
2711                 }
2712
2713                 spin_unlock_irq(q->queue_lock);
2714
2715                 /* Disallow writes to a read-only device */
2716
2717                 if (write_request) {
2718                         result = -EROFS;
2719                         if (read_only)
2720                                 goto end_request;
2721                         rbd_assert(rbd_dev->spec->snap_id == CEPH_NOSNAP);
2722                 }
2723
2724                 /*
2725                  * Quit early if the mapped snapshot no longer
2726                  * exists.  It's still possible the snapshot will
2727                  * have disappeared by the time our request arrives
2728                  * at the osd, but there's no sense in sending it if
2729                  * we already know.
2730                  */
2731                 if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags)) {
2732                         dout("request for non-existent snapshot");
2733                         rbd_assert(rbd_dev->spec->snap_id != CEPH_NOSNAP);
2734                         result = -ENXIO;
2735                         goto end_request;
2736                 }
2737
2738                 result = -EINVAL;
2739                 if (offset && length > U64_MAX - offset + 1) {
2740                         rbd_warn(rbd_dev, "bad request range (%llu~%llu)\n",
2741                                 offset, length);
2742                         goto end_request;       /* Shouldn't happen */
2743                 }
2744
2745                 result = -ENOMEM;
2746                 img_request = rbd_img_request_create(rbd_dev, offset, length,
2747                                                         write_request, false);
2748                 if (!img_request)
2749                         goto end_request;
2750
2751                 img_request->rq = rq;
2752
2753                 result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
2754                                                 rq->bio);
2755                 if (!result)
2756                         result = rbd_img_request_submit(img_request);
2757                 if (result)
2758                         rbd_img_request_put(img_request);
2759 end_request:
2760                 spin_lock_irq(q->queue_lock);
2761                 if (result < 0) {
2762                         rbd_warn(rbd_dev, "%s %llx at %llx result %d\n",
2763                                 write_request ? "write" : "read",
2764                                 length, offset, result);
2765
2766                         __blk_end_request_all(rq, result);
2767                 }
2768         }
2769 }
2770
2771 /*
2772  * a queue callback. Makes sure that we don't create a bio that spans across
2773  * multiple osd objects. One exception would be single-page bios,
2774  * which we handle later at bio_chain_clone_range()
2775  */
2776 static int rbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bmd,
2777                           struct bio_vec *bvec)
2778 {
2779         struct rbd_device *rbd_dev = q->queuedata;
2780         sector_t sector_offset;
2781         sector_t sectors_per_obj;
2782         sector_t obj_sector_offset;
2783         int ret;
2784
2785         /*
2786          * Convert the partition-relative bio start sector into a
2787          * sector offset relative to the enclosing device, then find
2788          * how far into its rbd object that offset falls.
2789          */
2790         sector_offset = get_start_sect(bmd->bi_bdev) + bmd->bi_sector;
2791         sectors_per_obj = 1 << (rbd_dev->header.obj_order - SECTOR_SHIFT);
2792         obj_sector_offset = sector_offset & (sectors_per_obj - 1);
2793
2794         /*
2795          * Compute the number of bytes from that offset to the end
2796          * of the object.  Account for what's already used by the bio.
2797          */
2798         ret = (int) (sectors_per_obj - obj_sector_offset) << SECTOR_SHIFT;
2799         if (ret > bmd->bi_size)
2800                 ret -= bmd->bi_size;
2801         else
2802                 ret = 0;
2803
2804         /*
2805          * Don't send back more than was asked for.  And if the bio
2806          * was empty, let the whole thing through because:  "Note
2807          * that a block device *must* allow a single page to be
2808          * added to an empty bio."
2809          */
2810         rbd_assert(bvec->bv_len <= PAGE_SIZE);
2811         if (ret > (int) bvec->bv_len || !bmd->bi_size)
2812                 ret = (int) bvec->bv_len;
2813
2814         return ret;
2815 }
2816
2817 static void rbd_free_disk(struct rbd_device *rbd_dev)
2818 {
2819         struct gendisk *disk = rbd_dev->disk;
2820
2821         if (!disk)
2822                 return;
2823
2824         rbd_dev->disk = NULL;
2825         if (disk->flags & GENHD_FL_UP) {
2826                 del_gendisk(disk);
2827                 if (disk->queue)
2828                         blk_cleanup_queue(disk->queue);
2829         }
2830         put_disk(disk);
2831 }
2832
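/*
 * Synchronously read a range of an object into the supplied
 * buffer.  Returns the number of bytes read on success, or a
 * negative error code.
 */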
2833 static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
2834                                 const char *object_name,
2835                                 u64 offset, u64 length, void *buf)
2836
2837 {
2838         struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2839         struct rbd_obj_request *obj_request;
2840         struct page **pages = NULL;
2841         u32 page_count;
2842         size_t size;
2843         int ret;
2844
2845         page_count = (u32) calc_pages_for(offset, length);
2846         pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2847         if (IS_ERR(pages))
2848                 return PTR_ERR(pages);
2849
2850         ret = -ENOMEM;
2851         obj_request = rbd_obj_request_create(object_name, offset, length,
2852                                                         OBJ_REQUEST_PAGES);
2853         if (!obj_request)
2854                 goto out;
2855
2856         obj_request->pages = pages;
2857         obj_request->page_count = page_count;
2858
2859         obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, obj_request);
2860         if (!obj_request->osd_req)
2861                 goto out;
2862
2863         osd_req_op_extent_init(obj_request->osd_req, 0, CEPH_OSD_OP_READ,
2864                                         offset, length, 0, 0);
2865         osd_req_op_extent_osd_data_pages(obj_request->osd_req, 0,
2866                                         obj_request->pages,
2867                                         obj_request->length,
2868                                         obj_request->offset & ~PAGE_MASK,
2869                                         false, false);
2870         rbd_osd_req_format_read(obj_request);
2871
2872         ret = rbd_obj_request_submit(osdc, obj_request);
2873         if (ret)
2874                 goto out;
2875         ret = rbd_obj_request_wait(obj_request);
2876         if (ret)
2877                 goto out;
2878
2879         ret = obj_request->result;
2880         if (ret < 0)
2881                 goto out;
2882
2883         rbd_assert(obj_request->xferred <= (u64) SIZE_MAX);
2884         size = (size_t) obj_request->xferred;
2885         ceph_copy_from_page_vector(pages, buf, 0, size);
2886         rbd_assert(size <= (size_t)INT_MAX);
2887         ret = (int)size;
2888 out:
2889         if (obj_request)
2890                 rbd_obj_request_put(obj_request);
2891         else
2892                 ceph_release_page_vector(pages, page_count);
2893
2894         return ret;
2895 }
2896
2897 /*
2898  * Read the complete header for the given rbd device.
2899  *
2900  * Returns a pointer to a dynamically-allocated buffer containing
2901  * the complete and validated header.
2904  *
2905  * Returns a pointer-coded errno if a failure occurs.
2906  */
2907 static struct rbd_image_header_ondisk *
2908 rbd_dev_v1_header_read(struct rbd_device *rbd_dev)
2909 {
2910         struct rbd_image_header_ondisk *ondisk = NULL;
2911         u32 snap_count = 0;
2912         u64 names_size = 0;
2913         u32 want_count;
2914         int ret;
2915
2916         /*
2917          * The complete header will include an array of its 64-bit
2918          * snapshot ids, followed by the names of those snapshots as
2919          * a contiguous block of NUL-terminated strings.  Note that
2920          * the number of snapshots could change by the time we read
2921          * it in, in which case we re-read it.
2922          */
2923         do {
2924                 size_t size;
2925
2926                 kfree(ondisk);
2927
2928                 size = sizeof (*ondisk);
2929                 size += snap_count * sizeof (struct rbd_image_snap_ondisk);
2930                 size += names_size;
2931                 ondisk = kmalloc(size, GFP_KERNEL);
2932                 if (!ondisk)
2933                         return ERR_PTR(-ENOMEM);
2934
2935                 ret = rbd_obj_read_sync(rbd_dev, rbd_dev->header_name,
2936                                        0, size, ondisk);
2937                 if (ret < 0)
2938                         goto out_err;
2939                 if ((size_t)ret < size) {
2940                         rbd_warn(rbd_dev, "short header read (want %zu got %d)",
2941                                 size, ret);
2942                         ret = -ENXIO;
2943                         goto out_err;
2944                 }
2945                 if (!rbd_dev_ondisk_valid(ondisk)) {
2946                         ret = -ENXIO;
2947                         rbd_warn(rbd_dev, "invalid header");
2948                         goto out_err;
2949                 }
2950
2951                 names_size = le64_to_cpu(ondisk->snap_names_len);
2952                 want_count = snap_count;
2953                 snap_count = le32_to_cpu(ondisk->snap_count);
2954         } while (snap_count != want_count);
2955
2956         return ondisk;
2957
2958 out_err:
2959         kfree(ondisk);
2960
2961         return ERR_PTR(ret);
2962 }
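
/*
 * For reference, a sketch of the v1 on-disk layout implied by the
 * size computation above (see rbd_types.h for the real structures;
 * the loop re-reads whenever snap_count changed under us):
 *
 *	struct rbd_image_header_ondisk			fixed-size prefix
 *	struct rbd_image_snap_ondisk[snap_count]	one id/size per snapshot
 *	char names[snap_names_len]			packed NUL-terminated names
 */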
2963
2964 /*
2965  * Re-read the on-disk image header.
2966  */
2967 static int rbd_read_header(struct rbd_device *rbd_dev,
2968                            struct rbd_image_header *header)
2969 {
2970         struct rbd_image_header_ondisk *ondisk;
2971         int ret;
2972
2973         ondisk = rbd_dev_v1_header_read(rbd_dev);
2974         if (IS_ERR(ondisk))
2975                 return PTR_ERR(ondisk);
2976         ret = rbd_header_from_disk(header, ondisk);
2977         kfree(ondisk);
2978
2979         return ret;
2980 }
2981
2982 static void rbd_remove_all_snaps(struct rbd_device *rbd_dev)
2983 {
2984         struct rbd_snap *snap;
2985         struct rbd_snap *next;
2986
2987         list_for_each_entry_safe(snap, next, &rbd_dev->snaps, node) {
2988                 list_del(&snap->node);
2989                 rbd_snap_destroy(snap);
2990         }
2991 }
2992
2993 static void rbd_update_mapping_size(struct rbd_device *rbd_dev)
2994 {
2995         if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
2996                 return;
2997
2998         if (rbd_dev->mapping.size != rbd_dev->header.image_size) {
2999                 sector_t size;
3000
3001                 rbd_dev->mapping.size = rbd_dev->header.image_size;
3002                 size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
3003                 dout("setting size to %llu sectors\n", (unsigned long long)size);
3004                 set_capacity(rbd_dev->disk, size);
3005         }
3006 }
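
/*
 * Example of the conversion above (values illustrative): a 1 GiB
 * mapping is 1073741824 bytes, and 1073741824 / SECTOR_SIZE (512)
 * yields a capacity of 2097152 sectors.
 */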
3007
3008 /*
3009  * Re-read the complete v1 image header and bring the in-memory
3010  * copy, mapping size, and snapshot list up to date.
3011  */
3011 static int rbd_dev_v1_refresh(struct rbd_device *rbd_dev)
3012 {
3013         int ret;
3014         struct rbd_image_header h;
3015
3016         ret = rbd_read_header(rbd_dev, &h);
3017         if (ret < 0)
3018                 return ret;
3019
3020         down_write(&rbd_dev->header_rwsem);
3021
3022         /* Update image size, and check for resize of mapped image */
3023         rbd_dev->header.image_size = h.image_size;
3024         rbd_update_mapping_size(rbd_dev);
3025
3026         /* rbd_dev->header.object_prefix shouldn't change */
3027         kfree(rbd_dev->header.snap_sizes);
3028         kfree(rbd_dev->header.snap_names);
3029         /* osd requests may still refer to snapc */
3030         ceph_put_snap_context(rbd_dev->header.snapc);
3031
3033         rbd_dev->header.snapc = h.snapc;
3034         rbd_dev->header.snap_names = h.snap_names;
3035         rbd_dev->header.snap_sizes = h.snap_sizes;
3036         /* Free the extra copy of the object prefix */
3037         if (strcmp(rbd_dev->header.object_prefix, h.object_prefix))
3038                 rbd_warn(rbd_dev, "object prefix changed (ignoring)");
3039         kfree(h.object_prefix);
3040
3041         ret = rbd_dev_snaps_update(rbd_dev);
3042
3043         up_write(&rbd_dev->header_rwsem);
3044
3045         return ret;
3046 }
3047
3048 static int rbd_dev_refresh(struct rbd_device *rbd_dev)
3049 {
3050         u64 image_size;
3051         int ret;
3052
3053         rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
3054         image_size = rbd_dev->header.image_size;
3055         mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
3056         if (rbd_dev->image_format == 1)
3057                 ret = rbd_dev_v1_refresh(rbd_dev);
3058         else
3059                 ret = rbd_dev_v2_refresh(rbd_dev);
3060         mutex_unlock(&ctl_mutex);
3061         if (ret)
3062                 rbd_warn(rbd_dev, "got notification but failed to "
3063                            "update snaps: %d\n", ret);
3064         if (image_size != rbd_dev->header.image_size)
3065                 revalidate_disk(rbd_dev->disk);
3066
3067         return ret;
3068 }
3069
3070 static int rbd_init_disk(struct rbd_device *rbd_dev)
3071 {
3072         struct gendisk *disk;
3073         struct request_queue *q;
3074         u64 segment_size;
3075
3076         /* create gendisk info */
3077         disk = alloc_disk(RBD_MINORS_PER_MAJOR);
3078         if (!disk)
3079                 return -ENOMEM;
3080
3081         snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
3082                  rbd_dev->dev_id);
3083         disk->major = rbd_dev->major;
3084         disk->first_minor = 0;
3085         disk->fops = &rbd_bd_ops;
3086         disk->private_data = rbd_dev;
3087
3088         q = blk_init_queue(rbd_request_fn, &rbd_dev->lock);
3089         if (!q)
3090                 goto out_disk;
3091
3092         /* We use the default size, but let's be explicit about it. */
3093         blk_queue_physical_block_size(q, SECTOR_SIZE);
3094
3095         /* set io sizes to object size */
3096         segment_size = rbd_obj_bytes(&rbd_dev->header);
3097         blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE);
3098         blk_queue_max_segment_size(q, segment_size);
3099         blk_queue_io_min(q, segment_size);
3100         blk_queue_io_opt(q, segment_size);
3101
3102         blk_queue_merge_bvec(q, rbd_merge_bvec);
3103         disk->queue = q;
3104
3105         q->queuedata = rbd_dev;
3106
3107         rbd_dev->disk = disk;
3108
3109         return 0;
3110 out_disk:
3111         put_disk(disk);
3112
3113         return -ENOMEM;
3114 }
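
/*
 * Worked example for the queue limits above, assuming the common
 * 4 MiB objects (obj_order 22): segment_size is 4194304 bytes, so
 * max_hw_sectors becomes 4194304 / 512 = 8192 sectors, and each
 * segment, io_min, and io_opt are all 4 MiB.
 */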
3115
3116 /*
3117   sysfs
3118 */
3119
3120 static struct rbd_device *dev_to_rbd_dev(struct device *dev)
3121 {
3122         return container_of(dev, struct rbd_device, dev);
3123 }
3124
3125 static ssize_t rbd_size_show(struct device *dev,
3126                              struct device_attribute *attr, char *buf)
3127 {
3128         struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3129
3130         return sprintf(buf, "%llu\n",
3131                 (unsigned long long)rbd_dev->mapping.size);
3132 }
3133
3134 /*
3135  * Note this shows the features for whatever's mapped, which is not
3136  * necessarily the base image.
3137  */
3138 static ssize_t rbd_features_show(struct device *dev,
3139                              struct device_attribute *attr, char *buf)
3140 {
3141         struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3142
3143         return sprintf(buf, "0x%016llx\n",
3144                         (unsigned long long)rbd_dev->mapping.features);
3145 }
3146
3147 static ssize_t rbd_major_show(struct device *dev,
3148                               struct device_attribute *attr, char *buf)
3149 {
3150         struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3151
3152         if (rbd_dev->major)
3153                 return sprintf(buf, "%d\n", rbd_dev->major);
3154
3155         return sprintf(buf, "(none)\n");
3157 }
3158
3159 static ssize_t rbd_client_id_show(struct device *dev,
3160                                   struct device_attribute *attr, char *buf)
3161 {
3162         struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3163
3164         return sprintf(buf, "client%lld\n",
3165                         ceph_client_id(rbd_dev->rbd_client->client));
3166 }
3167
3168 static ssize_t rbd_pool_show(struct device *dev,
3169                              struct device_attribute *attr, char *buf)
3170 {
3171         struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3172
3173         return sprintf(buf, "%s\n", rbd_dev->spec->pool_name);
3174 }
3175
3176 static ssize_t rbd_pool_id_show(struct device *dev,
3177                              struct device_attribute *attr, char *buf)
3178 {
3179         struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3180
3181         return sprintf(buf, "%llu\n",
3182                         (unsigned long long) rbd_dev->spec->pool_id);
3183 }
3184
3185 static ssize_t rbd_name_show(struct device *dev,
3186                              struct device_attribute *attr, char *buf)
3187 {
3188         struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3189
3190         if (rbd_dev->spec->image_name)
3191                 return sprintf(buf, "%s\n", rbd_dev->spec->image_name);
3192
3193         return sprintf(buf, "(unknown)\n");
3194 }
3195
3196 static ssize_t rbd_image_id_show(struct device *dev,
3197                              struct device_attribute *attr, char *buf)
3198 {
3199         struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3200
3201         return sprintf(buf, "%s\n", rbd_dev->spec->image_id);
3202 }
3203
3204 /*
3205  * Shows the name of the currently-mapped snapshot (or
3206  * RBD_SNAP_HEAD_NAME for the base image).
3207  */
3208 static ssize_t rbd_snap_show(struct device *dev,
3209                              struct device_attribute *attr,
3210                              char *buf)
3211 {
3212         struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3213
3214         return sprintf(buf, "%s\n", rbd_dev->spec->snap_name);
3215 }
3216
3217 /*
3218  * For an rbd v2 image, shows the pool id, image id, and snapshot id
3219  * for the parent image.  If there is no parent, simply shows
3220  * "(no parent image)".
3221  */
3222 static ssize_t rbd_parent_show(struct device *dev,
3223                              struct device_attribute *attr,
3224                              char *buf)
3225 {
3226         struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3227         struct rbd_spec *spec = rbd_dev->parent_spec;
3228         int count;
3229         char *bufp = buf;
3230
3231         if (!spec)
3232                 return sprintf(buf, "(no parent image)\n");
3233
3234         count = sprintf(bufp, "pool_id %llu\npool_name %s\n",
3235                         (unsigned long long) spec->pool_id, spec->pool_name);
3236         if (count < 0)
3237                 return count;
3238         bufp += count;
3239
3240         count = sprintf(bufp, "image_id %s\nimage_name %s\n", spec->image_id,
3241                         spec->image_name ? spec->image_name : "(unknown)");
3242         if (count < 0)
3243                 return count;
3244         bufp += count;
3245
3246         count = sprintf(bufp, "snap_id %llu\nsnap_name %s\n",
3247                         (unsigned long long) spec->snap_id, spec->snap_name);
3248         if (count < 0)
3249                 return count;
3250         bufp += count;
3251
3252         count = sprintf(bufp, "overlap %llu\n",
3253                         (unsigned long long)rbd_dev->parent_overlap);
3253         if (count < 0)
3254                 return count;
3255         bufp += count;
3256
3257         return (ssize_t) (bufp - buf);
3258 }
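
/*
 * Example "parent" attribute output (all values illustrative):
 *
 *	pool_id 2
 *	pool_name rbd
 *	image_id 10074b0dc512
 *	image_name base-image
 *	snap_id 4
 *	snap_name snap1
 *	overlap 10737418240
 */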
3259
3260 static ssize_t rbd_image_refresh(struct device *dev,
3261                                  struct device_attribute *attr,
3262                                  const char *buf,
3263                                  size_t size)
3264 {
3265         struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3266         int ret;
3267
3268         ret = rbd_dev_refresh(rbd_dev);
3269
3270         return ret < 0 ? ret : size;
3271 }
3272
3273 static DEVICE_ATTR(size, S_IRUGO, rbd_size_show, NULL);
3274 static DEVICE_ATTR(features, S_IRUGO, rbd_features_show, NULL);
3275 static DEVICE_ATTR(major, S_IRUGO, rbd_major_show, NULL);
3276 static DEVICE_ATTR(client_id, S_IRUGO, rbd_client_id_show, NULL);
3277 static DEVICE_ATTR(pool, S_IRUGO, rbd_pool_show, NULL);
3278 static DEVICE_ATTR(pool_id, S_IRUGO, rbd_pool_id_show, NULL);
3279 static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL);
3280 static DEVICE_ATTR(image_id, S_IRUGO, rbd_image_id_show, NULL);
3281 static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh);
3282 static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL);
3283 static DEVICE_ATTR(parent, S_IRUGO, rbd_parent_show, NULL);
3284
3285 static struct attribute *rbd_attrs[] = {
3286         &dev_attr_size.attr,
3287         &dev_attr_features.attr,
3288         &dev_attr_major.attr,
3289         &dev_attr_client_id.attr,
3290         &dev_attr_pool.attr,
3291         &dev_attr_pool_id.attr,
3292         &dev_attr_name.attr,
3293         &dev_attr_image_id.attr,
3294         &dev_attr_current_snap.attr,
3295         &dev_attr_parent.attr,
3296         &dev_attr_refresh.attr,
3297         NULL
3298 };
3299
3300 static struct attribute_group rbd_attr_group = {
3301         .attrs = rbd_attrs,
3302 };
3303
3304 static const struct attribute_group *rbd_attr_groups[] = {
3305         &rbd_attr_group,
3306         NULL
3307 };
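
/*
 * These attributes appear under /sys/bus/rbd/devices/<dev-id>/, per
 * Documentation/ABI/testing/sysfs-bus-rbd.  For example (output
 * illustrative):
 *
 *	$ cat /sys/bus/rbd/devices/0/size
 *	10737418240
 *	$ cat /sys/bus/rbd/devices/0/current_snap
 *	-
 */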
3308
3309 static void rbd_sysfs_dev_release(struct device *dev)
3310 {
3311 }
3312
3313 static struct device_type rbd_device_type = {
3314         .name           = "rbd",
3315         .groups         = rbd_attr_groups,
3316         .release        = rbd_sysfs_dev_release,
3317 };
3318
3319 static struct rbd_spec *rbd_spec_get(struct rbd_spec *spec)
3320 {
3321         kref_get(&spec->kref);
3322
3323         return spec;
3324 }
3325
3326 static void rbd_spec_free(struct kref *kref);
3327 static void rbd_spec_put(struct rbd_spec *spec)
3328 {
3329         if (spec)
3330                 kref_put(&spec->kref, rbd_spec_free);
3331 }
3332
3333 static struct rbd_spec *rbd_spec_alloc(void)
3334 {
3335         struct rbd_spec *spec;
3336
3337         spec = kzalloc(sizeof (*spec), GFP_KERNEL);
3338         if (!spec)
3339                 return NULL;
3340         kref_init(&spec->kref);
3341
3342         return spec;
3343 }
3344
3345 static void rbd_spec_free(struct kref *kref)
3346 {
3347         struct rbd_spec *spec = container_of(kref, struct rbd_spec, kref);
3348
3349         kfree(spec->pool_name);
3350         kfree(spec->image_id);
3351         kfree(spec->image_name);
3352         kfree(spec->snap_name);
3353         kfree(spec);
3354 }
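
/*
 * A minimal sketch of the rbd_spec reference counting above (not a
 * verbatim call sequence from this driver):
 *
 *	struct rbd_spec *spec = rbd_spec_alloc();	kref == 1
 *	rbd_spec_get(spec);				kref == 2
 *	rbd_spec_put(spec);				kref == 1
 *	rbd_spec_put(spec);				freed via rbd_spec_free()
 */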
3355
3356 static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
3357                                 struct rbd_spec *spec)
3358 {
3359         struct rbd_device *rbd_dev;
3360
3361         rbd_dev = kzalloc(sizeof (*rbd_dev), GFP_KERNEL);
3362         if (!rbd_dev)
3363                 return NULL;
3364
3365         spin_lock_init(&rbd_dev->lock);
3366         rbd_dev->flags = 0;
3367         INIT_LIST_HEAD(&rbd_dev->node);
3368         INIT_LIST_HEAD(&rbd_dev->snaps);
3369         init_rwsem(&rbd_dev->header_rwsem);
3370
3371         rbd_dev->spec = spec;
3372         rbd_dev->rbd_client = rbdc;
3373
3374         /* Initialize the layout used for all rbd requests */
3375
3376         rbd_dev->layout.fl_stripe_unit = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
3377         rbd_dev->layout.fl_stripe_count = cpu_to_le32(1);
3378         rbd_dev->layout.fl_object_size = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
3379         rbd_dev->layout.fl_pg_pool = cpu_to_le32((u32) spec->pool_id);
3380
3381         return rbd_dev;
3382 }
3383
3384 static void rbd_dev_destroy(struct rbd_device *rbd_dev)
3385 {
3386         rbd_put_client(rbd_dev->rbd_client);
3387         rbd_spec_put(rbd_dev->spec);
3388         kfree(rbd_dev);
3389 }
3390
3391 static void rbd_snap_destroy(struct rbd_snap *snap)
3392 {
3393         kfree(snap->name);
3394         kfree(snap);
3395 }
3396
3397 static struct rbd_snap *rbd_snap_create(struct rbd_device *rbd_dev,
3398                                                 const char *snap_name,
3399                                                 u64 snap_id, u64 snap_size,
3400                                                 u64 snap_features)
3401 {
3402         struct rbd_snap *snap;
3403
3404         snap = kzalloc(sizeof (*snap), GFP_KERNEL);
3405         if (!snap)
3406                 return ERR_PTR(-ENOMEM);
3407
3408         snap->name = snap_name;
3409         snap->id = snap_id;
3410         snap->size = snap_size;
3411         snap->features = snap_features;
3412
3413         return snap;
3414 }
3415
3416 /*
3417  * Returns a dynamically-allocated snapshot name if successful, or a
3418  * pointer-coded error otherwise.
3419  */
3420 static const char *rbd_dev_v1_snap_info(struct rbd_device *rbd_dev, u32 which,
3421                 u64 *snap_size, u64 *snap_features)
3422 {
3423         const char *snap_name;
3424         int i;
3425
3426         rbd_assert(which < rbd_dev->header.snapc->num_snaps);
3427
3428         /* Skip over names until we find the one we are looking for */
3429
3430         snap_name = rbd_dev->header.snap_names;
3431         for (i = 0; i < which; i++)
3432                 snap_name += strlen(snap_name) + 1;
3433
3434         snap_name = kstrdup(snap_name, GFP_KERNEL);
3435         if (!snap_name)
3436                 return ERR_PTR(-ENOMEM);
3437
3438         *snap_size = rbd_dev->header.snap_sizes[which];
3439         *snap_features = 0;     /* No features for v1 */
3440
3441         return snap_name;
3442 }
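
/*
 * The v1 names walked above are packed back to back, e.g. (values
 * illustrative) "first\0second\0third\0": which == 2 skips
 * strlen("first") + 1 and then strlen("second") + 1 bytes to land
 * on "third".
 */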
3443
3444 /*
3445  * Get the size and object order for an image snapshot, or if
3446  * snap_id is CEPH_NOSNAP, gets this information for the base
3447  * image.
3448  */
3449 static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
3450                                 u8 *order, u64 *snap_size)
3451 {
3452         __le64 snapid = cpu_to_le64(snap_id);
3453         int ret;
3454         struct {
3455                 u8 order;
3456                 __le64 size;
3457         } __attribute__ ((packed)) size_buf = { 0 };
3458
3459         ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3460                                 "rbd", "get_size",
3461                                 &snapid, sizeof (snapid),
3462                                 &size_buf, sizeof (size_buf));
3463         dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3464         if (ret < 0)
3465                 return ret;
3466         if (ret < sizeof (size_buf))
3467                 return -ERANGE;
3468
3469         if (order) {
3470                 *order = size_buf.order;
3471                 dout("  order %u\n", (unsigned int)*order);
3472         }
3473         *snap_size = le64_to_cpu(size_buf.size);
3474         dout("  snap_id 0x%016llx snap_size = %llu\n",
3475                 (unsigned long long)snap_id, (unsigned long long)*snap_size);
3476
3477         return 0;
3478 }
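
/*
 * The "get_size" reply decoded above is the packed, little-endian
 * pair shown in size_buf; e.g. (illustrative) a 4 GiB head image
 * with 4 MiB objects arrives as order == 22, size == 4294967296.
 */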
3479
3480 static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev)
3481 {
3482         return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
3483                                         &rbd_dev->header.obj_order,
3484                                         &rbd_dev->header.image_size);
3485 }
3486
3487 static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
3488 {
3489         void *reply_buf;
3490         int ret;
3491         void *p;
3492
3493         reply_buf = kzalloc(RBD_OBJ_PREFIX_LEN_MAX, GFP_KERNEL);
3494         if (!reply_buf)
3495                 return -ENOMEM;
3496
3497         ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3498                                 "rbd", "get_object_prefix", NULL, 0,
3499                                 reply_buf, RBD_OBJ_PREFIX_LEN_MAX);
3500         dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3501         if (ret < 0)
3502                 goto out;
3503
3504         p = reply_buf;
3505         rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
3506                                                 p + ret, NULL, GFP_NOIO);
3507         ret = 0;
3508
3509         if (IS_ERR(rbd_dev->header.object_prefix)) {
3510                 ret = PTR_ERR(rbd_dev->header.object_prefix);
3511                 rbd_dev->header.object_prefix = NULL;
3512         } else {
3513                 dout("  object_prefix = %s\n", rbd_dev->header.object_prefix);
3514         }
3515 out:
3516         kfree(reply_buf);
3517
3518         return ret;
3519 }
3520
3521 static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
3522                 u64 *snap_features)
3523 {
3524         __le64 snapid = cpu_to_le64(snap_id);
3525         struct {
3526                 __le64 features;
3527                 __le64 incompat;
3528         } __attribute__ ((packed)) features_buf = { 0 };
3529         u64 incompat;
3530         int ret;
3531
3532         ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3533                                 "rbd", "get_features",
3534                                 &snapid, sizeof (snapid),
3535                                 &features_buf, sizeof (features_buf));
3536         dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3537         if (ret < 0)
3538                 return ret;
3539         if (ret < sizeof (features_buf))
3540                 return -ERANGE;
3541
3542         incompat = le64_to_cpu(features_buf.incompat);
3543         if (incompat & ~RBD_FEATURES_SUPPORTED)
3544                 return -ENXIO;
3545
3546         *snap_features = le64_to_cpu(features_buf.features);
3547
3548         dout("  snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n",
3549                 (unsigned long long)snap_id,
3550                 (unsigned long long)*snap_features,
3551                 (unsigned long long)le64_to_cpu(features_buf.incompat));
3552
3553         return 0;
3554 }
3555
3556 static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
3557 {
3558         return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
3559                                                 &rbd_dev->header.features);
3560 }
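
/*
 * Example of the incompat check above (bit values illustrative): an
 * image whose incompat mask sets only RBD_FEATURE_LAYERING (0x1) is
 * accepted, while one that also sets an unknown bit such as 0x4 is
 * refused with -ENXIO.  Unknown bits in the plain features mask are
 * tolerated; only incompat is enforced.
 */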
3561
3562 static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
3563 {
3564         struct rbd_spec *parent_spec;
3565         size_t size;
3566         void *reply_buf = NULL;
3567         __le64 snapid;
3568         void *p;
3569         void *end;
3570         char *image_id;
3571         u64 overlap;
3572         int ret;
3573
3574         parent_spec = rbd_spec_alloc();
3575         if (!parent_spec)
3576                 return -ENOMEM;
3577
3578         size = sizeof (__le64) +                                /* pool_id */
3579                 sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX +        /* image_id */
3580                 sizeof (__le64) +                               /* snap_id */
3581                 sizeof (__le64);                                /* overlap */
3582         reply_buf = kmalloc(size, GFP_KERNEL);
3583         if (!reply_buf) {
3584                 ret = -ENOMEM;
3585                 goto out_err;
3586         }
3587
3588         snapid = cpu_to_le64(CEPH_NOSNAP);
3589         ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3590                                 "rbd", "get_parent",
3591                                 &snapid, sizeof (snapid),
3592                                 reply_buf, size);
3593         dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3594         if (ret < 0)
3595                 goto out_err;
3596
3597         p = reply_buf;
3598         end = reply_buf + ret;
3599         ret = -ERANGE;
3600         ceph_decode_64_safe(&p, end, parent_spec->pool_id, out_err);
3601         if (parent_spec->pool_id == CEPH_NOPOOL)
3602                 goto out;       /* No parent?  No problem. */
3603
3604         /* The ceph file layout needs to fit pool id in 32 bits */
3605
3606         ret = -EIO;
3607         if (parent_spec->pool_id > (u64)U32_MAX) {
3608                 rbd_warn(NULL, "parent pool id too large (%llu > %u)\n",
3609                         (unsigned long long)parent_spec->pool_id, U32_MAX);
3610                 goto out_err;
3611         }
3612
3613         image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
3614         if (IS_ERR(image_id)) {
3615                 ret = PTR_ERR(image_id);
3616                 goto out_err;
3617         }
3618         parent_spec->image_id = image_id;
3619         ceph_decode_64_safe(&p, end, parent_spec->snap_id, out_err);
3620         ceph_decode_64_safe(&p, end, overlap, out_err);
3621
3622         rbd_dev->parent_overlap = overlap;
3623         rbd_dev->parent_spec = parent_spec;
3624         parent_spec = NULL;     /* rbd_dev now owns this */
3625 out:
3626         ret = 0;
3627 out_err:
3628         kfree(reply_buf);
3629         rbd_spec_put(parent_spec);
3630
3631         return ret;
3632 }
3633
3634 static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
3635 {
3636         struct {
3637                 __le64 stripe_unit;
3638                 __le64 stripe_count;
3639         } __attribute__ ((packed)) striping_info_buf = { 0 };
3640         size_t size = sizeof (striping_info_buf);
3641         void *p;
3642         u64 obj_size;
3643         u64 stripe_unit;
3644         u64 stripe_count;
3645         int ret;
3646
3647         ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3648                                 "rbd", "get_stripe_unit_count", NULL, 0,
3649                                 (char *)&striping_info_buf, size);
3650         dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3651         if (ret < 0)
3652                 return ret;
3653         if (ret < size)
3654                 return -ERANGE;
3655
3656         /*
3657          * We don't actually support the "fancy striping" feature
3658          * (STRIPINGV2) yet, but if the striping sizes are the
3659          * defaults the behavior is the same as before.  So find
3660          * out, and only fail if the image has non-default values.
3661          */
3662         ret = -EINVAL;
3663         obj_size = (u64)1 << rbd_dev->header.obj_order;
3664         p = &striping_info_buf;
3665         stripe_unit = ceph_decode_64(&p);
3666         if (stripe_unit != obj_size) {
3667                 rbd_warn(rbd_dev, "unsupported stripe unit "
3668                                 "(got %llu want %llu)",
3669                                 stripe_unit, obj_size);
3670                 return -EINVAL;
3671         }
3672         stripe_count = ceph_decode_64(&p);
3673         if (stripe_count != 1) {
3674                 rbd_warn(rbd_dev, "unsupported stripe count "
3675                                 "(got %llu want 1)", stripe_count);
3676                 return -EINVAL;
3677         }
3678         rbd_dev->header.stripe_unit = stripe_unit;
3679         rbd_dev->header.stripe_count = stripe_count;
3680
3681         return 0;
3682 }
3683
3684 static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
3685 {
3686         size_t image_id_size;
3687         char *image_id;
3688         void *p;
3689         void *end;
3690         size_t size;
3691         void *reply_buf = NULL;
3692         size_t len = 0;
3693         char *image_name = NULL;
3694         int ret;
3695
3696         rbd_assert(!rbd_dev->spec->image_name);
3697
3698         len = strlen(rbd_dev->spec->image_id);
3699         image_id_size = sizeof (__le32) + len;
3700         image_id = kmalloc(image_id_size, GFP_KERNEL);
3701         if (!image_id)
3702                 return NULL;
3703
3704         p = image_id;
3705         end = image_id + image_id_size;
3706         ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32)len);
3707
3708         size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX;
3709         reply_buf = kmalloc(size, GFP_KERNEL);
3710         if (!reply_buf)
3711                 goto out;
3712
3713         ret = rbd_obj_method_sync(rbd_dev, RBD_DIRECTORY,
3714                                 "rbd", "dir_get_name",
3715                                 image_id, image_id_size,
3716                                 reply_buf, size);
3717         if (ret < 0)
3718                 goto out;
3719         p = reply_buf;
3720         end = reply_buf + ret;
3721
3722         image_name = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
3723         if (IS_ERR(image_name))
3724                 image_name = NULL;
3725         else
3726                 dout("%s: name is %s len is %zd\n", __func__, image_name, len);
3727 out:
3728         kfree(reply_buf);
3729         kfree(image_id);
3730
3731         return image_name;
3732 }
3733
3734 /*
3735  * When an rbd image has a parent image, it is identified by the
3736  * pool, image, and snapshot ids (not names).  This function fills
3737  * in the names for those ids.  (It's OK if we can't figure out the
3738  * name for an image id, but the pool and snapshot ids should always
3739  * exist and have names.)  All names in an rbd spec are dynamically
3740  * allocated.
3741  *
3742  * When an image being mapped (not a parent) is probed, we have the
3743  * pool name and pool id, image name and image id, and the snapshot
3744  * name.  The only thing we're missing is the snapshot id.
3745  *
3746  * The set of snapshots for an image is not known until they have
3747  * been read by rbd_dev_snaps_update(), so we can't completely fill
3748  * in this information until after that has been called.
3749  */
3750 static int rbd_dev_spec_update(struct rbd_device *rbd_dev)
3751 {
3752         struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3753         struct rbd_spec *spec = rbd_dev->spec;
3754         const char *pool_name;
3755         const char *image_name;
3756         const char *snap_name;
3757         int ret;
3758
3759         /*
3760          * An image being mapped will have the pool name (etc.), but
3761          * we need to look up the snapshot id.
3762          */
3763         if (spec->pool_name) {
3764                 if (strcmp(spec->snap_name, RBD_SNAP_HEAD_NAME)) {
3765                         struct rbd_snap *snap;
3766
3767                         snap = snap_by_name(rbd_dev, spec->snap_name);
3768                         if (!snap)
3769                                 return -ENOENT;
3770                         spec->snap_id = snap->id;
3771                 } else {
3772                         spec->snap_id = CEPH_NOSNAP;
3773                 }
3774
3775                 return 0;
3776         }
3777
3778         /* Get the pool name; we have to make our own copy of this */
3779
3780         pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, spec->pool_id);
3781         if (!pool_name) {
3782                 rbd_warn(rbd_dev, "no pool with id %llu", spec->pool_id);
3783                 return -EIO;
3784         }
3785         pool_name = kstrdup(pool_name, GFP_KERNEL);
3786         if (!pool_name)
3787                 return -ENOMEM;
3788
3789         /* Fetch the image name; tolerate failure here */
3790
3791         image_name = rbd_dev_image_name(rbd_dev);
3792         if (!image_name)
3793                 rbd_warn(rbd_dev, "unable to get image name");
3794
3795         /* Look up the snapshot name, and make a copy */
3796
3797         snap_name = rbd_snap_name(rbd_dev, spec->snap_id);
3798         if (!snap_name) {
3799                 rbd_warn(rbd_dev, "no snapshot with id %llu", spec->snap_id);
3800                 ret = -EIO;
3801                 goto out_err;
3802         }
3803         snap_name = kstrdup(snap_name, GFP_KERNEL);
3804         if (!snap_name) {
3805                 ret = -ENOMEM;
3806                 goto out_err;
3807         }
3808
3809         spec->pool_name = pool_name;
3810         spec->image_name = image_name;
3811         spec->snap_name = snap_name;
3812
3813         return 0;
3814 out_err:
3815         kfree(image_name);
3816         kfree(pool_name);
3817
3818         return ret;
3819 }
3820
3821 static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
3822 {
3823         size_t size;
3824         int ret;
3825         void *reply_buf;
3826         void *p;
3827         void *end;
3828         u64 seq;
3829         u32 snap_count;
3830         struct ceph_snap_context *snapc;
3831         u32 i;
3832
3833         /*
3834          * We'll need room for the seq value (maximum snapshot id),
3835          * snapshot count, and array of that many snapshot ids.
3836          * For now we have a fixed upper limit on the number we're
3837          * prepared to receive.
3838          */
3839         size = sizeof (__le64) + sizeof (__le32) +
3840                         RBD_MAX_SNAP_COUNT * sizeof (__le64);
3841         reply_buf = kzalloc(size, GFP_KERNEL);
3842         if (!reply_buf)
3843                 return -ENOMEM;
3844
3845         ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3846                                 "rbd", "get_snapcontext", NULL, 0,
3847                                 reply_buf, size);
3848         dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3849         if (ret < 0)
3850                 goto out;
3851
3852         p = reply_buf;
3853         end = reply_buf + ret;
3854         ret = -ERANGE;
3855         ceph_decode_64_safe(&p, end, seq, out);
3856         ceph_decode_32_safe(&p, end, snap_count, out);
3857
3858         /*
3859          * Make sure the reported number of snapshot ids wouldn't go
3860          * beyond the end of our buffer.  But before checking that,
3861          * make sure the computed size of the snapshot context we
3862          * allocate is representable in a size_t.
3863          */
3864         if (snap_count > (SIZE_MAX - sizeof (struct ceph_snap_context))
3865                                  / sizeof (u64)) {
3866                 ret = -EINVAL;
3867                 goto out;
3868         }
3869         if (!ceph_has_room(&p, end, snap_count * sizeof (__le64)))
3870                 goto out;
3871         ret = 0;
3872
3873         snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
3874         if (!snapc) {
3875                 ret = -ENOMEM;
3876                 goto out;
3877         }
3878         snapc->seq = seq;
3879         for (i = 0; i < snap_count; i++)
3880                 snapc->snaps[i] = ceph_decode_64(&p);
3881
3882         rbd_dev->header.snapc = snapc;
3883
3884         dout("  snap context seq = %llu, snap_count = %u\n",
3885                 (unsigned long long)seq, (unsigned int)snap_count);
3886 out:
3887         kfree(reply_buf);
3888
3889         return ret;
3890 }
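
/*
 * Wire format of the "get_snapcontext" reply decoded above (field
 * names here are descriptive only):
 *
 *	__le64 seq			highest snapshot id
 *	__le32 snap_count
 *	__le64 snaps[snap_count]	snapshot ids, highest first
 */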
3891
3892 static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev, u32 which)
3893 {
3894         size_t size;
3895         void *reply_buf;
3896         __le64 snap_id;
3897         int ret;
3898         void *p;
3899         void *end;
3900         char *snap_name;
3901
3902         size = sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN;
3903         reply_buf = kmalloc(size, GFP_KERNEL);
3904         if (!reply_buf)
3905                 return ERR_PTR(-ENOMEM);
3906
3907         rbd_assert(which < rbd_dev->header.snapc->num_snaps);
3908         snap_id = cpu_to_le64(rbd_dev->header.snapc->snaps[which]);
3909         ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3910                                 "rbd", "get_snapshot_name",
3911                                 &snap_id, sizeof (snap_id),
3912                                 reply_buf, size);
3913         dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3914         if (ret < 0) {
3915                 snap_name = ERR_PTR(ret);
3916                 goto out;
3917         }
3918
3919         p = reply_buf;
3920         end = reply_buf + ret;
3921         snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
3922         if (IS_ERR(snap_name))
3923                 goto out;
3924
3925         dout("  snap_id 0x%016llx snap_name = %s\n",
3926                 (unsigned long long)le64_to_cpu(snap_id), snap_name);
3927 out:
3928         kfree(reply_buf);
3929
3930         return snap_name;
3931 }
3932
3933 static const char *rbd_dev_v2_snap_info(struct rbd_device *rbd_dev, u32 which,
3934                 u64 *snap_size, u64 *snap_features)
3935 {
3936         u64 snap_id;
3937         u64 size;
3938         u64 features;
3939         const char *snap_name;
3940         int ret;
3941
3942         rbd_assert(which < rbd_dev->header.snapc->num_snaps);
3943         snap_id = rbd_dev->header.snapc->snaps[which];
3944         ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, NULL, &size);
3945         if (ret)
3946                 goto out_err;
3947
3948         ret = _rbd_dev_v2_snap_features(rbd_dev, snap_id, &features);
3949         if (ret)
3950                 goto out_err;
3951
3952         snap_name = rbd_dev_v2_snap_name(rbd_dev, which);
3953         if (!IS_ERR(snap_name)) {
3954                 *snap_size = size;
3955                 *snap_features = features;
3956         }
3957
3958         return snap_name;
3959 out_err:
3960         return ERR_PTR(ret);
3961 }
3962
3963 static const char *rbd_dev_snap_info(struct rbd_device *rbd_dev, u32 which,
3964                 u64 *snap_size, u64 *snap_features)
3965 {
3966         if (rbd_dev->image_format == 1)
3967                 return rbd_dev_v1_snap_info(rbd_dev, which,
3968                                         snap_size, snap_features);
3969         if (rbd_dev->image_format == 2)
3970                 return rbd_dev_v2_snap_info(rbd_dev, which,
3971                                         snap_size, snap_features);
3972         return ERR_PTR(-EINVAL);
3973 }
3974
3975 static int rbd_dev_v2_refresh(struct rbd_device *rbd_dev)
3976 {
3977         int ret;
3978
3979         down_write(&rbd_dev->header_rwsem);
3980
3981         ret = rbd_dev_v2_image_size(rbd_dev);
3982         if (ret)
3983                 goto out;
3984         rbd_update_mapping_size(rbd_dev);
3985
3986         ret = rbd_dev_v2_snap_context(rbd_dev);
3987         dout("rbd_dev_v2_snap_context returned %d\n", ret);
3988         if (ret)
3989                 goto out;
3990         ret = rbd_dev_snaps_update(rbd_dev);
3991         dout("rbd_dev_snaps_update returned %d\n", ret);
3994 out:
3995         up_write(&rbd_dev->header_rwsem);
3996
3997         return ret;
3998 }
3999
4000 /*
4001  * Scan the rbd device's current snapshot list and compare it to the
4002  * newly-received snapshot context.  Remove any existing snapshots
4003  * not present in the new snapshot context.  Add a new snapshot for
4004  * any snapshots in the snapshot context not in the current list.
4005  * And verify there are no changes to snapshots we already know
4006  * about.
4007  *
4008  * Assumes the snapshots in the snapshot context are sorted by
4009  * snapshot id, highest id first.  (Snapshots in the rbd_dev's list
4010  * are also maintained in that order.)
4011  *
4012  * Note that any error that occurs while updating the snapshot list
4013  * aborts the update, and the entire list is cleared.  The snapshot
4014  * list becomes inconsistent at that point anyway, so it might as
4015  * well be empty.
4016  */
4017 static int rbd_dev_snaps_update(struct rbd_device *rbd_dev)
4018 {
4019         struct ceph_snap_context *snapc = rbd_dev->header.snapc;
4020         const u32 snap_count = snapc->num_snaps;
4021         struct list_head *head = &rbd_dev->snaps;
4022         struct list_head *links = head->next;
4023         u32 index = 0;
4024         int ret = 0;
4025
4026         dout("%s: snap count is %u\n", __func__, (unsigned int)snap_count);
4027         while (index < snap_count || links != head) {
4028                 u64 snap_id;
4029                 struct rbd_snap *snap;
4030                 const char *snap_name;
4031                 u64 snap_size = 0;
4032                 u64 snap_features = 0;
4033
4034                 snap_id = index < snap_count ? snapc->snaps[index]
4035                                              : CEPH_NOSNAP;
4036                 snap = links != head ? list_entry(links, struct rbd_snap, node)
4037                                      : NULL;
4038                 rbd_assert(!snap || snap->id != CEPH_NOSNAP);
4039
4040                 if (snap_id == CEPH_NOSNAP || (snap && snap->id > snap_id)) {
4041                         struct list_head *next = links->next;
4042
4043                         /*
4044                          * A previously-existing snapshot is not in
4045                          * the new snap context.
4046                          *
4047                          * If the now-missing snapshot is the one
4048                          * the image represents, clear its existence
4049                          * flag so we can avoid sending any more
4050                          * requests to it.
4051                          */
4052                         if (rbd_dev->spec->snap_id == snap->id)
4053                                 clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
4054                         dout("removing %ssnap id %llu\n",
4055                                 rbd_dev->spec->snap_id == snap->id ?
4056                                                         "mapped " : "",
4057                                 (unsigned long long)snap->id);
4058
4059                         list_del(&snap->node);
4060                         rbd_snap_destroy(snap);
4061
4062                         /* Done with this list entry; advance */
4063
4064                         links = next;
4065                         continue;
4066                 }
4067
4068                 snap_name = rbd_dev_snap_info(rbd_dev, index,
4069                                         &snap_size, &snap_features);
4070                 if (IS_ERR(snap_name)) {
4071                         ret = PTR_ERR(snap_name);
4072                         dout("failed to get snap info, error %d\n", ret);
4073                         goto out_err;
4074                 }
4075
4076                 dout("entry %u: snap_id = %llu\n", (unsigned int)index,
4077                         (unsigned long long)snap_id);
4078                 if (!snap || (snap_id != CEPH_NOSNAP && snap->id < snap_id)) {
4079                         struct rbd_snap *new_snap;
4080
4081                         /* We haven't seen this snapshot before */
4082
4083                         new_snap = rbd_snap_create(rbd_dev, snap_name,
4084                                         snap_id, snap_size, snap_features);
4085                         if (IS_ERR(new_snap)) {
4086                                 ret = PTR_ERR(new_snap);
4087                                 dout("  failed to add dev, error %d\n", ret);
4088                                 goto out_err;
4089                         }
4090
4091                         /* New goes before existing, or at end of list */
4092
4093                         dout("  added dev%s\n", snap ? "" : " at end");
4094                         if (snap)
4095                                 list_add_tail(&new_snap->node, &snap->node);
4096                         else
4097                                 list_add_tail(&new_snap->node, head);
4098                 } else {
4099                         /* Already have this one */
4100
4101                         dout("  already present\n");
4102
4103                         rbd_assert(snap->size == snap_size);
4104                         rbd_assert(!strcmp(snap->name, snap_name));
4105                         rbd_assert(snap->features == snap_features);
4106
4107                         /* Done with this list entry; advance */
4108
4109                         links = links->next;
4110                 }
4111
4112                 /* Advance to the next entry in the snapshot context */
4113
4114                 index++;
4115         }
4116         dout("%s: done\n", __func__);
4117
4118         return 0;
4119 out_err:
4120         rbd_remove_all_snaps(rbd_dev);
4121
4122         return ret;
4123 }
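
/*
 * Worked example of the merge above (ids only, both sides sorted
 * highest first; values illustrative): given snap context [9 7 4]
 * and existing list [9 5 4], entry 9 matches and is kept, 7 is new
 * and is inserted, 5 (absent from the new context) is destroyed,
 * and 4 matches again, leaving [9 7 4].
 */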
4124
4125 static int rbd_bus_add_dev(struct rbd_device *rbd_dev)
4126 {
4127         struct device *dev;
4128         int ret;
4129
4130         mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
4131
4132         dev = &rbd_dev->dev;
4133         dev->bus = &rbd_bus_type;
4134         dev->type = &rbd_device_type;
4135         dev->parent = &rbd_root_dev;
4136         dev->release = rbd_dev_device_release;
4137         dev_set_name(dev, "%d", rbd_dev->dev_id);
4138         ret = device_register(dev);
4139
4140         mutex_unlock(&ctl_mutex);
4141
4142         return ret;
4143 }
4144
4145 static void rbd_bus_del_dev(struct rbd_device *rbd_dev)
4146 {
4147         device_unregister(&rbd_dev->dev);
4148 }
4149
4150 static atomic64_t rbd_dev_id_max = ATOMIC64_INIT(0);
4151
4152 /*
4153  * Get a unique rbd identifier for the given new rbd_dev, and add
4154  * the rbd_dev to the global list.  The minimum rbd id is 1.
4155  */
4156 static void rbd_dev_id_get(struct rbd_device *rbd_dev)
4157 {
4158         rbd_dev->dev_id = atomic64_inc_return(&rbd_dev_id_max);
4159
4160         spin_lock(&rbd_dev_list_lock);
4161         list_add_tail(&rbd_dev->node, &rbd_dev_list);
4162         spin_unlock(&rbd_dev_list_lock);
4163         dout("rbd_dev %p given dev id %llu\n", rbd_dev,
4164                 (unsigned long long) rbd_dev->dev_id);
4165 }
4166
4167 /*
4168  * Remove an rbd_dev from the global list, and record that its
4169  * identifier is no longer in use.
4170  */
4171 static void rbd_dev_id_put(struct rbd_device *rbd_dev)
4172 {
4173         struct list_head *tmp;
4174         int rbd_id = rbd_dev->dev_id;
4175         int max_id;
4176
4177         rbd_assert(rbd_id > 0);
4178
4179         dout("rbd_dev %p released dev id %llu\n", rbd_dev,
4180                 (unsigned long long) rbd_dev->dev_id);
4181         spin_lock(&rbd_dev_list_lock);
4182         list_del_init(&rbd_dev->node);
4183
4184         /*
4185          * If the id being "put" is not the current maximum, there
4186          * is nothing special we need to do.
4187          */
4188         if (rbd_id != atomic64_read(&rbd_dev_id_max)) {
4189                 spin_unlock(&rbd_dev_list_lock);
4190                 return;
4191         }
4192
4193         /*
4194          * We need to update the current maximum id.  Search the
4195          * list to find out what it is.  We're more likely to find
4196          * the maximum at the end, so search the list backward.
4197          */
4198         max_id = 0;
4199         list_for_each_prev(tmp, &rbd_dev_list) {
4200                 struct rbd_device *rbd_dev;
4201
4202                 rbd_dev = list_entry(tmp, struct rbd_device, node);
4203                 if (rbd_dev->dev_id > max_id)
4204                         max_id = rbd_dev->dev_id;
4205         }
4206         spin_unlock(&rbd_dev_list_lock);
4207
4208         /*
4209          * The max id could have been updated by rbd_dev_id_get(), in
4210          * which case it now accurately reflects the new maximum.
4211          * Be careful not to overwrite the maximum value in that
4212          * case.
4213          */
4214         atomic64_cmpxchg(&rbd_dev_id_max, rbd_id, max_id);
4215         dout("  max dev id has been reset\n");
4216 }
4217
4218 /*
4219  * Skips over white space at *buf, and updates *buf to point to the
4220  * first found non-space character (if any). Returns the length of
4221  * the token (string of non-white space characters) found.  Note
4222  * that *buf must be terminated with '\0'.
4223  */
4224 static inline size_t next_token(const char **buf)
4225 {
4226         /*
4227          * These are the characters that produce nonzero for
4228          * isspace() in the "C" and "POSIX" locales.
4229          */
4230         const char *spaces = " \f\n\r\t\v";
4231
4232         *buf += strspn(*buf, spaces);   /* Find start of token */
4233
4234         return strcspn(*buf, spaces);   /* Return token length */
4235 }
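
/*
 * For example (illustrative): with *buf pointing at "  rbd foo",
 * next_token(&buf) returns 3 and leaves *buf pointing at "rbd foo".
 */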
4236
4237 /*
4238  * Finds the next token in *buf, and if the provided token buffer is
4239  * big enough, copies the found token into it.  The result, if
4240  * copied, is guaranteed to be terminated with '\0'.  Note that *buf
4241  * must be terminated with '\0' on entry.
4242  *
4243  * Returns the length of the token found (not including the '\0').
4244  * Return value will be 0 if no token is found, and it will be >=
4245  * token_size if the token would not fit.
4246  *
4247  * The *buf pointer will be updated to point beyond the end of the
4248  * found token.  Note that this occurs even if the token buffer is
4249  * too small to hold it.
4250  */
4251 static inline size_t copy_token(const char **buf,
4252                                 char *token,
4253                                 size_t token_size)
4254 {
4255         size_t len;
4256
4257         len = next_token(buf);
4258         if (len < token_size) {
4259                 memcpy(token, *buf, len);
4260                 *(token + len) = '\0';
4261         }
4262         *buf += len;
4263
4264         return len;
4265 }
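
/*
 * For example (illustrative): copy_token(&buf, token, 4) on "pool x"
 * returns 4 without copying, since "pool" plus its '\0' needs 5
 * bytes; with token_size 5 it copies "pool\0".  Either way *buf ends
 * up pointing at " x".
 */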
4266
4267 /*
4268  * Finds the next token in *buf, dynamically allocates a buffer big
4269  * enough to hold a copy of it, and copies the token into the new
4270  * buffer.  The copy is guaranteed to be terminated with '\0'.  Note
4271  * that a duplicate buffer is created even for a zero-length token.
4272  *
4273  * Returns a pointer to the newly-allocated duplicate, or a null
4274  * pointer if memory for the duplicate was not available.  If
4275  * the lenp argument is a non-null pointer, the length of the token
4276  * (not including the '\0') is returned in *lenp.
4277  *
4278  * If successful, the *buf pointer will be updated to point beyond
4279  * the end of the found token.
4280  *
4281  * Note: uses GFP_KERNEL for allocation.
4282  */
4283 static inline char *dup_token(const char **buf, size_t *lenp)
4284 {
4285         char *dup;
4286         size_t len;
4287
4288         len = next_token(buf);
4289         dup = kmemdup(*buf, len + 1, GFP_KERNEL);
4290         if (!dup)
4291                 return NULL;
4292         *(dup + len) = '\0';
4293         *buf += len;
4294
4295         if (lenp)
4296                 *lenp = len;
4297
4298         return dup;
4299 }
4300
4301 /*
4302  * Parse the options provided for an "rbd add" (i.e., rbd image
4303  * mapping) request.  These arrive via a write to /sys/bus/rbd/add,
4304  * and the data written is passed here via a NUL-terminated buffer.
4305  * Returns 0 if successful or an error code otherwise.
4306  *
4307  * The information extracted from these options is recorded in
4308  * the other parameters which return dynamically-allocated
4309  * structures:
4310  *  ceph_opts
4311  *      The address of a pointer that will refer to a ceph options
4312  *      structure.  Caller must release the returned pointer using
4313  *      ceph_destroy_options() when it is no longer needed.
4314  *  rbd_opts
4315  *      Address of an rbd options pointer.  Fully initialized by
4316  *      this function; caller must release with kfree().
4317  *  spec
4318  *      Address of an rbd image specification pointer.  Fully
4319  *      initialized by this function based on parsed options.
4320  *      Caller must release with rbd_spec_put().
4321  *
4322  * The options passed take this form:
4323  *  <mon_addrs> <options> <pool_name> <image_name> [<snap_name>]
4324  * where:
4325  *  <mon_addrs>
4326  *      A comma-separated list of one or more monitor addresses.
4327  *      A monitor address is an ip address, optionally followed
4328  *      by a port number (separated by a colon).
4329  *        I.e.:  ip1[:port1][,ip2[:port2]...]
4330  *  <options>
4331  *      A comma-separated list of ceph and/or rbd options.
4332  *  <pool_name>
4333  *      The name of the rados pool containing the rbd image.
4334  *  <image_name>
4335  *      The name of the image in that pool to map.
4336  *  <snap_name>
4337  *      An optional snapshot name.  If provided, the mapping will
4338  *      present data from the image at the time that snapshot was
4339  *      created.  The image head is used if no snapshot name is
4340  *      provided.  Snapshot mappings are always read-only.
4341  */
4342 static int rbd_add_parse_args(const char *buf,
4343                                 struct ceph_options **ceph_opts,
4344                                 struct rbd_options **opts,
4345                                 struct rbd_spec **rbd_spec)
4346 {
4347         size_t len;
4348         char *options;
4349         const char *mon_addrs;
4350         char *snap_name;
4351         size_t mon_addrs_size;
4352         struct rbd_spec *spec = NULL;
4353         struct rbd_options *rbd_opts = NULL;
4354         struct ceph_options *copts;
4355         int ret;
4356
4357         /* The first four tokens are required */
4358
4359         len = next_token(&buf);
4360         if (!len) {
4361                 rbd_warn(NULL, "no monitor address(es) provided");
4362                 return -EINVAL;
4363         }
4364         mon_addrs = buf;
4365         mon_addrs_size = len + 1;
4366         buf += len;
4367
4368         ret = -EINVAL;
4369         options = dup_token(&buf, NULL);
4370         if (!options)
4371                 return -ENOMEM;
4372         if (!*options) {
4373                 rbd_warn(NULL, "no options provided");
4374                 goto out_err;
4375         }
4376
4377         spec = rbd_spec_alloc();
4378         if (!spec)
4379                 goto out_mem;
4380
4381         spec->pool_name = dup_token(&buf, NULL);
4382         if (!spec->pool_name)
4383                 goto out_mem;
4384         if (!*spec->pool_name) {
4385                 rbd_warn(NULL, "no pool name provided");
4386                 goto out_err;
4387         }
4388
4389         spec->image_name = dup_token(&buf, NULL);
4390         if (!spec->image_name)
4391                 goto out_mem;
4392         if (!*spec->image_name) {
4393                 rbd_warn(NULL, "no image name provided");
4394                 goto out_err;
4395         }
4396
4397         /*
4398          * Snapshot name is optional; default is to use "-"
4399          * (indicating the head/no snapshot).
4400          */
4401         len = next_token(&buf);
4402         if (!len) {
4403                 buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */
4404                 len = sizeof (RBD_SNAP_HEAD_NAME) - 1;
4405         } else if (len > RBD_MAX_SNAP_NAME_LEN) {
4406                 ret = -ENAMETOOLONG;
4407                 goto out_err;
4408         }
4409         snap_name = kmemdup(buf, len + 1, GFP_KERNEL);
4410         if (!snap_name)
4411                 goto out_mem;
4412         *(snap_name + len) = '\0';
4413         spec->snap_name = snap_name;
4414
4415         /* Initialize all rbd options to the defaults */
4416
4417         rbd_opts = kzalloc(sizeof (*rbd_opts), GFP_KERNEL);
4418         if (!rbd_opts)
4419                 goto out_mem;
4420
4421         rbd_opts->read_only = RBD_READ_ONLY_DEFAULT;
4422
4423         copts = ceph_parse_options(options, mon_addrs,
4424                                         mon_addrs + mon_addrs_size - 1,
4425                                         parse_rbd_opts_token, rbd_opts);
4426         if (IS_ERR(copts)) {
4427                 ret = PTR_ERR(copts);
4428                 goto out_err;
4429         }
4430         kfree(options);
4431
4432         *ceph_opts = copts;
4433         *opts = rbd_opts;
4434         *rbd_spec = spec;
4435
4436         return 0;
4437 out_mem:
4438         ret = -ENOMEM;
4439 out_err:
4440         kfree(rbd_opts);
4441         rbd_spec_put(spec);
4442         kfree(options);
4443
4444         return ret;
4445 }
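
/*
 * Example write to /sys/bus/rbd/add in the form parsed above (all
 * values illustrative):
 *
 *	$ echo "1.2.3.4:6789 name=admin,secret=<key> rbd myimage mysnap" \
 *		> /sys/bus/rbd/add
 */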
4446
4447 /*
4448  * An rbd format 2 image has a unique identifier, distinct from the
4449  * name given to it by the user.  Internally, that identifier is
4450  * what's used to specify the names of objects related to the image.
4451  *
4452  * A special "rbd id" object is used to map an rbd image name to its
4453  * id.  If that object doesn't exist, then there is no v2 rbd image
4454  * with the supplied name.
4455  *
4456  * This function will record the given rbd_dev's image_id field if
4457  * it can be determined, and in that case will return 0.  If any
4458  * errors occur a negative errno will be returned and the rbd_dev's
4459  * image_id field will be unchanged (and should be NULL).
4460  */
4461 static int rbd_dev_image_id(struct rbd_device *rbd_dev)
4462 {
4463         int ret;
4464         size_t size;
4465         char *object_name;
4466         void *response;
4467         char *image_id;
4468
4469         /*
4470          * When probing a parent image, the image id is already
4471          * known (and the image name likely is not).  There's no
4472          * need to fetch the image id again in this case.  We
4473          * do still need to set the image format though.
4474          */
4475         if (rbd_dev->spec->image_id) {
4476                 rbd_dev->image_format = *rbd_dev->spec->image_id ? 2 : 1;
4477
4478                 return 0;
4479         }
4480
4481         /*
4482          * First, see if the format 2 image id object exists, and if
4483          * so, get the image's persistent id from it.
4484          */
4485         size = sizeof (RBD_ID_PREFIX) + strlen(rbd_dev->spec->image_name);
4486         object_name = kmalloc(size, GFP_NOIO);
4487         if (!object_name)
4488                 return -ENOMEM;
4489         sprintf(object_name, "%s%s", RBD_ID_PREFIX, rbd_dev->spec->image_name);
4490         dout("rbd id object name is %s\n", object_name);
4491
4492         /* Response will be an encoded string, which includes a length */
4493
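        /*
         * Sketch of the expected layout, assuming the usual ceph
         * encoding (a little-endian length prefix followed by the
         * string bytes):
         *
         *     __le32 len;
         *     char   data[len];
         *
         * ceph_extract_encoded_string() below decodes this form.
         */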
4494         size = sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX;
4495         response = kzalloc(size, GFP_NOIO);
4496         if (!response) {
4497                 ret = -ENOMEM;
4498                 goto out;
4499         }
4500
4501         /* If it doesn't exist we'll assume it's a format 1 image */
4502
4503         ret = rbd_obj_method_sync(rbd_dev, object_name,
4504                                 "rbd", "get_id", NULL, 0,
4505                                 response, RBD_IMAGE_ID_LEN_MAX);
4506         dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4507         if (ret == -ENOENT) {
4508                 image_id = kstrdup("", GFP_KERNEL);
4509                 ret = image_id ? 0 : -ENOMEM;
4510                 if (!ret)
4511                         rbd_dev->image_format = 1;
4512         } else if (ret > (int) sizeof (__le32)) {
4513                 void *p = response;
4514
4515                 image_id = ceph_extract_encoded_string(&p, p + ret,
4516                                                 NULL, GFP_NOIO);
4517                 ret = IS_ERR(image_id) ? PTR_ERR(image_id) : 0;
4518                 if (!ret)
4519                         rbd_dev->image_format = 2;
4520         } else {
4521                 ret = -EINVAL;
4522         }
4523
4524         if (!ret) {
4525                 rbd_dev->spec->image_id = image_id;
4526                 dout("image_id is %s\n", image_id);
4527         }
4528 out:
4529         kfree(response);
4530         kfree(object_name);
4531
4532         return ret;
4533 }
4534
4535 /* Undo whatever state changes are made by v1 or v2 image probe */
4536
4537 static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
4538 {
4539         struct rbd_image_header *header;
4540
4541         rbd_dev_remove_parent(rbd_dev);
4542         rbd_spec_put(rbd_dev->parent_spec);
4543         rbd_dev->parent_spec = NULL;
4544         rbd_dev->parent_overlap = 0;
4545
4546         /* Free dynamic fields from the header, then zero it out */
4547
4548         header = &rbd_dev->header;
4549         ceph_put_snap_context(header->snapc);
4550         kfree(header->snap_sizes);
4551         kfree(header->snap_names);
4552         kfree(header->object_prefix);
4553         memset(header, 0, sizeof (*header));
4554 }
4555
4556 static int rbd_dev_v1_probe(struct rbd_device *rbd_dev)
4557 {
4558         int ret;
4559
4560         /* Populate rbd image metadata */
4561
4562         ret = rbd_read_header(rbd_dev, &rbd_dev->header);
4563         if (ret < 0)
4564                 goto out_err;
4565
4566         /* Version 1 images have no parent (no layering) */
4567
4568         rbd_dev->parent_spec = NULL;
4569         rbd_dev->parent_overlap = 0;
4570
4571         dout("discovered version 1 image, header name is %s\n",
4572                 rbd_dev->header_name);
4573
4574         return 0;
4575
4576 out_err:
4577         kfree(rbd_dev->header_name);
4578         rbd_dev->header_name = NULL;
4579         kfree(rbd_dev->spec->image_id);
4580         rbd_dev->spec->image_id = NULL;
4581
4582         return ret;
4583 }
4584
4585 static int rbd_dev_v2_probe(struct rbd_device *rbd_dev)
4586 {
4587         int ret;
4588
4589         ret = rbd_dev_v2_image_size(rbd_dev);
4590         if (ret)
4591                 goto out_err;
4592
4593         /* Get the object prefix (a.k.a. block_name) for the image */
4594
4595         ret = rbd_dev_v2_object_prefix(rbd_dev);
4596         if (ret)
4597                 goto out_err;
4598
4599         /* Get and check the features for the image */
4600
4601         ret = rbd_dev_v2_features(rbd_dev);
4602         if (ret)
4603                 goto out_err;
4604
4605         /* If the image supports layering, get the parent info */
4606
4607         if (rbd_dev->header.features & RBD_FEATURE_LAYERING) {
4608                 ret = rbd_dev_v2_parent_info(rbd_dev);
4609                 if (ret)
4610                         goto out_err;
4611
4612                 /*
4613                  * Don't print a warning for parent images.  We can
4614                  * tell at this point because we won't yet know its
4615                  * pool name (just its pool id).
4616                  */
4617                 if (rbd_dev->spec->pool_name)
4618                         rbd_warn(rbd_dev, "WARNING: kernel layering "
4619                                         "is EXPERIMENTAL!");
4620         }
4621
4622         /* If the image supports fancy striping, get its parameters */
4623
4624         if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) {
4625                 ret = rbd_dev_v2_striping_info(rbd_dev);
4626                 if (ret < 0)
4627                         goto out_err;
4628         }
4629
4630         /* crypto and compression type aren't (yet) supported for v2 images */
4631
4632         rbd_dev->header.crypt_type = 0;
4633         rbd_dev->header.comp_type = 0;
4634
4635         /* Get the snapshot context for the image */
4636
4637         ret = rbd_dev_v2_snap_context(rbd_dev);
4638         if (ret)
4639                 goto out_err;
4640
4641         dout("discovered version 2 image, header name is %s\n",
4642                 rbd_dev->header_name);
4643
4644         return 0;
4645 out_err:
4646         rbd_dev->parent_overlap = 0;
4647         rbd_spec_put(rbd_dev->parent_spec);
4648         rbd_dev->parent_spec = NULL;
4649         kfree(rbd_dev->header_name);
4650         rbd_dev->header_name = NULL;
4651         kfree(rbd_dev->header.object_prefix);
4652         rbd_dev->header.object_prefix = NULL;
4653
4654         return ret;
4655 }
4656
4657 static int rbd_dev_probe_parent(struct rbd_device *rbd_dev)
4658 {
4659         struct rbd_device *parent = NULL;
4660         struct rbd_spec *parent_spec;
4661         struct rbd_client *rbdc;
4662         int ret;
4663
4664         if (!rbd_dev->parent_spec)
4665                 return 0;
4666         /*
4667          * We need to pass a reference to the client and the parent
4668          * spec when creating the parent rbd_dev.  Images related by
4669          * parent/child relationships always share both.
4670          */
4671         parent_spec = rbd_spec_get(rbd_dev->parent_spec);
4672         rbdc = __rbd_get_client(rbd_dev->rbd_client);
4673
4674         ret = -ENOMEM;
4675         parent = rbd_dev_create(rbdc, parent_spec);
4676         if (!parent)
4677                 goto out_err;
4678
4679         ret = rbd_dev_image_probe(parent);
4680         if (ret < 0)
4681                 goto out_err;
4682         rbd_dev->parent = parent;
4683
4684         return 0;
4685 out_err:
4686         if (parent) {
4687                 /* rbd_dev_destroy() drops the client and spec refs */
4689                 rbd_dev_destroy(parent);
4690         } else {
4691                 rbd_put_client(rbdc);
4692                 rbd_spec_put(parent_spec);
4693         }
4694
4695         return ret;
4696 }
4697
4698 static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
4699 {
4700         int ret;
4701
4702         ret = rbd_dev_mapping_set(rbd_dev);
4703         if (ret)
4704                 return ret;
4705
4706         /* generate unique id: find highest unique id, add one */
4707         rbd_dev_id_get(rbd_dev);
4708
4709         /* Fill in the device name, now that we have its id. */
4710         BUILD_BUG_ON(DEV_NAME_LEN
4711                         < sizeof (RBD_DRV_NAME) + MAX_INT_FORMAT_WIDTH);
4712         sprintf(rbd_dev->name, "%s%d", RBD_DRV_NAME, rbd_dev->dev_id);
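        /* e.g., dev_id 0 with RBD_DRV_NAME "rbd" gives "rbd0" */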
4713
4714         /* Get our block major device number. */
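        /*
         * Passing 0 as the requested major asks register_blkdev() to
         * allocate one dynamically; the allocated major is returned.
         */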
4715
4716         ret = register_blkdev(0, rbd_dev->name);
4717         if (ret < 0)
4718                 goto err_out_id;
4719         rbd_dev->major = ret;
4720
4721         /* Set up the blkdev mapping. */
4722
4723         ret = rbd_init_disk(rbd_dev);
4724         if (ret)
4725                 goto err_out_blkdev;
4726
4727         ret = rbd_bus_add_dev(rbd_dev);
4728         if (ret)
4729                 goto err_out_disk;
4730
4731         /* Everything's ready.  Announce the disk to the world. */
4732
4733         set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
4734         set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
4735         add_disk(rbd_dev->disk);
4736
4737         pr_info("%s: added with size 0x%llx\n", rbd_dev->disk->disk_name,
4738                 (unsigned long long) rbd_dev->mapping.size);
4739
4740         return ret;
4741
4742 err_out_disk:
4743         rbd_free_disk(rbd_dev);
4744 err_out_blkdev:
4745         unregister_blkdev(rbd_dev->major, rbd_dev->name);
4746 err_out_id:
4747         rbd_dev_id_put(rbd_dev);
4748         rbd_dev_mapping_clear(rbd_dev);
4749
4750         return ret;
4751 }
4752
4753 static int rbd_dev_header_name(struct rbd_device *rbd_dev)
4754 {
4755         struct rbd_spec *spec = rbd_dev->spec;
4756         size_t size;
4757
4758         /* Record the header object name for this rbd image. */
4759
4760         rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
4761
4762         if (rbd_dev->image_format == 1)
4763                 size = strlen(spec->image_name) + sizeof (RBD_SUFFIX);
4764         else
4765                 size = sizeof (RBD_HEADER_PREFIX) + strlen(spec->image_id);
4766
4767         rbd_dev->header_name = kmalloc(size, GFP_KERNEL);
4768         if (!rbd_dev->header_name)
4769                 return -ENOMEM;
4770
4771         if (rbd_dev->image_format == 1)
4772                 sprintf(rbd_dev->header_name, "%s%s",
4773                         spec->image_name, RBD_SUFFIX);
4774         else
4775                 sprintf(rbd_dev->header_name, "%s%s",
4776                         RBD_HEADER_PREFIX, spec->image_id);
4777         return 0;
4778 }
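
/*
 * Illustrative examples (assuming RBD_SUFFIX is ".rbd" and
 * RBD_HEADER_PREFIX is "rbd_header."): a format 1 image named "foo"
 * gets header object "foo.rbd", while a format 2 image with id
 * "1234" gets "rbd_header.1234".
 */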
4779
4780 static void rbd_dev_image_release(struct rbd_device *rbd_dev)
4781 {
4782         int ret;
4783
4784         rbd_remove_all_snaps(rbd_dev);
4785         rbd_dev_unprobe(rbd_dev);
4786         ret = rbd_dev_header_watch_sync(rbd_dev, 0);
4787         if (ret)
4788                 rbd_warn(rbd_dev, "failed to cancel watch event (%d)", ret);
4789         kfree(rbd_dev->header_name);
4790         rbd_dev->header_name = NULL;
4791         rbd_dev->image_format = 0;
4792         kfree(rbd_dev->spec->image_id);
4793         rbd_dev->spec->image_id = NULL;
4794
4795         rbd_dev_destroy(rbd_dev);
4796 }
4797
4798 /*
4799  * Probe for the existence of the header object for the given rbd
4800  * device.  For format 2 images this includes determining the image
4801  * id.
4802  */
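/*
 * In outline, the probe below proceeds: image id -> header object
 * name -> watch registration -> v1/v2 header probe -> snapshot
 * update -> spec update -> parent probe, unwinding in reverse order
 * on failure.
 */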
4803 static int rbd_dev_image_probe(struct rbd_device *rbd_dev)
4804 {
4805         int ret;
4806         int tmp;
4807
4808         /*
4809          * Get the id from the image id object.  If it's not a
4810          * format 2 image, we'll get ENOENT back, and we'll assume
4811          * it's a format 1 image.
4812          */
4813         ret = rbd_dev_image_id(rbd_dev);
4814         if (ret)
4815                 return ret;
4816         rbd_assert(rbd_dev->spec->image_id);
4817         rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
4818
4819         ret = rbd_dev_header_name(rbd_dev);
4820         if (ret)
4821                 goto err_out_format;
4822
4823         ret = rbd_dev_header_watch_sync(rbd_dev, 1);
4824         if (ret)
4825                 goto out_header_name;
4826
4827         if (rbd_dev->image_format == 1)
4828                 ret = rbd_dev_v1_probe(rbd_dev);
4829         else
4830                 ret = rbd_dev_v2_probe(rbd_dev);
4831         if (ret)
4832                 goto err_out_watch;
4833
4834         ret = rbd_dev_snaps_update(rbd_dev);
4835         if (ret)
4836                 goto err_out_probe;
4837
4838         ret = rbd_dev_spec_update(rbd_dev);
4839         if (ret)
4840                 goto err_out_snaps;
4841
4842         ret = rbd_dev_probe_parent(rbd_dev);
4843         if (!ret)
4844                 return 0;
4845
4846 err_out_snaps:
4847         rbd_remove_all_snaps(rbd_dev);
4848 err_out_probe:
4849         rbd_dev_unprobe(rbd_dev);
4850 err_out_watch:
4851         tmp = rbd_dev_header_watch_sync(rbd_dev, 0);
4852         if (tmp)
4853                 rbd_warn(rbd_dev, "unable to tear down watch request");
4854 out_header_name:
4855         kfree(rbd_dev->header_name);
4856         rbd_dev->header_name = NULL;
4857 err_out_format:
4858         rbd_dev->image_format = 0;
4859         kfree(rbd_dev->spec->image_id);
4860         rbd_dev->spec->image_id = NULL;
4861
4862         dout("probe failed, returning %d\n", ret);
4863
4864         return ret;
4865 }
4866
4867 static ssize_t rbd_add(struct bus_type *bus,
4868                        const char *buf,
4869                        size_t count)
4870 {
4871         struct rbd_device *rbd_dev = NULL;
4872         struct ceph_options *ceph_opts = NULL;
4873         struct rbd_options *rbd_opts = NULL;
4874         struct rbd_spec *spec = NULL;
4875         struct rbd_client *rbdc;
4876         struct ceph_osd_client *osdc;
4877         int rc = -ENOMEM;
4878
4879         if (!try_module_get(THIS_MODULE))
4880                 return -ENODEV;
4881
4882         /* parse add command */
4883         rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
4884         if (rc < 0)
4885                 goto err_out_module;
4886
4887         rbdc = rbd_get_client(ceph_opts);
4888         if (IS_ERR(rbdc)) {
4889                 rc = PTR_ERR(rbdc);
4890                 goto err_out_args;
4891         }
4892         ceph_opts = NULL;       /* rbd_dev client now owns this */
4893
4894         /* pick the pool */
4895         osdc = &rbdc->client->osdc;
4896         rc = ceph_pg_poolid_by_name(osdc->osdmap, spec->pool_name);
4897         if (rc < 0)
4898                 goto err_out_client;
4899         spec->pool_id = (u64)rc;
4900
4901         /* The ceph file layout needs to fit pool id in 32 bits */
4902
4903         if (spec->pool_id > (u64)U32_MAX) {
4904                 rbd_warn(NULL, "pool id too large (%llu > %u)",
4905                                 (unsigned long long)spec->pool_id, U32_MAX);
4906                 rc = -EIO;
4907                 goto err_out_client;
4908         }
4909
4910         rbd_dev = rbd_dev_create(rbdc, spec);
4911         if (!rbd_dev)
4912                 goto err_out_client;
4913         rbdc = NULL;            /* rbd_dev now owns this */
4914         spec = NULL;            /* rbd_dev now owns this */
4915
4916         rbd_dev->mapping.read_only = rbd_opts->read_only;
4917         kfree(rbd_opts);
4918         rbd_opts = NULL;        /* done with this */
4919
4920         rc = rbd_dev_image_probe(rbd_dev);
4921         if (rc < 0)
4922                 goto err_out_rbd_dev;
4923
4924         rc = rbd_dev_device_setup(rbd_dev);
4925         if (!rc)
4926                 return count;
4927
4928         rbd_dev_image_release(rbd_dev);
4929 err_out_rbd_dev:
4930         rbd_dev_destroy(rbd_dev);
4931 err_out_client:
4932         rbd_put_client(rbdc);
4933 err_out_args:
4934         if (ceph_opts)
4935                 ceph_destroy_options(ceph_opts);
4936         kfree(rbd_opts);
4937         rbd_spec_put(spec);
4938 err_out_module:
4939         module_put(THIS_MODULE);
4940
4941         dout("Error adding device %s\n", buf);
4942
4943         return (ssize_t)rc;
4944 }
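
/*
 * Example usage (illustrative): a device is mapped by writing the add
 * string to sysfs, e.g.
 *
 *     # echo "1.2.3.4:6789 name=admin rbd foo" > /sys/bus/rbd/add
 *
 * which creates a block device rbd<id> on success.
 */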
4945
4946 static struct rbd_device *__rbd_get_dev(unsigned long dev_id)
4947 {
4948         struct list_head *tmp;
4949         struct rbd_device *rbd_dev;
4950
4951         spin_lock(&rbd_dev_list_lock);
4952         list_for_each(tmp, &rbd_dev_list) {
4953                 rbd_dev = list_entry(tmp, struct rbd_device, node);
4954                 if (rbd_dev->dev_id == dev_id) {
4955                         spin_unlock(&rbd_dev_list_lock);
4956                         return rbd_dev;
4957                 }
4958         }
4959         spin_unlock(&rbd_dev_list_lock);
4960         return NULL;
4961 }
4962
4963 static void rbd_dev_device_release(struct device *dev)
4964 {
4965         struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4966
4967         rbd_free_disk(rbd_dev);
4968         clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
4969         rbd_dev_clear_mapping(rbd_dev);
4970         unregister_blkdev(rbd_dev->major, rbd_dev->name);
4971         rbd_dev->major = 0;
4972         rbd_dev_id_put(rbd_dev);
4973         rbd_dev_mapping_clear(rbd_dev);
4974 }
4975
4976 static void rbd_dev_remove_parent(struct rbd_device *rbd_dev)
4977 {
4978         while (rbd_dev->parent) {
4979                 struct rbd_device *first = rbd_dev;
4980                 struct rbd_device *second = first->parent;
4981                 struct rbd_device *third;
4982
4983                 /*
4984                  * Follow to the parent with no grandparent and
4985                  * remove it.
4986                  */
4987                 while (second && (third = second->parent)) {
4988                         first = second;
4989                         second = third;
4990                 }
4991                 rbd_assert(second);
4992                 rbd_dev_image_release(second);
4993                 first->parent = NULL;
4994                 first->parent_overlap = 0;
4995
4996                 rbd_assert(first->parent_spec);
4997                 rbd_spec_put(first->parent_spec);
4998                 first->parent_spec = NULL;
4999         }
5000 }
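
/*
 * For example (illustrative): with a chain base -> parent ->
 * grandparent, the inner loop above walks to the grandparent and
 * releases it first, so each pass removes the topmost ancestor until
 * no parent remains.
 */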
5001
5002 static ssize_t rbd_remove(struct bus_type *bus,
5003                           const char *buf,
5004                           size_t count)
5005 {
5006         struct rbd_device *rbd_dev = NULL;
5007         int target_id;
5008         unsigned long ul;
5009         int ret;
5010
5011         ret = strict_strtoul(buf, 10, &ul);
5012         if (ret)
5013                 return ret;
5014
5015         /* convert to int; abort if we lost anything in the conversion */
5016         target_id = (int) ul;
5017         if (target_id != ul)
5018                 return -EINVAL;
5019
5020         mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
5021
5022         rbd_dev = __rbd_get_dev(target_id);
5023         if (!rbd_dev) {
5024                 ret = -ENOENT;
5025                 goto done;
5026         }
5027
5028         spin_lock_irq(&rbd_dev->lock);
5029         if (rbd_dev->open_count)
5030                 ret = -EBUSY;
5031         else
5032                 set_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags);
5033         spin_unlock_irq(&rbd_dev->lock);
5034         if (ret < 0)
5035                 goto done;
5036         ret = count;
5037         rbd_bus_del_dev(rbd_dev);
5038         rbd_dev_image_release(rbd_dev);
5039         module_put(THIS_MODULE);
5040 done:
5041         mutex_unlock(&ctl_mutex);
5042
5043         return ret;
5044 }
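
/*
 * Example usage (illustrative): writing a device id tears the mapping
 * down, e.g.
 *
 *     # echo 0 > /sys/bus/rbd/remove
 *
 * This fails with -EBUSY while the device is still open.
 */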
5045
5046 /*
5047  * create control files in sysfs
5048  * /sys/bus/rbd/...
5049  */
5050 static int rbd_sysfs_init(void)
5051 {
5052         int ret;
5053
5054         ret = device_register(&rbd_root_dev);
5055         if (ret < 0)
5056                 return ret;
5057
5058         ret = bus_register(&rbd_bus_type);
5059         if (ret < 0)
5060                 device_unregister(&rbd_root_dev);
5061
5062         return ret;
5063 }
5064
5065 static void rbd_sysfs_cleanup(void)
5066 {
5067         bus_unregister(&rbd_bus_type);
5068         device_unregister(&rbd_root_dev);
5069 }
5070
5071 static int __init rbd_init(void)
5072 {
5073         int rc;
5074
5075         if (!libceph_compatible(NULL)) {
5076                 rbd_warn(NULL, "libceph incompatibility (quitting)");
5077
5078                 return -EINVAL;
5079         }
5080         rc = rbd_sysfs_init();
5081         if (rc)
5082                 return rc;
5083         pr_info("loaded " RBD_DRV_NAME_LONG "\n");
5084         return 0;
5085 }
5086
5087 static void __exit rbd_exit(void)
5088 {
5089         rbd_sysfs_cleanup();
5090 }
5091
5092 module_init(rbd_init);
5093 module_exit(rbd_exit);
5094
5095 MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
5096 MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
5097 MODULE_DESCRIPTION("rados block device");
5098
5099 /* following authorship retained from original osdblk.c */
5100 MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>");
5101
5102 MODULE_LICENSE("GPL");