
/*
   rbd.c -- Export ceph rados objects as a Linux block device


   based on drivers/block/osdblk.c:

   Copyright 2009 Red Hat, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.



   For usage instructions, please refer to:

                 Documentation/ABI/testing/sysfs-bus-rbd

 */

#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/mon_client.h>
#include <linux/ceph/decode.h>
#include <linux/parser.h>
#include <linux/bsearch.h>

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/slab.h>

#include "rbd_types.h"

#define RBD_DEBUG       /* Activate rbd_assert() calls */

/*
 * The basic unit of block I/O is a sector.  It is interpreted in a
 * number of contexts in Linux (blk, bio, genhd), but the default is
 * universally 512 bytes.  These symbols are just slightly more
 * meaningful than the bare numbers they represent.
 */
#define SECTOR_SHIFT    9
#define SECTOR_SIZE     (1ULL << SECTOR_SHIFT)

#define RBD_DRV_NAME "rbd"
#define RBD_DRV_NAME_LONG "rbd (rados block device)"

#define RBD_MINORS_PER_MAJOR    256             /* max minors per blkdev */

#define RBD_SNAP_DEV_NAME_PREFIX        "snap_"
#define RBD_MAX_SNAP_NAME_LEN   \
                        (NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))

#define RBD_MAX_SNAP_COUNT      510     /* allows max snapc to fit in 4KB */
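/*
 * A worked check of the 4KB bound above: 510 snapshot ids at
 * sizeof (__le64) == 8 bytes each is 4080 bytes, which leaves
 * 16 bytes of a 4096-byte buffer for the fixed portion of the
 * snapshot context (assuming that fixed portion fits in 16 bytes).
 */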

#define RBD_SNAP_HEAD_NAME      "-"

#define BAD_SNAP_INDEX  U32_MAX         /* invalid index into snap array */

/* This allows a single page to hold an image name sent by OSD */
#define RBD_IMAGE_NAME_LEN_MAX  (PAGE_SIZE - sizeof (__le32) - 1)
#define RBD_IMAGE_ID_LEN_MAX    64

#define RBD_OBJ_PREFIX_LEN_MAX  64

/* Feature bits */

#define RBD_FEATURE_LAYERING    (1<<0)
#define RBD_FEATURE_STRIPINGV2  (1<<1)
#define RBD_FEATURES_ALL \
            (RBD_FEATURE_LAYERING | RBD_FEATURE_STRIPINGV2)

/* Features supported by this (client software) implementation. */

#define RBD_FEATURES_SUPPORTED  (RBD_FEATURES_ALL)

/*
 * An RBD device name will be "rbd#", where the "rbd" comes from
 * RBD_DRV_NAME above, and # is a unique integer identifier.
 * MAX_INT_FORMAT_WIDTH is used in ensuring DEV_NAME_LEN is big
 * enough to hold all possible device names.
 */
#define DEV_NAME_LEN            32
#define MAX_INT_FORMAT_WIDTH    ((5 * sizeof (int)) / 2 + 1)
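/*
 * The arithmetic behind MAX_INT_FORMAT_WIDTH: each byte of an int
 * contributes at most log10(256), roughly 2.41, decimal digits, so
 * (5 * sizeof (int)) / 2 digits is a safe overestimate, and the + 1
 * allows for a sign.  With a 4-byte int that yields
 * (5 * 4) / 2 + 1 = 11, enough for "-2147483648".
 */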

/*
 * block device image metadata (in-memory version)
 */
struct rbd_image_header {
        /* These six fields never change for a given rbd image */
        char *object_prefix;
        __u8 obj_order;
        __u8 crypt_type;
        __u8 comp_type;
        u64 stripe_unit;
        u64 stripe_count;
        u64 features;           /* Might be changeable someday? */

        /* The remaining fields need to be updated occasionally */
        u64 image_size;
        struct ceph_snap_context *snapc;
        char *snap_names;       /* format 1 only */
        u64 *snap_sizes;        /* format 1 only */
};

/*
 * An rbd image specification.
 *
 * The tuple (pool_id, image_id, snap_id) is sufficient to uniquely
 * identify an image.  Each rbd_dev structure includes a pointer to
 * an rbd_spec structure that encapsulates this identity.
 *
 * Each of the id's in an rbd_spec has an associated name.  For a
 * user-mapped image, the names are supplied and the id's associated
 * with them are looked up.  For a layered image, a parent image is
 * defined by the tuple, and the names are looked up.
 *
 * An rbd_dev structure contains a parent_spec pointer which is
 * non-null if the image it represents is a child in a layered
 * image.  This pointer will refer to the rbd_spec structure used
 * by the parent rbd_dev for its own identity (i.e., the structure
 * is shared between the parent and child).
 *
 * Since these structures are populated once, during the discovery
 * phase of image construction, they are effectively immutable so
 * we make no effort to synchronize access to them.
 *
 * Note that code herein does not assume the image name is known (it
 * could be a null pointer).
 */
struct rbd_spec {
        u64             pool_id;
        const char      *pool_name;

        const char      *image_id;
        const char      *image_name;

        u64             snap_id;
        const char      *snap_name;

        struct kref     kref;
};
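/*
 * An illustrative spec for an unlayered mapping of an image "foo"
 * in pool "rbd", mapped without a snapshot (the values here are
 * hypothetical, not taken from a real cluster):
 *
 *      pool_id = 2, pool_name = "rbd",
 *      image_id = (looked up from the osds), image_name = "foo",
 *      snap_id = CEPH_NOSNAP, snap_name = RBD_SNAP_HEAD_NAME
 */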

/*
 * an instance of the client.  multiple devices may share an rbd client.
 */
struct rbd_client {
        struct ceph_client      *client;
        struct kref             kref;
        struct list_head        node;
};

struct rbd_img_request;
typedef void (*rbd_img_callback_t)(struct rbd_img_request *);

#define BAD_WHICH       U32_MAX         /* Good which or bad which, which? */

struct rbd_obj_request;
typedef void (*rbd_obj_callback_t)(struct rbd_obj_request *);

enum obj_request_type {
        OBJ_REQUEST_NODATA, OBJ_REQUEST_BIO, OBJ_REQUEST_PAGES
};

enum obj_req_flags {
        OBJ_REQ_DONE,           /* completion flag: not done = 0, done = 1 */
        OBJ_REQ_IMG_DATA,       /* object usage: standalone = 0, image = 1 */
        OBJ_REQ_KNOWN,          /* EXISTS flag valid: no = 0, yes = 1 */
        OBJ_REQ_EXISTS,         /* target exists: no = 0, yes = 1 */
};

struct rbd_obj_request {
        const char              *object_name;
        u64                     offset;         /* object start byte */
        u64                     length;         /* bytes from offset */
        unsigned long           flags;

        /*
         * An object request associated with an image will have its
         * img_data flag set; a standalone object request will not.
         *
         * A standalone object request will have which == BAD_WHICH
         * and a null obj_request pointer.
         *
         * An object request initiated in support of a layered image
         * object (to check for its existence before a write) will
         * have which == BAD_WHICH and a non-null obj_request pointer.
         *
         * Finally, an object request for rbd image data will have
         * which != BAD_WHICH, and will have a non-null img_request
         * pointer.  The value of which will be in the range
         * 0..(img_request->obj_request_count-1).
         */
        union {
                struct rbd_obj_request  *obj_request;   /* STAT op */
                struct {
                        struct rbd_img_request  *img_request;
                        u64                     img_offset;
                        /* links for img_request->obj_requests list */
                        struct list_head        links;
                };
        };
        u32                     which;          /* posn image request list */

        enum obj_request_type   type;
        union {
                struct bio      *bio_list;
                struct {
                        struct page     **pages;
                        u32             page_count;
                };
        };
        struct page             **copyup_pages;

        struct ceph_osd_request *osd_req;

        u64                     xferred;        /* bytes transferred */
        int                     result;

        rbd_obj_callback_t      callback;
        struct completion       completion;

        struct kref             kref;
};

enum img_req_flags {
        IMG_REQ_WRITE,          /* I/O direction: read = 0, write = 1 */
        IMG_REQ_CHILD,          /* initiator: block = 0, child image = 1 */
        IMG_REQ_LAYERED,        /* ENOENT handling: normal = 0, layered = 1 */
};

struct rbd_img_request {
        struct rbd_device       *rbd_dev;
        u64                     offset; /* starting image byte offset */
        u64                     length; /* byte count from offset */
        unsigned long           flags;
        union {
                u64                     snap_id;        /* for reads */
                struct ceph_snap_context *snapc;        /* for writes */
        };
        union {
                struct request          *rq;            /* block request */
                struct rbd_obj_request  *obj_request;   /* obj req initiator */
        };
        struct page             **copyup_pages;
        spinlock_t              completion_lock;/* protects next_completion */
        u32                     next_completion;
        rbd_img_callback_t      callback;
        u64                     xferred;/* aggregate bytes transferred */
        int                     result; /* first nonzero obj_request result */

        u32                     obj_request_count;
        struct list_head        obj_requests;   /* rbd_obj_request structs */

        struct kref             kref;
};

#define for_each_obj_request(ireq, oreq) \
        list_for_each_entry(oreq, &(ireq)->obj_requests, links)
#define for_each_obj_request_from(ireq, oreq) \
        list_for_each_entry_from(oreq, &(ireq)->obj_requests, links)
#define for_each_obj_request_safe(ireq, oreq, n) \
        list_for_each_entry_safe_reverse(oreq, n, &(ireq)->obj_requests, links)
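/*
 * A minimal usage sketch for the iterators above; img_request is a
 * caller's local and handle_one() is a hypothetical helper:
 *
 *      struct rbd_obj_request *obj_request;
 *
 *      for_each_obj_request(img_request, obj_request)
 *              handle_one(obj_request);
 */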

struct rbd_mapping {
        u64                     size;
        u64                     features;
        bool                    read_only;
};

/*
 * a single device
 */
struct rbd_device {
        int                     dev_id;         /* blkdev unique id */

        int                     major;          /* blkdev assigned major */
        struct gendisk          *disk;          /* blkdev's gendisk and rq */

        u32                     image_format;   /* Either 1 or 2 */
        struct rbd_client       *rbd_client;

        char                    name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */

        spinlock_t              lock;           /* queue, flags, open_count */

        struct rbd_image_header header;
        unsigned long           flags;          /* possibly lock protected */
        struct rbd_spec         *spec;

        char                    *header_name;

        struct ceph_file_layout layout;

        struct ceph_osd_event   *watch_event;
        struct rbd_obj_request  *watch_request;

        struct rbd_spec         *parent_spec;
        u64                     parent_overlap;
        struct rbd_device       *parent;

        /* protects updating the header */
        struct rw_semaphore     header_rwsem;

        struct rbd_mapping      mapping;

        struct list_head        node;

        /* sysfs related */
        struct device           dev;
        unsigned long           open_count;     /* protected by lock */
};

/*
 * Flag bits for rbd_dev->flags.  If atomicity is required,
 * rbd_dev->lock is used to protect access.
 *
 * Currently, only the "removing" flag (which is coupled with the
 * "open_count" field) requires atomic access.
 */
enum rbd_dev_flags {
        RBD_DEV_FLAG_EXISTS,    /* mapped snapshot has not been deleted */
        RBD_DEV_FLAG_REMOVING,  /* this mapping is being removed */
};

static DEFINE_MUTEX(ctl_mutex);   /* Serialize open/close/setup/teardown */

static LIST_HEAD(rbd_dev_list);    /* devices */
static DEFINE_SPINLOCK(rbd_dev_list_lock);

static LIST_HEAD(rbd_client_list);              /* clients */
static DEFINE_SPINLOCK(rbd_client_list_lock);

/* Slab caches for frequently-allocated structures */

static struct kmem_cache        *rbd_img_request_cache;
static struct kmem_cache        *rbd_obj_request_cache;
static struct kmem_cache        *rbd_segment_name_cache;

static int rbd_img_request_submit(struct rbd_img_request *img_request);

static void rbd_dev_device_release(struct device *dev);

static ssize_t rbd_add(struct bus_type *bus, const char *buf,
                       size_t count);
static ssize_t rbd_remove(struct bus_type *bus, const char *buf,
                          size_t count);
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool read_only);

static struct bus_attribute rbd_bus_attrs[] = {
        __ATTR(add, S_IWUSR, NULL, rbd_add),
        __ATTR(remove, S_IWUSR, NULL, rbd_remove),
        __ATTR_NULL
};

static struct bus_type rbd_bus_type = {
        .name           = "rbd",
        .bus_attrs      = rbd_bus_attrs,
};

static void rbd_root_dev_release(struct device *dev)
{
}

static struct device rbd_root_dev = {
        .init_name =    "rbd",
        .release =      rbd_root_dev_release,
};

static __printf(2, 3)
void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
{
        struct va_format vaf;
        va_list args;

        va_start(args, fmt);
        vaf.fmt = fmt;
        vaf.va = &args;

        if (!rbd_dev)
                printk(KERN_WARNING "%s: %pV\n", RBD_DRV_NAME, &vaf);
        else if (rbd_dev->disk)
                printk(KERN_WARNING "%s: %s: %pV\n",
                        RBD_DRV_NAME, rbd_dev->disk->disk_name, &vaf);
        else if (rbd_dev->spec && rbd_dev->spec->image_name)
                printk(KERN_WARNING "%s: image %s: %pV\n",
                        RBD_DRV_NAME, rbd_dev->spec->image_name, &vaf);
        else if (rbd_dev->spec && rbd_dev->spec->image_id)
                printk(KERN_WARNING "%s: id %s: %pV\n",
                        RBD_DRV_NAME, rbd_dev->spec->image_id, &vaf);
        else    /* punt */
                printk(KERN_WARNING "%s: rbd_dev %p: %pV\n",
                        RBD_DRV_NAME, rbd_dev, &vaf);
        va_end(args);
}
#ifdef RBD_DEBUG
/* Wrapped in do/while (0) so the macro is safe in if/else bodies */
#define rbd_assert(expr)                                                \
                do {                                                    \
                        if (unlikely(!(expr))) {                        \
                                printk(KERN_ERR "\nAssertion failure in %s() " \
                                        "at line %d:\n\n"               \
                                        "\trbd_assert(%s);\n\n",        \
                                        __func__, __LINE__, #expr);     \
                                BUG();                                  \
                        }                                               \
                } while (0)
#else /* !RBD_DEBUG */
#  define rbd_assert(expr)      ((void) 0)
#endif /* !RBD_DEBUG */

static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request);
static void rbd_img_parent_read(struct rbd_obj_request *obj_request);
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);

static int rbd_dev_refresh(struct rbd_device *rbd_dev);
static int rbd_dev_v2_refresh(struct rbd_device *rbd_dev);
static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
                                        u64 snap_id);
static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
                                u8 *order, u64 *snap_size);
static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
                u64 *snap_features);
static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name);

static int rbd_open(struct block_device *bdev, fmode_t mode)
{
        struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
        bool removing = false;

        if ((mode & FMODE_WRITE) && rbd_dev->mapping.read_only)
                return -EROFS;

        spin_lock_irq(&rbd_dev->lock);
        if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
                removing = true;
        else
                rbd_dev->open_count++;
        spin_unlock_irq(&rbd_dev->lock);
        if (removing)
                return -ENOENT;

        mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
        (void) get_device(&rbd_dev->dev);
        set_device_ro(bdev, rbd_dev->mapping.read_only);
        mutex_unlock(&ctl_mutex);

        return 0;
}

static int rbd_release(struct gendisk *disk, fmode_t mode)
{
        struct rbd_device *rbd_dev = disk->private_data;
        unsigned long open_count_before;

        spin_lock_irq(&rbd_dev->lock);
        open_count_before = rbd_dev->open_count--;
        spin_unlock_irq(&rbd_dev->lock);
        rbd_assert(open_count_before > 0);

        mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
        put_device(&rbd_dev->dev);
        mutex_unlock(&ctl_mutex);

        return 0;
}

static const struct block_device_operations rbd_bd_ops = {
        .owner                  = THIS_MODULE,
        .open                   = rbd_open,
        .release                = rbd_release,
};

/*
 * Initialize an rbd client instance.
 * We own *ceph_opts.
 */
static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
{
        struct rbd_client *rbdc;
        int ret = -ENOMEM;

        dout("%s:\n", __func__);
        rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL);
        if (!rbdc)
                goto out_opt;

        kref_init(&rbdc->kref);
        INIT_LIST_HEAD(&rbdc->node);

        mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);

        rbdc->client = ceph_create_client(ceph_opts, rbdc, 0, 0);
        if (IS_ERR(rbdc->client))
                goto out_mutex;
        ceph_opts = NULL; /* Now rbdc->client is responsible for ceph_opts */

        ret = ceph_open_session(rbdc->client);
        if (ret < 0)
                goto out_err;

        spin_lock(&rbd_client_list_lock);
        list_add_tail(&rbdc->node, &rbd_client_list);
        spin_unlock(&rbd_client_list_lock);

        mutex_unlock(&ctl_mutex);
        dout("%s: rbdc %p\n", __func__, rbdc);

        return rbdc;

out_err:
        ceph_destroy_client(rbdc->client);
out_mutex:
        mutex_unlock(&ctl_mutex);
        kfree(rbdc);
out_opt:
        if (ceph_opts)
                ceph_destroy_options(ceph_opts);
        dout("%s: error %d\n", __func__, ret);

        return ERR_PTR(ret);
}

static struct rbd_client *__rbd_get_client(struct rbd_client *rbdc)
{
        kref_get(&rbdc->kref);

        return rbdc;
}

/*
 * Find a ceph client with specific addr and configuration.  If
 * found, bump its reference count.
 */
static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
{
        struct rbd_client *client_node;
        bool found = false;

        if (ceph_opts->flags & CEPH_OPT_NOSHARE)
                return NULL;

        spin_lock(&rbd_client_list_lock);
        list_for_each_entry(client_node, &rbd_client_list, node) {
                if (!ceph_compare_options(ceph_opts, client_node->client)) {
                        __rbd_get_client(client_node);

                        found = true;
                        break;
                }
        }
        spin_unlock(&rbd_client_list_lock);

        return found ? client_node : NULL;
}

/*
 * mount options
 */
enum {
        Opt_last_int,
        /* int args above */
        Opt_last_string,
        /* string args above */
        Opt_read_only,
        Opt_read_write,
        /* Boolean args above */
        Opt_last_bool,
};

static match_table_t rbd_opts_tokens = {
        /* int args above */
        /* string args above */
        {Opt_read_only, "read_only"},
        {Opt_read_only, "ro"},          /* Alternate spelling */
        {Opt_read_write, "read_write"},
        {Opt_read_write, "rw"},         /* Alternate spelling */
        /* Boolean args above */
        {-1, NULL}
};

struct rbd_options {
        bool    read_only;
};

#define RBD_READ_ONLY_DEFAULT   false

static int parse_rbd_opts_token(char *c, void *private)
{
        struct rbd_options *rbd_opts = private;
        substring_t argstr[MAX_OPT_ARGS];
        int token, intval, ret;

        token = match_token(c, rbd_opts_tokens, argstr);
        if (token < 0)
                return -EINVAL;

        if (token < Opt_last_int) {
                ret = match_int(&argstr[0], &intval);
                if (ret < 0) {
                        pr_err("bad mount option arg (not int) "
                               "at '%s'\n", c);
                        return ret;
                }
                dout("got int token %d val %d\n", token, intval);
        } else if (token > Opt_last_int && token < Opt_last_string) {
                dout("got string token %d val %s\n", token,
                     argstr[0].from);
        } else if (token > Opt_last_string && token < Opt_last_bool) {
                dout("got Boolean token %d\n", token);
        } else {
                dout("got token %d\n", token);
        }

        switch (token) {
        case Opt_read_only:
                rbd_opts->read_only = true;
                break;
        case Opt_read_write:
                rbd_opts->read_only = false;
                break;
        default:
                rbd_assert(false);
                break;
        }
        return 0;
}
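/*
 * These tokens come from the comma-separated options field of the
 * string written to the sysfs "add" interface.  For example
 * (hypothetical monitor address and credentials):
 *
 *      echo "1.2.3.4:6789 name=admin,ro rbd foo" > /sys/bus/rbd/add
 *
 * would request a read-only mapping of image "foo" in pool "rbd".
 */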

/*
 * Get a ceph client with specific addr and configuration, if one does
 * not exist create it.
 */
static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
{
        struct rbd_client *rbdc;

        rbdc = rbd_client_find(ceph_opts);
        if (rbdc)       /* using an existing client */
                ceph_destroy_options(ceph_opts);
        else
                rbdc = rbd_client_create(ceph_opts);

        return rbdc;
}

/*
 * Destroy ceph client
 *
 * Caller must hold rbd_client_list_lock.
 */
static void rbd_client_release(struct kref *kref)
{
        struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);

        dout("%s: rbdc %p\n", __func__, rbdc);
        spin_lock(&rbd_client_list_lock);
        list_del(&rbdc->node);
        spin_unlock(&rbd_client_list_lock);

        ceph_destroy_client(rbdc->client);
        kfree(rbdc);
}

/*
 * Drop reference to ceph client node. If it's not referenced anymore, release
 * it.
 */
static void rbd_put_client(struct rbd_client *rbdc)
{
        if (rbdc)
                kref_put(&rbdc->kref, rbd_client_release);
}

static bool rbd_image_format_valid(u32 image_format)
{
        return image_format == 1 || image_format == 2;
}

static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
{
        size_t size;
        u32 snap_count;

        /* The header has to start with the magic rbd header text */
        if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)))
                return false;

        /* The bio layer requires at least sector-sized I/O */

        if (ondisk->options.order < SECTOR_SHIFT)
                return false;

        /* If we use u64 in a few spots we may be able to loosen this */

        if (ondisk->options.order > 8 * sizeof (int) - 1)
                return false;

        /*
         * The size of a snapshot header has to fit in a size_t, and
         * that limits the number of snapshots.
         */
        snap_count = le32_to_cpu(ondisk->snap_count);
        size = SIZE_MAX - sizeof (struct ceph_snap_context);
        if (snap_count > size / sizeof (__le64))
                return false;

        /*
         * Not only that, but the size of the entire snapshot
         * header must also be representable in a size_t.
         */
        size -= snap_count * sizeof (__le64);
        if ((u64) size < le64_to_cpu(ondisk->snap_names_len))
                return false;

        return true;
}

/*
 * Fill an rbd image header with information from the given format 1
 * on-disk header.
 */
static int rbd_header_from_disk(struct rbd_device *rbd_dev,
                                 struct rbd_image_header_ondisk *ondisk)
{
        struct rbd_image_header *header = &rbd_dev->header;
        bool first_time = header->object_prefix == NULL;
        struct ceph_snap_context *snapc;
        char *object_prefix = NULL;
        char *snap_names = NULL;
        u64 *snap_sizes = NULL;
        u32 snap_count;
        size_t size;
        int ret = -ENOMEM;
        u32 i;

        /* Allocate this now to avoid having to handle failure below */

        if (first_time) {
                size_t len;

                len = strnlen(ondisk->object_prefix,
                                sizeof (ondisk->object_prefix));
                object_prefix = kmalloc(len + 1, GFP_KERNEL);
                if (!object_prefix)
                        return -ENOMEM;
                memcpy(object_prefix, ondisk->object_prefix, len);
                object_prefix[len] = '\0';
        }

        /* Allocate the snapshot context and fill it in */

        snap_count = le32_to_cpu(ondisk->snap_count);
        snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
        if (!snapc)
                goto out_err;
        snapc->seq = le64_to_cpu(ondisk->snap_seq);
        if (snap_count) {
                struct rbd_image_snap_ondisk *snaps;
                u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);

                /* We'll keep a copy of the snapshot names... */

                if (snap_names_len > (u64)SIZE_MAX)
                        goto out_2big;
                snap_names = kmalloc(snap_names_len, GFP_KERNEL);
                if (!snap_names)
                        goto out_err;

                /* ...as well as the array of their sizes. */

                size = snap_count * sizeof (*header->snap_sizes);
                snap_sizes = kmalloc(size, GFP_KERNEL);
                if (!snap_sizes)
                        goto out_err;

                /*
                 * Copy the names, and fill in each snapshot's id
                 * and size.
                 *
                 * Note that rbd_dev_v1_header_read() guarantees the
                 * ondisk buffer we're working with has
                 * snap_names_len bytes beyond the end of the
                 * snapshot id array, so this memcpy() is safe.
                 */
                memcpy(snap_names, &ondisk->snaps[snap_count], snap_names_len);
                snaps = ondisk->snaps;
                for (i = 0; i < snap_count; i++) {
                        snapc->snaps[i] = le64_to_cpu(snaps[i].id);
                        snap_sizes[i] = le64_to_cpu(snaps[i].image_size);
                }
        }

        /* We won't fail any more; fill in the header */

        down_write(&rbd_dev->header_rwsem);
        if (first_time) {
                header->object_prefix = object_prefix;
                header->obj_order = ondisk->options.order;
                header->crypt_type = ondisk->options.crypt_type;
                header->comp_type = ondisk->options.comp_type;
                /* The rest aren't used for format 1 images */
                header->stripe_unit = 0;
                header->stripe_count = 0;
                header->features = 0;
        } else {
                ceph_put_snap_context(header->snapc);
                kfree(header->snap_names);
                kfree(header->snap_sizes);
        }

        /* The remaining fields always get updated (when we refresh) */

        header->image_size = le64_to_cpu(ondisk->image_size);
        header->snapc = snapc;
        header->snap_names = snap_names;
        header->snap_sizes = snap_sizes;

        /* Make sure mapping size is consistent with header info */

        if (rbd_dev->spec->snap_id == CEPH_NOSNAP || first_time)
                if (rbd_dev->mapping.size != header->image_size)
                        rbd_dev->mapping.size = header->image_size;

        up_write(&rbd_dev->header_rwsem);

        return 0;
out_2big:
        ret = -EIO;
out_err:
        kfree(snap_sizes);
        kfree(snap_names);
        ceph_put_snap_context(snapc);
        kfree(object_prefix);

        return ret;
}

static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which)
{
        const char *snap_name;

        rbd_assert(which < rbd_dev->header.snapc->num_snaps);

        /* Skip over names until we find the one we are looking for */

        snap_name = rbd_dev->header.snap_names;
        while (which--)
                snap_name += strlen(snap_name) + 1;

        return kstrdup(snap_name, GFP_KERNEL);
}

/*
 * Snapshot id comparison function for use with qsort()/bsearch().
 * Note that result is for snapshots in *descending* order.
 */
static int snapid_compare_reverse(const void *s1, const void *s2)
{
        u64 snap_id1 = *(u64 *)s1;
        u64 snap_id2 = *(u64 *)s2;

        if (snap_id1 < snap_id2)
                return 1;
        return snap_id1 == snap_id2 ? 0 : -1;
}

/*
 * Search a snapshot context to see if the given snapshot id is
 * present.
 *
 * Returns the position of the snapshot id in the array if it's found,
 * or BAD_SNAP_INDEX otherwise.
 *
 * Note: The snapshot array is kept sorted (by the osd) in
 * reverse order, highest snapshot id first.
 */
static u32 rbd_dev_snap_index(struct rbd_device *rbd_dev, u64 snap_id)
{
        struct ceph_snap_context *snapc = rbd_dev->header.snapc;
        u64 *found;

        found = bsearch(&snap_id, &snapc->snaps, snapc->num_snaps,
                                sizeof (snap_id), snapid_compare_reverse);

        return found ? (u32)(found - &snapc->snaps[0]) : BAD_SNAP_INDEX;
}
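/*
 * Note that bsearch() only finds anything here because the
 * comparator above matches the descending order the osd maintains
 * for snapc->snaps[]; the usual ascending comparator would send
 * the binary search the wrong way.
 */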

static const char *rbd_dev_v1_snap_name(struct rbd_device *rbd_dev,
                                        u64 snap_id)
{
        u32 which;

        which = rbd_dev_snap_index(rbd_dev, snap_id);
        if (which == BAD_SNAP_INDEX)
                return NULL;

        return _rbd_dev_v1_snap_name(rbd_dev, which);
}

static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
{
        if (snap_id == CEPH_NOSNAP)
                return RBD_SNAP_HEAD_NAME;

        rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
        if (rbd_dev->image_format == 1)
                return rbd_dev_v1_snap_name(rbd_dev, snap_id);

        return rbd_dev_v2_snap_name(rbd_dev, snap_id);
}

static int rbd_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
                                u64 *snap_size)
{
        rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
        if (snap_id == CEPH_NOSNAP) {
                *snap_size = rbd_dev->header.image_size;
        } else if (rbd_dev->image_format == 1) {
                u32 which;

                which = rbd_dev_snap_index(rbd_dev, snap_id);
                if (which == BAD_SNAP_INDEX)
                        return -ENOENT;

                *snap_size = rbd_dev->header.snap_sizes[which];
        } else {
                u64 size = 0;
                int ret;

                ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, NULL, &size);
                if (ret)
                        return ret;

                *snap_size = size;
        }
        return 0;
}

static int rbd_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
                        u64 *snap_features)
{
        rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
        if (snap_id == CEPH_NOSNAP) {
                *snap_features = rbd_dev->header.features;
        } else if (rbd_dev->image_format == 1) {
                *snap_features = 0;     /* No features for format 1 */
        } else {
                u64 features = 0;
                int ret;

                ret = _rbd_dev_v2_snap_features(rbd_dev, snap_id, &features);
                if (ret)
                        return ret;

                *snap_features = features;
        }
        return 0;
}

static int rbd_dev_mapping_set(struct rbd_device *rbd_dev)
{
        u64 snap_id = rbd_dev->spec->snap_id;
        u64 size = 0;
        u64 features = 0;
        int ret;

        ret = rbd_snap_size(rbd_dev, snap_id, &size);
        if (ret)
                return ret;
        ret = rbd_snap_features(rbd_dev, snap_id, &features);
        if (ret)
                return ret;

        rbd_dev->mapping.size = size;
        rbd_dev->mapping.features = features;

        return 0;
}

static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev)
{
        rbd_dev->mapping.size = 0;
        rbd_dev->mapping.features = 0;
}

static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
{
        char *name;
        u64 segment;
        int ret;

        name = kmem_cache_alloc(rbd_segment_name_cache, GFP_NOIO);
        if (!name)
                return NULL;
        segment = offset >> rbd_dev->header.obj_order;
        ret = snprintf(name, MAX_OBJ_NAME_SIZE + 1, "%s.%012llx",
                        rbd_dev->header.object_prefix, segment);
        if (ret < 0 || ret > MAX_OBJ_NAME_SIZE) {
                pr_err("error formatting segment name for #%llu (%d)\n",
                        segment, ret);
                /* Return this to the cache it came from, not kfree() */
                kmem_cache_free(rbd_segment_name_cache, name);
                name = NULL;
        }

        return name;
}

static void rbd_segment_name_free(const char *name)
{
        /* The explicit cast here is needed to drop the const qualifier */

        kmem_cache_free(rbd_segment_name_cache, (void *)name);
}

static u64 rbd_segment_offset(struct rbd_device *rbd_dev, u64 offset)
{
        u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;

        return offset & (segment_size - 1);
}

static u64 rbd_segment_length(struct rbd_device *rbd_dev,
                                u64 offset, u64 length)
{
        u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;

        offset &= segment_size - 1;

        rbd_assert(length <= U64_MAX - offset);
        if (offset + length > segment_size)
                length = segment_size - offset;

        return length;
}
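/*
 * A worked example of the two helpers above, assuming obj_order is
 * 22 (4MB objects): an image offset of 0x401000 lies in segment 1,
 * rbd_segment_offset() returns 0x1000, and a request of 8MB
 * starting there is clipped by rbd_segment_length() to
 * 0x400000 - 0x1000 = 0x3ff000 bytes, the remainder of that segment.
 */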

/*
 * returns the size of an object in the image
 */
static u64 rbd_obj_bytes(struct rbd_image_header *header)
{
        return 1 << header->obj_order;
}

/*
 * bio helpers
 */

static void bio_chain_put(struct bio *chain)
{
        struct bio *tmp;

        while (chain) {
                tmp = chain;
                chain = chain->bi_next;
                bio_put(tmp);
        }
}

/*
 * zeros a bio chain, starting at specific offset
 */
static void zero_bio_chain(struct bio *chain, int start_ofs)
{
        struct bio_vec *bv;
        unsigned long flags;
        void *buf;
        int i;
        int pos = 0;

        while (chain) {
                bio_for_each_segment(bv, chain, i) {
                        if (pos + bv->bv_len > start_ofs) {
                                int remainder = max(start_ofs - pos, 0);
                                buf = bvec_kmap_irq(bv, &flags);
                                memset(buf + remainder, 0,
                                       bv->bv_len - remainder);
                                bvec_kunmap_irq(buf, &flags);
                        }
                        pos += bv->bv_len;
                }

                chain = chain->bi_next;
        }
}

/*
 * similar to zero_bio_chain(), zeros data defined by a page array,
 * starting at the given byte offset from the start of the array and
 * continuing up to the given end offset.  The pages array is
 * assumed to be big enough to hold all bytes up to the end.
 */
static void zero_pages(struct page **pages, u64 offset, u64 end)
{
        struct page **page = &pages[offset >> PAGE_SHIFT];

        rbd_assert(end > offset);
        rbd_assert(end - offset <= (u64)SIZE_MAX);
        while (offset < end) {
                size_t page_offset;
                size_t length;
                unsigned long flags;
                void *kaddr;

                page_offset = (size_t)(offset & ~PAGE_MASK);
                length = min(PAGE_SIZE - page_offset, (size_t)(end - offset));
                local_irq_save(flags);
                kaddr = kmap_atomic(*page);
                memset(kaddr + page_offset, 0, length);
                kunmap_atomic(kaddr);
                local_irq_restore(flags);

                offset += length;
                page++;
        }
}

/*
 * Clone a portion of a bio, starting at the given byte offset
 * and continuing for the number of bytes indicated.
 */
static struct bio *bio_clone_range(struct bio *bio_src,
                                        unsigned int offset,
                                        unsigned int len,
                                        gfp_t gfpmask)
{
        struct bio_vec *bv;
        unsigned int resid;
        unsigned short idx;
        unsigned int voff;
        unsigned short end_idx;
        unsigned short vcnt;
        struct bio *bio;

        /* Handle the easy case for the caller */

        if (!offset && len == bio_src->bi_size)
                return bio_clone(bio_src, gfpmask);

        if (WARN_ON_ONCE(!len))
                return NULL;
        if (WARN_ON_ONCE(len > bio_src->bi_size))
                return NULL;
        if (WARN_ON_ONCE(offset > bio_src->bi_size - len))
                return NULL;

        /* Find first affected segment... */

        resid = offset;
        __bio_for_each_segment(bv, bio_src, idx, 0) {
                if (resid < bv->bv_len)
                        break;
                resid -= bv->bv_len;
        }
        voff = resid;

        /* ...and the last affected segment */

        resid += len;
        __bio_for_each_segment(bv, bio_src, end_idx, idx) {
                if (resid <= bv->bv_len)
                        break;
                resid -= bv->bv_len;
        }
        vcnt = end_idx - idx + 1;

        /* Build the clone */

        bio = bio_alloc(gfpmask, (unsigned int) vcnt);
        if (!bio)
                return NULL;    /* ENOMEM */

        bio->bi_bdev = bio_src->bi_bdev;
        bio->bi_sector = bio_src->bi_sector + (offset >> SECTOR_SHIFT);
        bio->bi_rw = bio_src->bi_rw;
        bio->bi_flags |= 1 << BIO_CLONED;

        /*
         * Copy over our part of the bio_vec, then update the first
         * and last (or only) entries.
         */
        memcpy(&bio->bi_io_vec[0], &bio_src->bi_io_vec[idx],
                        vcnt * sizeof (struct bio_vec));
        bio->bi_io_vec[0].bv_offset += voff;
        if (vcnt > 1) {
                bio->bi_io_vec[0].bv_len -= voff;
                bio->bi_io_vec[vcnt - 1].bv_len = resid;
        } else {
                bio->bi_io_vec[0].bv_len = len;
        }

        bio->bi_vcnt = vcnt;
        bio->bi_size = len;
        bio->bi_idx = 0;

        return bio;
}

/*
 * Clone a portion of a bio chain, starting at the given byte offset
 * into the first bio in the source chain and continuing for the
 * number of bytes indicated.  The result is another bio chain of
 * exactly the given length, or a null pointer on error.
 *
 * The bio_src and offset parameters are both in-out.  On entry they
 * refer to the first source bio and the offset into that bio where
 * the start of data to be cloned is located.
 *
 * On return, bio_src is updated to refer to the bio in the source
 * chain that contains the first un-cloned byte, and *offset will
 * contain the offset of that byte within that bio.
 */
static struct bio *bio_chain_clone_range(struct bio **bio_src,
                                        unsigned int *offset,
                                        unsigned int len,
                                        gfp_t gfpmask)
{
        struct bio *bi = *bio_src;
        unsigned int off = *offset;
        struct bio *chain = NULL;
        struct bio **end;

        /* Build up a chain of clone bios up to the limit */

        if (!bi || off >= bi->bi_size || !len)
                return NULL;            /* Nothing to clone */

        end = &chain;
        while (len) {
                unsigned int bi_size;
                struct bio *bio;

                if (!bi) {
                        rbd_warn(NULL, "bio_chain exhausted with %u left", len);
                        goto out_err;   /* EINVAL; ran out of bio's */
                }
                bi_size = min_t(unsigned int, bi->bi_size - off, len);
                bio = bio_clone_range(bi, off, bi_size, gfpmask);
                if (!bio)
                        goto out_err;   /* ENOMEM */

                *end = bio;
                end = &bio->bi_next;

                off += bi_size;
                if (off == bi->bi_size) {
                        bi = bi->bi_next;
                        off = 0;
                }
                len -= bi_size;
        }
        *bio_src = bi;
        *offset = off;

        return chain;
out_err:
        bio_chain_put(chain);

        return NULL;
}
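/*
 * A sketch of how a caller can consume a chain with repeated calls
 * (bio, off, and first_len are hypothetical locals):
 *
 *      struct bio *clone;
 *
 *      clone = bio_chain_clone_range(&bio, &off, first_len, GFP_NOIO);
 *
 * Because bio and off are updated in place, a second call picks up
 * exactly where the first one left off.
 */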

/*
 * The default/initial value for all object request flags is 0.  For
 * each flag, once its value is set to 1 it is never reset to 0
 * again.
 */
static void obj_request_img_data_set(struct rbd_obj_request *obj_request)
{
        if (test_and_set_bit(OBJ_REQ_IMG_DATA, &obj_request->flags)) {
                struct rbd_device *rbd_dev;

                rbd_dev = obj_request->img_request->rbd_dev;
                rbd_warn(rbd_dev, "obj_request %p already marked img_data\n",
                        obj_request);
        }
}

static bool obj_request_img_data_test(struct rbd_obj_request *obj_request)
{
        smp_mb();
        return test_bit(OBJ_REQ_IMG_DATA, &obj_request->flags) != 0;
}

static void obj_request_done_set(struct rbd_obj_request *obj_request)
{
        if (test_and_set_bit(OBJ_REQ_DONE, &obj_request->flags)) {
                struct rbd_device *rbd_dev = NULL;

                if (obj_request_img_data_test(obj_request))
                        rbd_dev = obj_request->img_request->rbd_dev;
                rbd_warn(rbd_dev, "obj_request %p already marked done\n",
                        obj_request);
        }
}

static bool obj_request_done_test(struct rbd_obj_request *obj_request)
{
        smp_mb();
        return test_bit(OBJ_REQ_DONE, &obj_request->flags) != 0;
}

/*
 * This sets the KNOWN flag after (possibly) setting the EXISTS
 * flag.  The latter is set based on the "exists" value provided.
 *
 * Note that for our purposes once an object exists it never goes
 * away again.  It's possible that the responses from two existence
 * checks are separated by the creation of the target object, and
 * the first ("doesn't exist") response arrives *after* the second
 * ("does exist").  In that case we ignore the second one.
 */
static void obj_request_existence_set(struct rbd_obj_request *obj_request,
                                bool exists)
{
        if (exists)
                set_bit(OBJ_REQ_EXISTS, &obj_request->flags);
        set_bit(OBJ_REQ_KNOWN, &obj_request->flags);
        smp_mb();
}

static bool obj_request_known_test(struct rbd_obj_request *obj_request)
{
        smp_mb();
        return test_bit(OBJ_REQ_KNOWN, &obj_request->flags) != 0;
}

static bool obj_request_exists_test(struct rbd_obj_request *obj_request)
{
        smp_mb();
        return test_bit(OBJ_REQ_EXISTS, &obj_request->flags) != 0;
}
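/*
 * One reading of the barrier usage above (not a formal proof): each
 * setter issues a full barrier after setting its bit and each tester
 * issues one before reading, so once a tester observes a flag set,
 * stores the setting thread made before the flag operation are
 * visible to it as well.
 */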

static void rbd_obj_request_get(struct rbd_obj_request *obj_request)
{
        dout("%s: obj %p (was %d)\n", __func__, obj_request,
                atomic_read(&obj_request->kref.refcount));
        kref_get(&obj_request->kref);
}

static void rbd_obj_request_destroy(struct kref *kref);
static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
{
        rbd_assert(obj_request != NULL);
        dout("%s: obj %p (was %d)\n", __func__, obj_request,
                atomic_read(&obj_request->kref.refcount));
        kref_put(&obj_request->kref, rbd_obj_request_destroy);
}

static void rbd_img_request_get(struct rbd_img_request *img_request)
{
        dout("%s: img %p (was %d)\n", __func__, img_request,
                atomic_read(&img_request->kref.refcount));
        kref_get(&img_request->kref);
}

static void rbd_img_request_destroy(struct kref *kref);
static void rbd_img_request_put(struct rbd_img_request *img_request)
{
        rbd_assert(img_request != NULL);
        dout("%s: img %p (was %d)\n", __func__, img_request,
                atomic_read(&img_request->kref.refcount));
        kref_put(&img_request->kref, rbd_img_request_destroy);
}

static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
                                        struct rbd_obj_request *obj_request)
{
        rbd_assert(obj_request->img_request == NULL);

        /* Image request now owns object's original reference */
        obj_request->img_request = img_request;
        obj_request->which = img_request->obj_request_count;
        rbd_assert(!obj_request_img_data_test(obj_request));
        obj_request_img_data_set(obj_request);
        rbd_assert(obj_request->which != BAD_WHICH);
        img_request->obj_request_count++;
        list_add_tail(&obj_request->links, &img_request->obj_requests);
        dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
                obj_request->which);
}

static inline void rbd_img_obj_request_del(struct rbd_img_request *img_request,
                                        struct rbd_obj_request *obj_request)
{
        rbd_assert(obj_request->which != BAD_WHICH);

        dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
                obj_request->which);
        list_del(&obj_request->links);
        rbd_assert(img_request->obj_request_count > 0);
        img_request->obj_request_count--;
        rbd_assert(obj_request->which == img_request->obj_request_count);
        obj_request->which = BAD_WHICH;
        rbd_assert(obj_request_img_data_test(obj_request));
        rbd_assert(obj_request->img_request == img_request);
        obj_request->img_request = NULL;
        obj_request->callback = NULL;
        rbd_obj_request_put(obj_request);
}

static bool obj_request_type_valid(enum obj_request_type type)
{
        switch (type) {
        case OBJ_REQUEST_NODATA:
        case OBJ_REQUEST_BIO:
        case OBJ_REQUEST_PAGES:
                return true;
        default:
                return false;
        }
}

static int rbd_obj_request_submit(struct ceph_osd_client *osdc,
                                struct rbd_obj_request *obj_request)
{
        dout("%s: osdc %p obj %p\n", __func__, osdc, obj_request);

        return ceph_osdc_start_request(osdc, obj_request->osd_req, false);
}

static void rbd_img_request_complete(struct rbd_img_request *img_request)
{
        dout("%s: img %p\n", __func__, img_request);

        /*
         * If no error occurred, compute the aggregate transfer
         * count for the image request.  We could instead use
         * atomic64_cmpxchg() to update it as each object request
         * completes; not clear which way is better offhand.
         */
        if (!img_request->result) {
                struct rbd_obj_request *obj_request;
                u64 xferred = 0;

                for_each_obj_request(img_request, obj_request)
                        xferred += obj_request->xferred;
                img_request->xferred = xferred;
        }

        if (img_request->callback)
                img_request->callback(img_request);
        else
                rbd_img_request_put(img_request);
}

/* Caller is responsible for rbd_obj_request_destroy(obj_request) */

static int rbd_obj_request_wait(struct rbd_obj_request *obj_request)
{
        dout("%s: obj %p\n", __func__, obj_request);

        return wait_for_completion_interruptible(&obj_request->completion);
}

/*
 * The default/initial value for all image request flags is 0.  Each
 * is conditionally set to 1 at image request initialization time
 * and currently never changes thereafter.
 */
1471 static void img_request_write_set(struct rbd_img_request *img_request)
1472 {
1473         set_bit(IMG_REQ_WRITE, &img_request->flags);
1474         smp_mb();
1475 }
1476
1477 static bool img_request_write_test(struct rbd_img_request *img_request)
1478 {
1479         smp_mb();
1480         return test_bit(IMG_REQ_WRITE, &img_request->flags) != 0;
1481 }
1482
1483 static void img_request_child_set(struct rbd_img_request *img_request)
1484 {
1485         set_bit(IMG_REQ_CHILD, &img_request->flags);
1486         smp_mb();
1487 }
1488
1489 static bool img_request_child_test(struct rbd_img_request *img_request)
1490 {
1491         smp_mb();
1492         return test_bit(IMG_REQ_CHILD, &img_request->flags) != 0;
1493 }
1494
1495 static void img_request_layered_set(struct rbd_img_request *img_request)
1496 {
1497         set_bit(IMG_REQ_LAYERED, &img_request->flags);
1498         smp_mb();
1499 }
1500
1501 static bool img_request_layered_test(struct rbd_img_request *img_request)
1502 {
1503         smp_mb();
1504         return test_bit(IMG_REQ_LAYERED, &img_request->flags) != 0;
1505 }
1506
1507 static void
1508 rbd_img_obj_request_read_callback(struct rbd_obj_request *obj_request)
1509 {
1510         u64 xferred = obj_request->xferred;
1511         u64 length = obj_request->length;
1512
1513         dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
1514                 obj_request, obj_request->img_request, obj_request->result,
1515                 xferred, length);
1516         /*
1517          * ENOENT means a hole in the image.  We zero-fill the
1518          * entire length of the request.  A short read also implies
1519          * zero-fill to the end of the request.  Either way we
1520          * update the xferred count to indicate the whole request
1521          * was satisfied.
1522          */
1523         rbd_assert(obj_request->type != OBJ_REQUEST_NODATA);
1524         if (obj_request->result == -ENOENT) {
1525                 if (obj_request->type == OBJ_REQUEST_BIO)
1526                         zero_bio_chain(obj_request->bio_list, 0);
1527                 else
1528                         zero_pages(obj_request->pages, 0, length);
1529                 obj_request->result = 0;
1530                 obj_request->xferred = length;
1531         } else if (xferred < length && !obj_request->result) {
1532                 if (obj_request->type == OBJ_REQUEST_BIO)
1533                         zero_bio_chain(obj_request->bio_list, xferred);
1534                 else
1535                         zero_pages(obj_request->pages, xferred, length);
1536                 obj_request->xferred = length;
1537         }
1538         obj_request_done_set(obj_request);
1539 }
1540
1541 static void rbd_obj_request_complete(struct rbd_obj_request *obj_request)
1542 {
1543         dout("%s: obj %p cb %p\n", __func__, obj_request,
1544                 obj_request->callback);
1545         if (obj_request->callback)
1546                 obj_request->callback(obj_request);
1547         else
1548                 complete_all(&obj_request->completion);
1549 }
1550
1551 static void rbd_osd_trivial_callback(struct rbd_obj_request *obj_request)
1552 {
1553         dout("%s: obj %p\n", __func__, obj_request);
1554         obj_request_done_set(obj_request);
1555 }
1556
1557 static void rbd_osd_read_callback(struct rbd_obj_request *obj_request)
1558 {
1559         struct rbd_img_request *img_request = NULL;
1560         struct rbd_device *rbd_dev = NULL;
1561         bool layered = false;
1562
1563         if (obj_request_img_data_test(obj_request)) {
1564                 img_request = obj_request->img_request;
1565                 layered = img_request && img_request_layered_test(img_request);
1566                 rbd_dev = img_request->rbd_dev;
1567         }
1568
1569         dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
1570                 obj_request, img_request, obj_request->result,
1571                 obj_request->xferred, obj_request->length);
1572         if (layered && obj_request->result == -ENOENT &&
1573                         obj_request->img_offset < rbd_dev->parent_overlap)
1574                 rbd_img_parent_read(obj_request);
1575         else if (img_request)
1576                 rbd_img_obj_request_read_callback(obj_request);
1577         else
1578                 obj_request_done_set(obj_request);
1579 }
1580
1581 static void rbd_osd_write_callback(struct rbd_obj_request *obj_request)
1582 {
1583         dout("%s: obj %p result %d %llu\n", __func__, obj_request,
1584                 obj_request->result, obj_request->length);
1585         /*
1586          * There is no such thing as a successful short write.  Set
1587          * it to our originally-requested length.
1588          */
1589         obj_request->xferred = obj_request->length;
1590         obj_request_done_set(obj_request);
1591 }
1592
1593 /*
1594  * For a simple stat call there's nothing to do.  We'll do more if
1595  * this is part of a write sequence for a layered image.
1596  */
1597 static void rbd_osd_stat_callback(struct rbd_obj_request *obj_request)
1598 {
1599         dout("%s: obj %p\n", __func__, obj_request);
1600         obj_request_done_set(obj_request);
1601 }
1602
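/*
 * Completion callback for every osd request we issue.  Dispatches on
 * the opcode of the request's first op.
 */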
1603 static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
1604                                 struct ceph_msg *msg)
1605 {
1606         struct rbd_obj_request *obj_request = osd_req->r_priv;
1607         u16 opcode;
1608
1609         dout("%s: osd_req %p msg %p\n", __func__, osd_req, msg);
1610         rbd_assert(osd_req == obj_request->osd_req);
1611         if (obj_request_img_data_test(obj_request)) {
1612                 rbd_assert(obj_request->img_request);
1613                 rbd_assert(obj_request->which != BAD_WHICH);
1614         } else {
1615                 rbd_assert(obj_request->which == BAD_WHICH);
1616         }
1617
1618         if (osd_req->r_result < 0)
1619                 obj_request->result = osd_req->r_result;
1620
1621         BUG_ON(osd_req->r_num_ops > 2);
1622
1623         /*
1624          * We support a 64-bit length, but ultimately it has to be
1625          * passed to blk_end_request(), which takes an unsigned int.
1626          */
1627         obj_request->xferred = osd_req->r_reply_op_len[0];
1628         rbd_assert(obj_request->xferred < (u64)UINT_MAX);
1629         opcode = osd_req->r_ops[0].op;
1630         switch (opcode) {
1631         case CEPH_OSD_OP_READ:
1632                 rbd_osd_read_callback(obj_request);
1633                 break;
1634         case CEPH_OSD_OP_WRITE:
1635                 rbd_osd_write_callback(obj_request);
1636                 break;
1637         case CEPH_OSD_OP_STAT:
1638                 rbd_osd_stat_callback(obj_request);
1639                 break;
1640         case CEPH_OSD_OP_CALL:
1641         case CEPH_OSD_OP_NOTIFY_ACK:
1642         case CEPH_OSD_OP_WATCH:
1643                 rbd_osd_trivial_callback(obj_request);
1644                 break;
1645         default:
1646                 rbd_warn(NULL, "%s: unsupported op %hu\n",
1647                         obj_request->object_name, (unsigned short) opcode);
1648                 break;
1649         }
1650
1651         if (obj_request_done_test(obj_request))
1652                 rbd_obj_request_complete(obj_request);
1653 }
1654
1655 static void rbd_osd_req_format_read(struct rbd_obj_request *obj_request)
1656 {
1657         struct rbd_img_request *img_request = obj_request->img_request;
1658         struct ceph_osd_request *osd_req = obj_request->osd_req;
1659         u64 snap_id;
1660
1661         rbd_assert(osd_req != NULL);
1662
1663         snap_id = img_request ? img_request->snap_id : CEPH_NOSNAP;
1664         ceph_osdc_build_request(osd_req, obj_request->offset,
1665                         NULL, snap_id, NULL);
1666 }
1667
1668 static void rbd_osd_req_format_write(struct rbd_obj_request *obj_request)
1669 {
1670         struct rbd_img_request *img_request = obj_request->img_request;
1671         struct ceph_osd_request *osd_req = obj_request->osd_req;
1672         struct ceph_snap_context *snapc;
1673         struct timespec mtime = CURRENT_TIME;
1674
1675         rbd_assert(osd_req != NULL);
1676
1677         snapc = img_request ? img_request->snapc : NULL;
1678         ceph_osdc_build_request(osd_req, obj_request->offset,
1679                         snapc, CEPH_NOSNAP, &mtime);
1680 }
1681
1682 static struct ceph_osd_request *rbd_osd_req_create(
1683                                         struct rbd_device *rbd_dev,
1684                                         bool write_request,
1685                                         struct rbd_obj_request *obj_request)
1686 {
1687         struct ceph_snap_context *snapc = NULL;
1688         struct ceph_osd_client *osdc;
1689         struct ceph_osd_request *osd_req;
1690
1691         if (obj_request_img_data_test(obj_request)) {
1692                 struct rbd_img_request *img_request = obj_request->img_request;
1693
1694                 rbd_assert(write_request ==
1695                                 img_request_write_test(img_request));
1696                 if (write_request)
1697                         snapc = img_request->snapc;
1698         }
1699
1700         /* Allocate and initialize the request, for the single op */
1701
1702         osdc = &rbd_dev->rbd_client->client->osdc;
1703         osd_req = ceph_osdc_alloc_request(osdc, snapc, 1, false, GFP_ATOMIC);
1704         if (!osd_req)
1705                 return NULL;    /* ENOMEM */
1706
1707         if (write_request)
1708                 osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
1709         else
1710                 osd_req->r_flags = CEPH_OSD_FLAG_READ;
1711
1712         osd_req->r_callback = rbd_osd_req_callback;
1713         osd_req->r_priv = obj_request;
1714
1715         osd_req->r_oid_len = strlen(obj_request->object_name);
1716         rbd_assert(osd_req->r_oid_len < sizeof (osd_req->r_oid));
1717         memcpy(osd_req->r_oid, obj_request->object_name, osd_req->r_oid_len);
1718
1719         osd_req->r_file_layout = rbd_dev->layout;       /* struct */
1720
1721         return osd_req;
1722 }
1723
1724 /*
1725  * Create a copyup osd request based on the information in the
1726  * object request supplied.  A copyup request has two osd ops:
1727  * a copyup method call and a "normal" write request.
1728  */
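/*
 * (Op 0 is the "rbd" class "copyup" method call and op 1 the write;
 * both are filled in by rbd_img_obj_parent_read_full_callback().)
 */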
1729 static struct ceph_osd_request *
1730 rbd_osd_req_create_copyup(struct rbd_obj_request *obj_request)
1731 {
1732         struct rbd_img_request *img_request;
1733         struct ceph_snap_context *snapc;
1734         struct rbd_device *rbd_dev;
1735         struct ceph_osd_client *osdc;
1736         struct ceph_osd_request *osd_req;
1737
1738         rbd_assert(obj_request_img_data_test(obj_request));
1739         img_request = obj_request->img_request;
1740         rbd_assert(img_request);
1741         rbd_assert(img_request_write_test(img_request));
1742
1743         /* Allocate and initialize the request, for the two ops */
1744
1745         snapc = img_request->snapc;
1746         rbd_dev = img_request->rbd_dev;
1747         osdc = &rbd_dev->rbd_client->client->osdc;
1748         osd_req = ceph_osdc_alloc_request(osdc, snapc, 2, false, GFP_ATOMIC);
1749         if (!osd_req)
1750                 return NULL;    /* ENOMEM */
1751
1752         osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
1753         osd_req->r_callback = rbd_osd_req_callback;
1754         osd_req->r_priv = obj_request;
1755
1756         osd_req->r_oid_len = strlen(obj_request->object_name);
1757         rbd_assert(osd_req->r_oid_len < sizeof (osd_req->r_oid));
1758         memcpy(osd_req->r_oid, obj_request->object_name, osd_req->r_oid_len);
1759
1760         osd_req->r_file_layout = rbd_dev->layout;       /* struct */
1761
1762         return osd_req;
1763 }
1764
1765
1766 static void rbd_osd_req_destroy(struct ceph_osd_request *osd_req)
1767 {
1768         ceph_osdc_put_request(osd_req);
1769 }
1770
1771 /* object_name is assumed to be a non-null pointer and NUL-terminated */
1772
1773 static struct rbd_obj_request *rbd_obj_request_create(const char *object_name,
1774                                                 u64 offset, u64 length,
1775                                                 enum obj_request_type type)
1776 {
1777         struct rbd_obj_request *obj_request;
1778         size_t size;
1779         char *name;
1780
1781         rbd_assert(obj_request_type_valid(type));
1782
1783         size = strlen(object_name) + 1;
1784         name = kmalloc(size, GFP_KERNEL);
1785         if (!name)
1786                 return NULL;
1787
1788         obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_KERNEL);
1789         if (!obj_request) {
1790                 kfree(name);
1791                 return NULL;
1792         }
1793
1794         obj_request->object_name = memcpy(name, object_name, size);
1795         obj_request->offset = offset;
1796         obj_request->length = length;
1797         obj_request->flags = 0;
1798         obj_request->which = BAD_WHICH;
1799         obj_request->type = type;
1800         INIT_LIST_HEAD(&obj_request->links);
1801         init_completion(&obj_request->completion);
1802         kref_init(&obj_request->kref);
1803
1804         dout("%s: \"%s\" %llu/%llu %d -> obj %p\n", __func__, object_name,
1805                 offset, length, (int)type, obj_request);
1806
1807         return obj_request;
1808 }
1809
1810 static void rbd_obj_request_destroy(struct kref *kref)
1811 {
1812         struct rbd_obj_request *obj_request;
1813
1814         obj_request = container_of(kref, struct rbd_obj_request, kref);
1815
1816         dout("%s: obj %p\n", __func__, obj_request);
1817
1818         rbd_assert(obj_request->img_request == NULL);
1819         rbd_assert(obj_request->which == BAD_WHICH);
1820
1821         if (obj_request->osd_req)
1822                 rbd_osd_req_destroy(obj_request->osd_req);
1823
1824         rbd_assert(obj_request_type_valid(obj_request->type));
1825         switch (obj_request->type) {
1826         case OBJ_REQUEST_NODATA:
1827                 break;          /* Nothing to do */
1828         case OBJ_REQUEST_BIO:
1829                 if (obj_request->bio_list)
1830                         bio_chain_put(obj_request->bio_list);
1831                 break;
1832         case OBJ_REQUEST_PAGES:
1833                 if (obj_request->pages)
1834                         ceph_release_page_vector(obj_request->pages,
1835                                                 obj_request->page_count);
1836                 break;
1837         }
1838
1839         kfree(obj_request->object_name);
1840         obj_request->object_name = NULL;
1841         kmem_cache_free(rbd_obj_request_cache, obj_request);
1842 }
1843
1844 /*
1845  * Caller is responsible for filling in the list of object requests
1846  * that comprises the image request, and the Linux request pointer
1847  * (if there is one).
1848  */
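/*
 * A sketch of typical use, as in rbd_request_fn() below:
 *
 *      img_request = rbd_img_request_create(rbd_dev, offset, length,
 *                                              write_request, false);
 *      ret = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO, rq->bio);
 *      if (!ret)
 *              ret = rbd_img_request_submit(img_request);
 *      if (ret)
 *              rbd_img_request_put(img_request);
 */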
1849 static struct rbd_img_request *rbd_img_request_create(
1850                                         struct rbd_device *rbd_dev,
1851                                         u64 offset, u64 length,
1852                                         bool write_request,
1853                                         bool child_request)
1854 {
1855         struct rbd_img_request *img_request;
1856
1857         img_request = kmem_cache_alloc(rbd_img_request_cache, GFP_ATOMIC);
1858         if (!img_request)
1859                 return NULL;
1860
1861         if (write_request) {
1862                 down_read(&rbd_dev->header_rwsem);
1863                 ceph_get_snap_context(rbd_dev->header.snapc);
1864                 up_read(&rbd_dev->header_rwsem);
1865         }
1866
1867         img_request->rq = NULL;
1868         img_request->rbd_dev = rbd_dev;
1869         img_request->offset = offset;
1870         img_request->length = length;
1871         img_request->flags = 0;
1872         if (write_request) {
1873                 img_request_write_set(img_request);
1874                 img_request->snapc = rbd_dev->header.snapc;
1875         } else {
1876                 img_request->snap_id = rbd_dev->spec->snap_id;
1877         }
1878         if (child_request)
1879                 img_request_child_set(img_request);
1880         if (rbd_dev->parent_spec)
1881                 img_request_layered_set(img_request);
1882         spin_lock_init(&img_request->completion_lock);
1883         img_request->next_completion = 0;
1884         img_request->callback = NULL;
1885         img_request->result = 0;
1886         img_request->obj_request_count = 0;
1887         INIT_LIST_HEAD(&img_request->obj_requests);
1888         kref_init(&img_request->kref);
1889
1890         rbd_img_request_get(img_request);       /* Avoid a warning */
1891         rbd_img_request_put(img_request);       /* TEMPORARY */
1892
1893         dout("%s: rbd_dev %p %s %llu/%llu -> img %p\n", __func__, rbd_dev,
1894                 write_request ? "write" : "read", offset, length,
1895                 img_request);
1896
1897         return img_request;
1898 }
1899
1900 static void rbd_img_request_destroy(struct kref *kref)
1901 {
1902         struct rbd_img_request *img_request;
1903         struct rbd_obj_request *obj_request;
1904         struct rbd_obj_request *next_obj_request;
1905
1906         img_request = container_of(kref, struct rbd_img_request, kref);
1907
1908         dout("%s: img %p\n", __func__, img_request);
1909
1910         for_each_obj_request_safe(img_request, obj_request, next_obj_request)
1911                 rbd_img_obj_request_del(img_request, obj_request);
1912         rbd_assert(img_request->obj_request_count == 0);
1913
1914         if (img_request_write_test(img_request))
1915                 ceph_put_snap_context(img_request->snapc);
1916
1917         if (img_request_child_test(img_request))
1918                 rbd_obj_request_put(img_request->obj_request);
1919
1920         kmem_cache_free(rbd_img_request_cache, img_request);
1921 }
1922
1923 static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request)
1924 {
1925         struct rbd_img_request *img_request;
1926         unsigned int xferred;
1927         int result;
1928         bool more;
1929
1930         rbd_assert(obj_request_img_data_test(obj_request));
1931         img_request = obj_request->img_request;
1932
1933         rbd_assert(obj_request->xferred <= (u64)UINT_MAX);
1934         xferred = (unsigned int)obj_request->xferred;
1935         result = obj_request->result;
1936         if (result) {
1937                 struct rbd_device *rbd_dev = img_request->rbd_dev;
1938
1939                 rbd_warn(rbd_dev, "%s %llx at %llx (%llx)\n",
1940                         img_request_write_test(img_request) ? "write" : "read",
1941                         obj_request->length, obj_request->img_offset,
1942                         obj_request->offset);
1943                 rbd_warn(rbd_dev, "  result %d xferred %x\n",
1944                         result, xferred);
1945                 if (!img_request->result)
1946                         img_request->result = result;
1947         }
1948
1949         /* Image object requests don't own their page array */
1950
1951         if (obj_request->type == OBJ_REQUEST_PAGES) {
1952                 obj_request->pages = NULL;
1953                 obj_request->page_count = 0;
1954         }
1955
1956         if (img_request_child_test(img_request)) {
1957                 rbd_assert(img_request->obj_request != NULL);
1958                 more = obj_request->which < img_request->obj_request_count - 1;
1959         } else {
1960                 rbd_assert(img_request->rq != NULL);
1961                 more = blk_end_request(img_request->rq, result, xferred);
1962         }
1963
1964         return more;
1965 }
1966
1967 static void rbd_img_obj_callback(struct rbd_obj_request *obj_request)
1968 {
1969         struct rbd_img_request *img_request;
1970         u32 which = obj_request->which;
1971         bool more = true;
1972
1973         rbd_assert(obj_request_img_data_test(obj_request));
1974         img_request = obj_request->img_request;
1975
1976         dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
1977         rbd_assert(img_request != NULL);
1978         rbd_assert(img_request->obj_request_count > 0);
1979         rbd_assert(which != BAD_WHICH);
1980         rbd_assert(which < img_request->obj_request_count);
1981         rbd_assert(which >= img_request->next_completion);
1982
1983         spin_lock_irq(&img_request->completion_lock);
1984         if (which != img_request->next_completion)
1985                 goto out;
1986
1987         for_each_obj_request_from(img_request, obj_request) {
1988                 rbd_assert(more);
1989                 rbd_assert(which < img_request->obj_request_count);
1990
1991                 if (!obj_request_done_test(obj_request))
1992                         break;
1993                 more = rbd_img_obj_end_request(obj_request);
1994                 which++;
1995         }
1996
1997         rbd_assert(more ^ (which == img_request->obj_request_count));
1998         img_request->next_completion = which;
1999 out:
2000         spin_unlock_irq(&img_request->completion_lock);
2001
2002         if (!more)
2003                 rbd_img_request_complete(img_request);
2004 }
2005
2006 /*
2007  * Split up an image request into one or more object requests, each
2008  * to a different object.  The "type" parameter indicates whether
2009  * "data_desc" is the pointer to the head of a list of bio
2010  * structures, or the base of a page array.  In either case this
2011  * function assumes data_desc describes memory sufficient to hold
2012  * all data described by the image request.
2013  */
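/*
 * For example (a sketch, assuming the default 4 MiB object size): an
 * 8 MiB request starting at image offset 6 MiB is split into three
 * object requests of 2 MiB, 4 MiB and 2 MiB.
 */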
2014 static int rbd_img_request_fill(struct rbd_img_request *img_request,
2015                                         enum obj_request_type type,
2016                                         void *data_desc)
2017 {
2018         struct rbd_device *rbd_dev = img_request->rbd_dev;
2019         struct rbd_obj_request *obj_request = NULL;
2020         struct rbd_obj_request *next_obj_request;
2021         bool write_request = img_request_write_test(img_request);
2022         struct bio *bio_list;
2023         unsigned int bio_offset = 0;
2024         struct page **pages;
2025         u64 img_offset;
2026         u64 resid;
2027         u16 opcode;
2028
2029         dout("%s: img %p type %d data_desc %p\n", __func__, img_request,
2030                 (int)type, data_desc);
2031
2032         opcode = write_request ? CEPH_OSD_OP_WRITE : CEPH_OSD_OP_READ;
2033         img_offset = img_request->offset;
2034         resid = img_request->length;
2035         rbd_assert(resid > 0);
2036
2037         if (type == OBJ_REQUEST_BIO) {
2038                 bio_list = data_desc;
2039                 rbd_assert(img_offset == bio_list->bi_sector << SECTOR_SHIFT);
2040         } else {
2041                 rbd_assert(type == OBJ_REQUEST_PAGES);
2042                 pages = data_desc;
2043         }
2044
2045         while (resid) {
2046                 struct ceph_osd_request *osd_req;
2047                 const char *object_name;
2048                 u64 offset;
2049                 u64 length;
2050
2051                 object_name = rbd_segment_name(rbd_dev, img_offset);
2052                 if (!object_name)
2053                         goto out_unwind;
2054                 offset = rbd_segment_offset(rbd_dev, img_offset);
2055                 length = rbd_segment_length(rbd_dev, img_offset, resid);
2056                 obj_request = rbd_obj_request_create(object_name,
2057                                                 offset, length, type);
2058                 /* object request has its own copy of the object name */
2059                 rbd_segment_name_free(object_name);
2060                 if (!obj_request)
2061                         goto out_unwind;
2062
2063                 if (type == OBJ_REQUEST_BIO) {
2064                         unsigned int clone_size;
2065
2066                         rbd_assert(length <= (u64)UINT_MAX);
2067                         clone_size = (unsigned int)length;
2068                         obj_request->bio_list =
2069                                         bio_chain_clone_range(&bio_list,
2070                                                                 &bio_offset,
2071                                                                 clone_size,
2072                                                                 GFP_ATOMIC);
2073                         if (!obj_request->bio_list)
2074                                 goto out_partial;
2075                 } else {
2076                         unsigned int page_count;
2077
2078                         obj_request->pages = pages;
2079                         page_count = (u32)calc_pages_for(offset, length);
2080                         obj_request->page_count = page_count;
2081                         if ((offset + length) & ~PAGE_MASK)
2082                                 page_count--;   /* more on last page */
2083                         pages += page_count;
2084                 }
2085
2086                 osd_req = rbd_osd_req_create(rbd_dev, write_request,
2087                                                 obj_request);
2088                 if (!osd_req)
2089                         goto out_partial;
2090                 obj_request->osd_req = osd_req;
2091                 obj_request->callback = rbd_img_obj_callback;
2092
2093                 osd_req_op_extent_init(osd_req, 0, opcode, offset, length,
2094                                                 0, 0);
2095                 if (type == OBJ_REQUEST_BIO)
2096                         osd_req_op_extent_osd_data_bio(osd_req, 0,
2097                                         obj_request->bio_list, length);
2098                 else
2099                         osd_req_op_extent_osd_data_pages(osd_req, 0,
2100                                         obj_request->pages, length,
2101                                         offset & ~PAGE_MASK, false, false);
2102
2103                 if (write_request)
2104                         rbd_osd_req_format_write(obj_request);
2105                 else
2106                         rbd_osd_req_format_read(obj_request);
2107
2108                 obj_request->img_offset = img_offset;
2109                 rbd_img_obj_request_add(img_request, obj_request);
2110
2111                 img_offset += length;
2112                 resid -= length;
2113         }
2114
2115         return 0;
2116
2117 out_partial:
2118         rbd_obj_request_put(obj_request);
2119 out_unwind:
2120         for_each_obj_request_safe(img_request, obj_request, next_obj_request)
2121                 rbd_img_obj_request_del(img_request, obj_request);
2122
2123         return -ENOMEM;
2124 }
2125
2126 static void
2127 rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request)
2128 {
2129         struct rbd_img_request *img_request;
2130         struct rbd_device *rbd_dev;
2131         u64 length;
2132         u32 page_count;
2133
2134         rbd_assert(obj_request->type == OBJ_REQUEST_BIO);
2135         rbd_assert(obj_request_img_data_test(obj_request));
2136         img_request = obj_request->img_request;
2137         rbd_assert(img_request);
2138
2139         rbd_dev = img_request->rbd_dev;
2140         rbd_assert(rbd_dev);
2141         length = (u64)1 << rbd_dev->header.obj_order;
2142         page_count = (u32)calc_pages_for(0, length);
2143
2144         rbd_assert(obj_request->copyup_pages);
2145         ceph_release_page_vector(obj_request->copyup_pages, page_count);
2146         obj_request->copyup_pages = NULL;
2147
2148         /*
2149          * We want the transfer count to reflect the size of the
2150          * original write request.  There is no such thing as a
2151          * successful short write, so if the request was successful
2152          * we can just set it to the originally-requested length.
2153          */
2154         if (!obj_request->result)
2155                 obj_request->xferred = obj_request->length;
2156
2157         /* Finish up with the normal image object callback */
2158
2159         rbd_img_obj_callback(obj_request);
2160 }
2161
2162 static void
2163 rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)
2164 {
2165         struct rbd_obj_request *orig_request;
2166         struct ceph_osd_request *osd_req;
2167         struct ceph_osd_client *osdc;
2168         struct rbd_device *rbd_dev;
2169         struct page **pages;
2170         int result;
2171         u64 obj_size;
2172         u64 xferred;
2173
2174         rbd_assert(img_request_child_test(img_request));
2175
2176         /* First get what we need from the image request */
2177
2178         pages = img_request->copyup_pages;
2179         rbd_assert(pages != NULL);
2180         img_request->copyup_pages = NULL;
2181
2182         orig_request = img_request->obj_request;
2183         rbd_assert(orig_request != NULL);
2184         rbd_assert(orig_request->type == OBJ_REQUEST_BIO);
2185         result = img_request->result;
2186         obj_size = img_request->length;
2187         xferred = img_request->xferred;
2188
2189         rbd_dev = img_request->rbd_dev;
2190         rbd_assert(rbd_dev);
2191         rbd_assert(obj_size == (u64)1 << rbd_dev->header.obj_order);
2192
2193         rbd_img_request_put(img_request);
2194
2195         if (result)
2196                 goto out_err;
2197
2198         /* Allocate the new copyup osd request for the original request */
2199
2200         result = -ENOMEM;
2201         rbd_assert(!orig_request->osd_req);
2202         osd_req = rbd_osd_req_create_copyup(orig_request);
2203         if (!osd_req)
2204                 goto out_err;
2205         orig_request->osd_req = osd_req;
2206         orig_request->copyup_pages = pages;
2207
2208         /* Initialize the copyup op */
2209
2210         osd_req_op_cls_init(osd_req, 0, CEPH_OSD_OP_CALL, "rbd", "copyup");
2211         osd_req_op_cls_request_data_pages(osd_req, 0, pages, obj_size, 0,
2212                                                 false, false);
2213
2214         /* Then the original write request op */
2215
2216         osd_req_op_extent_init(osd_req, 1, CEPH_OSD_OP_WRITE,
2217                                         orig_request->offset,
2218                                         orig_request->length, 0, 0);
2219         osd_req_op_extent_osd_data_bio(osd_req, 1, orig_request->bio_list,
2220                                         orig_request->length);
2221
2222         rbd_osd_req_format_write(orig_request);
2223
2224         /* All set, send it off. */
2225
2226         orig_request->callback = rbd_img_obj_copyup_callback;
2227         osdc = &rbd_dev->rbd_client->client->osdc;
2228         result = rbd_obj_request_submit(osdc, orig_request);
2229         if (!result)
2230                 return;
2231 out_err:
2232         /* Record the error code and complete the request */
2233
2234         orig_request->result = result;
2235         orig_request->xferred = 0;
2236         obj_request_done_set(orig_request);
2237         rbd_obj_request_complete(orig_request);
2238 }
2239
2240 /*
2241  * Read from the parent image the range of data that covers the
2242  * entire target of the given object request.  This is used for
2243  * satisfying a layered image write request when the target of an
2244  * object request from the image request does not exist.
2245  *
2246  * A page array big enough to hold the returned data is allocated
2247  * and supplied to rbd_img_request_fill() as the "data descriptor."
2248  * When the read completes, this page array will be transferred to
2249  * the original object request for the copyup operation.
2250  *
2251  * If an error occurs, record it as the result of the original
2252  * object request and mark it done so it gets completed.
2253  */
2254 static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request)
2255 {
2256         struct rbd_img_request *img_request = NULL;
2257         struct rbd_img_request *parent_request = NULL;
2258         struct rbd_device *rbd_dev;
2259         u64 img_offset;
2260         u64 length;
2261         struct page **pages = NULL;
2262         u32 page_count;
2263         int result;
2264
2265         rbd_assert(obj_request_img_data_test(obj_request));
2266         rbd_assert(obj_request->type == OBJ_REQUEST_BIO);
2267
2268         img_request = obj_request->img_request;
2269         rbd_assert(img_request != NULL);
2270         rbd_dev = img_request->rbd_dev;
2271         rbd_assert(rbd_dev->parent != NULL);
2272
2273         /*
2274          * First things first.  The original osd request is of no
2275  * use to us any more; we'll need a new one that can hold
2276          * the two ops in a copyup request.  We'll get that later,
2277          * but for now we can release the old one.
2278          */
2279         rbd_osd_req_destroy(obj_request->osd_req);
2280         obj_request->osd_req = NULL;
2281
2282         /*
2283          * Determine the byte range covered by the object in the
2284          * child image to which the original request was to be sent.
2285          */
2286         img_offset = obj_request->img_offset - obj_request->offset;
2287         length = (u64)1 << rbd_dev->header.obj_order;
2288
2289         /*
2290          * There is no defined parent data beyond the parent
2291          * overlap, so limit what we read at that boundary if
2292          * necessary.
2293          */
2294         if (img_offset + length > rbd_dev->parent_overlap) {
2295                 rbd_assert(img_offset < rbd_dev->parent_overlap);
2296                 length = rbd_dev->parent_overlap - img_offset;
2297         }
2298
2299         /*
2300          * Allocate a page array big enough to receive the data read
2301          * from the parent.
2302          */
2303         page_count = (u32)calc_pages_for(0, length);
2304         pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2305         if (IS_ERR(pages)) {
2306                 result = PTR_ERR(pages);
2307                 pages = NULL;
2308                 goto out_err;
2309         }
2310
2311         result = -ENOMEM;
2312         parent_request = rbd_img_request_create(rbd_dev->parent,
2313                                                 img_offset, length,
2314                                                 false, true);
2315         if (!parent_request)
2316                 goto out_err;
2317         rbd_obj_request_get(obj_request);
2318         parent_request->obj_request = obj_request;
2319
2320         result = rbd_img_request_fill(parent_request, OBJ_REQUEST_PAGES, pages);
2321         if (result)
2322                 goto out_err;
2323         parent_request->copyup_pages = pages;
2324
2325         parent_request->callback = rbd_img_obj_parent_read_full_callback;
2326         result = rbd_img_request_submit(parent_request);
2327         if (!result)
2328                 return 0;
2329
2330         parent_request->copyup_pages = NULL;
2331         parent_request->obj_request = NULL;
2332         rbd_obj_request_put(obj_request);
2333 out_err:
2334         if (pages)
2335                 ceph_release_page_vector(pages, page_count);
2336         if (parent_request)
2337                 rbd_img_request_put(parent_request);
2338         obj_request->result = result;
2339         obj_request->xferred = 0;
2340         obj_request_done_set(obj_request);
2341
2342         return result;
2343 }
2344
2345 static void rbd_img_obj_exists_callback(struct rbd_obj_request *obj_request)
2346 {
2347         struct rbd_obj_request *orig_request;
2348         int result;
2349
2350         rbd_assert(!obj_request_img_data_test(obj_request));
2351
2352         /*
2353          * All we need from the object request is the original
2354          * request and the result of the STAT op.  Grab those, then
2355          * we're done with the request.
2356          */
2357         orig_request = obj_request->obj_request;
2358         obj_request->obj_request = NULL;
2359         rbd_assert(orig_request);
2360         rbd_assert(orig_request->img_request);
2361
2362         result = obj_request->result;
2363         obj_request->result = 0;
2364
2365         dout("%s: obj %p for obj %p result %d %llu/%llu\n", __func__,
2366                 obj_request, orig_request, result,
2367                 obj_request->xferred, obj_request->length);
2368         rbd_obj_request_put(obj_request);
2369
2373         /*
2374          * Our only purpose here is to determine whether the object
2375          * exists, and we don't want to treat the non-existence as
2376          * an error.  If something else comes back, transfer the
2377          * error to the original request and complete it now.
2378          */
2379         if (!result) {
2380                 obj_request_existence_set(orig_request, true);
2381         } else if (result == -ENOENT) {
2382                 obj_request_existence_set(orig_request, false);
2383         } else {
2384                 orig_request->result = result;
2385                 goto out;
2386         }
2387
2388         /*
2389          * Resubmit the original request now that we have recorded
2390          * whether the target object exists.
2391          */
2392         orig_request->result = rbd_img_obj_request_submit(orig_request);
2393 out:
2394         if (orig_request->result)
2395                 rbd_obj_request_complete(orig_request);
2396         rbd_obj_request_put(orig_request);
2397 }
2398
2399 static int rbd_img_obj_exists_submit(struct rbd_obj_request *obj_request)
2400 {
2401         struct rbd_obj_request *stat_request;
2402         struct rbd_device *rbd_dev;
2403         struct ceph_osd_client *osdc;
2404         struct page **pages = NULL;
2405         u32 page_count;
2406         size_t size;
2407         int ret;
2408
2409         /*
2410          * The response data for a STAT call consists of:
2411          *     le64 length;
2412          *     struct {
2413          *         le32 tv_sec;
2414          *         le32 tv_nsec;
2415          *     } mtime;
2416          */
2417         size = sizeof (__le64) + sizeof (__le32) + sizeof (__le32);
2418         page_count = (u32)calc_pages_for(0, size);
2419         pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2420         if (IS_ERR(pages))
2421                 return PTR_ERR(pages);
2422
2423         ret = -ENOMEM;
2424         stat_request = rbd_obj_request_create(obj_request->object_name, 0, 0,
2425                                                         OBJ_REQUEST_PAGES);
2426         if (!stat_request)
2427                 goto out;
2428
2429         rbd_obj_request_get(obj_request);
2430         stat_request->obj_request = obj_request;
2431         stat_request->pages = pages;
2432         stat_request->page_count = page_count;
2433
2434         rbd_assert(obj_request->img_request);
2435         rbd_dev = obj_request->img_request->rbd_dev;
2436         stat_request->osd_req = rbd_osd_req_create(rbd_dev, false,
2437                                                 stat_request);
2438         if (!stat_request->osd_req)
2439                 goto out;
2440         stat_request->callback = rbd_img_obj_exists_callback;
2441
2442         osd_req_op_init(stat_request->osd_req, 0, CEPH_OSD_OP_STAT);
2443         osd_req_op_raw_data_in_pages(stat_request->osd_req, 0, pages, size, 0,
2444                                         false, false);
2445         rbd_osd_req_format_read(stat_request);
2446
2447         osdc = &rbd_dev->rbd_client->client->osdc;
2448         ret = rbd_obj_request_submit(osdc, stat_request);
2449 out:
2450         if (ret)
2451                 rbd_obj_request_put(obj_request);
2452
2453         return ret;
2454 }
2455
2456 static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request)
2457 {
2458         struct rbd_img_request *img_request;
2459         struct rbd_device *rbd_dev;
2460         bool known;
2461
2462         rbd_assert(obj_request_img_data_test(obj_request));
2463
2464         img_request = obj_request->img_request;
2465         rbd_assert(img_request);
2466         rbd_dev = img_request->rbd_dev;
2467
2468         /*
2469          * Only writes to layered images need special handling.
2470          * Reads and non-layered writes are simple object requests.
2471          * Layered writes that start beyond the end of the overlap
2472          * with the parent have no parent data, so they too are
2473          * simple object requests.  Finally, if the target object is
2474          * known to already exist, its parent data has already been
2475          * copied, so a write to the object can also be handled as a
2476          * simple object request.
2477          */
2478         if (!img_request_write_test(img_request) ||
2479                 !img_request_layered_test(img_request) ||
2480                 rbd_dev->parent_overlap <= obj_request->img_offset ||
2481                 ((known = obj_request_known_test(obj_request)) &&
2482                         obj_request_exists_test(obj_request))) {
2483
2484                 struct ceph_osd_client *osdc;
2485
2486                 osdc = &rbd_dev->rbd_client->client->osdc;
2489
2490                 return rbd_obj_request_submit(osdc, obj_request);
2491         }
2492
2493         /*
2494          * It's a layered write.  The target object might exist but
2495          * we may not know that yet.  If we know it doesn't exist,
2496          * start by reading the data for the full target object from
2497          * the parent so we can use it for a copyup to the target.
2498          */
2499         if (known)
2500                 return rbd_img_obj_parent_read_full(obj_request);
2501
2502         /* We don't know whether the target exists.  Go find out. */
2503
2504         return rbd_img_obj_exists_submit(obj_request);
2505 }
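/*
 * The full sequence for a layered write to a missing object is thus:
 * rbd_img_obj_request_submit() -> rbd_img_obj_exists_submit() ->
 * rbd_img_obj_exists_callback() (which resubmits) ->
 * rbd_img_obj_parent_read_full() ->
 * rbd_img_obj_parent_read_full_callback() ->
 * rbd_img_obj_copyup_callback().
 */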
2506
2507 static int rbd_img_request_submit(struct rbd_img_request *img_request)
2508 {
2509         struct rbd_obj_request *obj_request;
2510         struct rbd_obj_request *next_obj_request;
2511
2512         dout("%s: img %p\n", __func__, img_request);
2513         for_each_obj_request_safe(img_request, obj_request, next_obj_request) {
2514                 int ret;
2515
2516                 ret = rbd_img_obj_request_submit(obj_request);
2517                 if (ret)
2518                         return ret;
2519         }
2520
2521         return 0;
2522 }
2523
2524 static void rbd_img_parent_read_callback(struct rbd_img_request *img_request)
2525 {
2526         struct rbd_obj_request *obj_request;
2527         struct rbd_device *rbd_dev;
2528         u64 obj_end;
2529
2530         rbd_assert(img_request_child_test(img_request));
2531
2532         obj_request = img_request->obj_request;
2533         rbd_assert(obj_request);
2534         rbd_assert(obj_request->img_request);
2535
2536         obj_request->result = img_request->result;
2537         if (obj_request->result)
2538                 goto out;
2539
2540         /*
2541          * We need to zero anything beyond the parent overlap
2542          * boundary.  Since rbd_img_obj_request_read_callback()
2543          * will zero anything beyond the end of a short read, an
2544          * easy way to do this is to pretend the data from the
2545          * parent came up short--ending at the overlap boundary.
2546          */
2547         rbd_assert(obj_request->img_offset < U64_MAX - obj_request->length);
2548         obj_end = obj_request->img_offset + obj_request->length;
2549         rbd_dev = obj_request->img_request->rbd_dev;
2550         if (obj_end > rbd_dev->parent_overlap) {
2551                 u64 xferred = 0;
2552
2553                 if (obj_request->img_offset < rbd_dev->parent_overlap)
2554                         xferred = rbd_dev->parent_overlap -
2555                                         obj_request->img_offset;
2556
2557                 obj_request->xferred = min(img_request->xferred, xferred);
2558         } else {
2559                 obj_request->xferred = img_request->xferred;
2560         }
2561 out:
2562         rbd_img_request_put(img_request);
2563         rbd_img_obj_request_read_callback(obj_request);
2564         rbd_obj_request_complete(obj_request);
2565 }
2566
2567 static void rbd_img_parent_read(struct rbd_obj_request *obj_request)
2568 {
2569         struct rbd_device *rbd_dev;
2570         struct rbd_img_request *img_request;
2571         int result;
2572
2573         rbd_assert(obj_request_img_data_test(obj_request));
2574         rbd_assert(obj_request->img_request != NULL);
2575         rbd_assert(obj_request->result == (s32) -ENOENT);
2576         rbd_assert(obj_request->type == OBJ_REQUEST_BIO);
2577
2578         rbd_dev = obj_request->img_request->rbd_dev;
2579         rbd_assert(rbd_dev->parent != NULL);
2580         /* rbd_read_finish(obj_request, obj_request->length); */
2581         img_request = rbd_img_request_create(rbd_dev->parent,
2582                                                 obj_request->img_offset,
2583                                                 obj_request->length,
2584                                                 false, true);
2585         result = -ENOMEM;
2586         if (!img_request)
2587                 goto out_err;
2588
2589         rbd_obj_request_get(obj_request);
2590         img_request->obj_request = obj_request;
2591
2592         result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
2593                                         obj_request->bio_list);
2594         if (result)
2595                 goto out_err;
2596
2597         img_request->callback = rbd_img_parent_read_callback;
2598         result = rbd_img_request_submit(img_request);
2599         if (result)
2600                 goto out_err;
2601
2602         return;
2603 out_err:
2604         if (img_request)
2605                 rbd_img_request_put(img_request);
2606         obj_request->result = result;
2607         obj_request->xferred = 0;
2608         obj_request_done_set(obj_request);
2609 }
2610
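/*
 * Acknowledge receipt of a notification on the header object;
 * rbd_watch_cb() below sends one of these for each event it handles.
 */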
2611 static int rbd_obj_notify_ack(struct rbd_device *rbd_dev, u64 notify_id)
2612 {
2613         struct rbd_obj_request *obj_request;
2614         struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2615         int ret;
2616
2617         obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
2618                                                         OBJ_REQUEST_NODATA);
2619         if (!obj_request)
2620                 return -ENOMEM;
2621
2622         ret = -ENOMEM;
2623         obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, obj_request);
2624         if (!obj_request->osd_req)
2625                 goto out;
2626         obj_request->callback = rbd_obj_request_put;
2627
2628         osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_NOTIFY_ACK,
2629                                         notify_id, 0, 0);
2630         rbd_osd_req_format_read(obj_request);
2631
2632         ret = rbd_obj_request_submit(osdc, obj_request);
2633 out:
2634         if (ret)
2635                 rbd_obj_request_put(obj_request);
2636
2637         return ret;
2638 }
2639
2640 static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
2641 {
2642         struct rbd_device *rbd_dev = (struct rbd_device *)data;
2643         int ret;
2644
2645         if (!rbd_dev)
2646                 return;
2647
2648         dout("%s: \"%s\" notify_id %llu opcode %u\n", __func__,
2649                 rbd_dev->header_name, (unsigned long long)notify_id,
2650                 (unsigned int)opcode);
2651         ret = rbd_dev_refresh(rbd_dev);
2652         if (ret)
2653                 rbd_warn(rbd_dev, ": header refresh error (%d)\n", ret);
2654
2655         rbd_obj_notify_ack(rbd_dev, notify_id);
2656 }
2657
2658 /*
2659  * Request sync osd watch/unwatch.  The value of "start" determines
2660  * whether a watch request is being initiated or torn down.
2661  */
2662 static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev, int start)
2663 {
2664         struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2665         struct rbd_obj_request *obj_request;
2666         int ret;
2667
2668         rbd_assert(start ^ !!rbd_dev->watch_event);
2669         rbd_assert(start ^ !!rbd_dev->watch_request);
2670
2671         if (start) {
2672                 ret = ceph_osdc_create_event(osdc, rbd_watch_cb, rbd_dev,
2673                                                 &rbd_dev->watch_event);
2674                 if (ret < 0)
2675                         return ret;
2676                 rbd_assert(rbd_dev->watch_event != NULL);
2677         }
2678
2679         ret = -ENOMEM;
2680         obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
2681                                                         OBJ_REQUEST_NODATA);
2682         if (!obj_request)
2683                 goto out_cancel;
2684
2685         obj_request->osd_req = rbd_osd_req_create(rbd_dev, true, obj_request);
2686         if (!obj_request->osd_req)
2687                 goto out_cancel;
2688
2689         if (start)
2690                 ceph_osdc_set_request_linger(osdc, obj_request->osd_req);
2691         else
2692                 ceph_osdc_unregister_linger_request(osdc,
2693                                         rbd_dev->watch_request->osd_req);
2694
2695         osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_WATCH,
2696                                 rbd_dev->watch_event->cookie, 0, start);
2697         rbd_osd_req_format_write(obj_request);
2698
2699         ret = rbd_obj_request_submit(osdc, obj_request);
2700         if (ret)
2701                 goto out_cancel;
2702         ret = rbd_obj_request_wait(obj_request);
2703         if (ret)
2704                 goto out_cancel;
2705         ret = obj_request->result;
2706         if (ret)
2707                 goto out_cancel;
2708
2709         /*
2710          * A watch request is set to linger, so the underlying osd
2711          * request won't go away until we unregister it.  We retain
2712          * a pointer to the object request during that time (in
2713          * rbd_dev->watch_request), so we'll keep a reference to
2714          * it.  We'll drop that reference (below) after we've
2715          * unregistered it.
2716          */
2717         if (start) {
2718                 rbd_dev->watch_request = obj_request;
2719
2720                 return 0;
2721         }
2722
2723         /* We have successfully torn down the watch request */
2724
2725         rbd_obj_request_put(rbd_dev->watch_request);
2726         rbd_dev->watch_request = NULL;
2727 out_cancel:
2728         /* Cancel the event if we're tearing down, or on error */
2729         ceph_osdc_cancel_event(rbd_dev->watch_event);
2730         rbd_dev->watch_event = NULL;
2731         if (obj_request)
2732                 rbd_obj_request_put(obj_request);
2733
2734         return ret;
2735 }
2736
2737 /*
2738  * Synchronous osd object method call.  Returns the number of bytes
2739  * returned in the inbound buffer, or a negative error code.
2740  */
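/*
 * (For instance, later in this file the size of a format 2 image is
 * fetched with a call along the lines of:
 *
 *      rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
 *                              "rbd", "get_size",
 *                              &snapid, sizeof (snapid),
 *                              &size_buf, sizeof (size_buf));
 * )
 */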
2741 static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
2742                              const char *object_name,
2743                              const char *class_name,
2744                              const char *method_name,
2745                              const void *outbound,
2746                              size_t outbound_size,
2747                              void *inbound,
2748                              size_t inbound_size)
2749 {
2750         struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2751         struct rbd_obj_request *obj_request;
2752         struct page **pages;
2753         u32 page_count;
2754         int ret;
2755
2756         /*
2757          * Method calls are ultimately read operations.  The result
2758  * should be placed into the inbound buffer provided.  They
2759          * also supply outbound data--parameters for the object
2760          * method.  Currently if this is present it will be a
2761          * snapshot id.
2762          */
2763         page_count = (u32)calc_pages_for(0, inbound_size);
2764         pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2765         if (IS_ERR(pages))
2766                 return PTR_ERR(pages);
2767
2768         ret = -ENOMEM;
2769         obj_request = rbd_obj_request_create(object_name, 0, inbound_size,
2770                                                         OBJ_REQUEST_PAGES);
2771         if (!obj_request)
2772                 goto out;
2773
2774         obj_request->pages = pages;
2775         obj_request->page_count = page_count;
2776
2777         obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, obj_request);
2778         if (!obj_request->osd_req)
2779                 goto out;
2780
2781         osd_req_op_cls_init(obj_request->osd_req, 0, CEPH_OSD_OP_CALL,
2782                                         class_name, method_name);
2783         if (outbound_size) {
2784                 struct ceph_pagelist *pagelist;
2785
2786                 pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS);
2787                 if (!pagelist)
2788                         goto out;
2789
2790                 ceph_pagelist_init(pagelist);
2791                 ceph_pagelist_append(pagelist, outbound, outbound_size);
2792                 osd_req_op_cls_request_data_pagelist(obj_request->osd_req, 0,
2793                                                 pagelist);
2794         }
2795         osd_req_op_cls_response_data_pages(obj_request->osd_req, 0,
2796                                         obj_request->pages, inbound_size,
2797                                         0, false, false);
2798         rbd_osd_req_format_read(obj_request);
2799
2800         ret = rbd_obj_request_submit(osdc, obj_request);
2801         if (ret)
2802                 goto out;
2803         ret = rbd_obj_request_wait(obj_request);
2804         if (ret)
2805                 goto out;
2806
2807         ret = obj_request->result;
2808         if (ret < 0)
2809                 goto out;
2810
2811         rbd_assert(obj_request->xferred < (u64)INT_MAX);
2812         ret = (int)obj_request->xferred;
2813         ceph_copy_from_page_vector(pages, inbound, 0, obj_request->xferred);
2814 out:
2815         if (obj_request)
2816                 rbd_obj_request_put(obj_request);
2817         else
2818                 ceph_release_page_vector(pages, page_count);
2819
2820         return ret;
2821 }
2822
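/*
 * The block layer request function.  Pulls requests off the queue
 * and turns each into an image request, dropping queue_lock while
 * the image request is built and submitted.
 */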
2823 static void rbd_request_fn(struct request_queue *q)
2824                 __releases(q->queue_lock) __acquires(q->queue_lock)
2825 {
2826         struct rbd_device *rbd_dev = q->queuedata;
2827         bool read_only = rbd_dev->mapping.read_only;
2828         struct request *rq;
2829         int result;
2830
2831         while ((rq = blk_fetch_request(q))) {
2832                 bool write_request = rq_data_dir(rq) == WRITE;
2833                 struct rbd_img_request *img_request;
2834                 u64 offset;
2835                 u64 length;
2836
2837                 /* Ignore any non-FS requests that filter through. */
2838
2839                 if (rq->cmd_type != REQ_TYPE_FS) {
2840                         dout("%s: non-fs request type %d\n", __func__,
2841                                 (int) rq->cmd_type);
2842                         __blk_end_request_all(rq, 0);
2843                         continue;
2844                 }
2845
2846                 /* Ignore/skip any zero-length requests */
2847
2848                 offset = (u64) blk_rq_pos(rq) << SECTOR_SHIFT;
2849                 length = (u64) blk_rq_bytes(rq);
2850
2851                 if (!length) {
2852                         dout("%s: zero-length request\n", __func__);
2853                         __blk_end_request_all(rq, 0);
2854                         continue;
2855                 }
2856
2857                 spin_unlock_irq(q->queue_lock);
2858
2859                 /* Disallow writes to a read-only device */
2860
2861                 if (write_request) {
2862                         result = -EROFS;
2863                         if (read_only)
2864                                 goto end_request;
2865                         rbd_assert(rbd_dev->spec->snap_id == CEPH_NOSNAP);
2866                 }
2867
2868                 /*
2869                  * Quit early if the mapped snapshot no longer
2870                  * exists.  It's still possible the snapshot will
2871                  * have disappeared by the time our request arrives
2872                  * at the osd, but there's no sense in sending it if
2873                  * we already know.
2874                  */
2875                 if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags)) {
2876                         dout("request for non-existent snapshot");
2877                         rbd_assert(rbd_dev->spec->snap_id != CEPH_NOSNAP);
2878                         result = -ENXIO;
2879                         goto end_request;
2880                 }
2881
2882                 result = -EINVAL;
2883                 if (offset && length > U64_MAX - offset + 1) {
2884                         rbd_warn(rbd_dev, "bad request range (%llu~%llu)\n",
2885                                 offset, length);
2886                         goto end_request;       /* Shouldn't happen */
2887                 }
2888
2889                 result = -EIO;
2890                 if (offset + length > rbd_dev->mapping.size) {
2891                         rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)\n",
2892                                 offset, length, rbd_dev->mapping.size);
2893                         goto end_request;
2894                 }
2895
2896                 result = -ENOMEM;
2897                 img_request = rbd_img_request_create(rbd_dev, offset, length,
2898                                                         write_request, false);
2899                 if (!img_request)
2900                         goto end_request;
2901
2902                 img_request->rq = rq;
2903
2904                 result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
2905                                                 rq->bio);
2906                 if (!result)
2907                         result = rbd_img_request_submit(img_request);
2908                 if (result)
2909                         rbd_img_request_put(img_request);
2910 end_request:
2911                 spin_lock_irq(q->queue_lock);
2912                 if (result < 0) {
2913                         rbd_warn(rbd_dev, "%s %llx at %llx result %d\n",
2914                                 write_request ? "write" : "read",
2915                                 length, offset, result);
2916
2917                         __blk_end_request_all(rq, result);
2918                 }
2919         }
2920 }
2921
2922 /*
2923  * A queue callback.  Makes sure that we don't create a bio that
2924  * spans multiple osd objects.  One exception would be with single-page
2925  * bios, which we handle later at bio_chain_clone_range().
2926  */
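/*
 * (A worked example, assuming the default 4 MiB object size: there
 * are 8192 512-byte sectors per object, so a bio beginning 8000
 * sectors into an object may grow by at most 192 sectors' worth,
 * i.e. 98304 bytes, less whatever the bio already holds.)
 */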
2927 static int rbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bmd,
2928                           struct bio_vec *bvec)
2929 {
2930         struct rbd_device *rbd_dev = q->queuedata;
2931         sector_t sector_offset;
2932         sector_t sectors_per_obj;
2933         sector_t obj_sector_offset;
2934         int ret;
2935
2936         /*
2937          * Find how far into its rbd object the bio's start sector
2938          * falls, after offsetting the partition-relative sector so it
2939          * is relative to the enclosing device.
2940          */
2941         sector_offset = get_start_sect(bmd->bi_bdev) + bmd->bi_sector;
2942         sectors_per_obj = 1 << (rbd_dev->header.obj_order - SECTOR_SHIFT);
2943         obj_sector_offset = sector_offset & (sectors_per_obj - 1);
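        /*
         * Worked example with illustrative numbers: for the common
         * object order of 22 (4 MiB objects), sectors_per_obj is
         * 1 << (22 - 9) = 8192.  A bio starting at device sector
         * 10000 is then 10000 & 8191 = 1808 sectors into its object,
         * leaving (8192 - 1808) << 9 = 3268608 bytes to that
         * object's end.
         */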
2944
2945         /*
2946          * Compute the number of bytes from that offset to the end
2947          * of the object.  Account for what's already used by the bio.
2948          */
2949         ret = (int) (sectors_per_obj - obj_sector_offset) << SECTOR_SHIFT;
2950         if (ret > bmd->bi_size)
2951                 ret -= bmd->bi_size;
2952         else
2953                 ret = 0;
2954
2955         /*
2956          * Don't send back more than was asked for.  And if the bio
2957          * was empty, let the whole thing through because:  "Note
2958          * that a block device *must* allow a single page to be
2959          * added to an empty bio."
2960          */
2961         rbd_assert(bvec->bv_len <= PAGE_SIZE);
2962         if (ret > (int) bvec->bv_len || !bmd->bi_size)
2963                 ret = (int) bvec->bv_len;
2964
2965         return ret;
2966 }
2967
2968 static void rbd_free_disk(struct rbd_device *rbd_dev)
2969 {
2970         struct gendisk *disk = rbd_dev->disk;
2971
2972         if (!disk)
2973                 return;
2974
2975         rbd_dev->disk = NULL;
2976         if (disk->flags & GENHD_FL_UP) {
2977                 del_gendisk(disk);
2978                 if (disk->queue)
2979                         blk_cleanup_queue(disk->queue);
2980         }
2981         put_disk(disk);
2982 }
2983
2984 static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
2985                                 const char *object_name,
2986                                 u64 offset, u64 length, void *buf)
2988 {
2989         struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2990         struct rbd_obj_request *obj_request;
2991         struct page **pages = NULL;
2992         u32 page_count;
2993         size_t size;
2994         int ret;
2995
2996         page_count = (u32) calc_pages_for(offset, length);
2997         pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2998         if (IS_ERR(pages))
2999                 return PTR_ERR(pages);
3000
3001         ret = -ENOMEM;
3002         obj_request = rbd_obj_request_create(object_name, offset, length,
3003                                                         OBJ_REQUEST_PAGES);
3004         if (!obj_request)
3005                 goto out;
3006
3007         obj_request->pages = pages;
3008         obj_request->page_count = page_count;
3009
3010         obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, obj_request);
3011         if (!obj_request->osd_req)
3012                 goto out;
3013
3014         osd_req_op_extent_init(obj_request->osd_req, 0, CEPH_OSD_OP_READ,
3015                                         offset, length, 0, 0);
3016         osd_req_op_extent_osd_data_pages(obj_request->osd_req, 0,
3017                                         obj_request->pages,
3018                                         obj_request->length,
3019                                         obj_request->offset & ~PAGE_MASK,
3020                                         false, false);
3021         rbd_osd_req_format_read(obj_request);
3022
3023         ret = rbd_obj_request_submit(osdc, obj_request);
3024         if (ret)
3025                 goto out;
3026         ret = rbd_obj_request_wait(obj_request);
3027         if (ret)
3028                 goto out;
3029
3030         ret = obj_request->result;
3031         if (ret < 0)
3032                 goto out;
3033
3034         rbd_assert(obj_request->xferred <= (u64) SIZE_MAX);
3035         size = (size_t) obj_request->xferred;
3036         ceph_copy_from_page_vector(pages, buf, 0, size);
3037         rbd_assert(size <= (size_t)INT_MAX);
3038         ret = (int)size;
3039 out:
3040         if (obj_request)
3041                 rbd_obj_request_put(obj_request);
3042         else
3043                 ceph_release_page_vector(pages, page_count);
3044
3045         return ret;
3046 }
3047
3048 /*
3049  * Read the complete header for the given rbd device.  On successful
3050  * return, the rbd_dev->header field will contain up-to-date
3051  * information about the image.
3052  */
3053 static int rbd_dev_v1_header_read(struct rbd_device *rbd_dev)
3054 {
3055         struct rbd_image_header_ondisk *ondisk = NULL;
3056         u32 snap_count = 0;
3057         u64 names_size = 0;
3058         u32 want_count;
3059         int ret;
3060
3061         /*
3062          * The complete header will include an array of its 64-bit
3063          * snapshot ids, followed by the names of those snapshots as
3064          * a contiguous block of NUL-terminated strings.  Note that
3065          * the number of snapshots could change by the time we read
3066          * it in, in which case we re-read it.
3067          */
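        /*
         * Sketch of the on-disk layout implied by the size
         * computation below:
         *
         *   struct rbd_image_header_ondisk          (fixed part)
         *   snap_count * struct rbd_image_snap_ondisk
         *   names_size bytes of NUL-terminated snapshot names
         *
         * snap_count and names_size start at zero and are taken
         * from each read, so the loop converges once two passes
         * agree on the snapshot count.
         */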
3068         do {
3069                 size_t size;
3070
3071                 kfree(ondisk);
3072
3073                 size = sizeof (*ondisk);
3074                 size += snap_count * sizeof (struct rbd_image_snap_ondisk);
3075                 size += names_size;
3076                 ondisk = kmalloc(size, GFP_KERNEL);
3077                 if (!ondisk)
3078                         return -ENOMEM;
3079
3080                 ret = rbd_obj_read_sync(rbd_dev, rbd_dev->header_name,
3081                                        0, size, ondisk);
3082                 if (ret < 0)
3083                         goto out;
3084                 if ((size_t)ret < size) {
3085                         ret = -ENXIO;
3086                         rbd_warn(rbd_dev, "short header read (want %zu got %d)",
3087                                 size, ret);
3088                         goto out;
3089                 }
3090                 if (!rbd_dev_ondisk_valid(ondisk)) {
3091                         ret = -ENXIO;
3092                         rbd_warn(rbd_dev, "invalid header");
3093                         goto out;
3094                 }
3095
3096                 names_size = le64_to_cpu(ondisk->snap_names_len);
3097                 want_count = snap_count;
3098                 snap_count = le32_to_cpu(ondisk->snap_count);
3099         } while (snap_count != want_count);
3100
3101         ret = rbd_header_from_disk(rbd_dev, ondisk);
3102 out:
3103         kfree(ondisk);
3104
3105         return ret;
3106 }
3107
3108 /*
3109  * Re-read the complete on-disk header (including snapshot info) for a v1 image
3110  */
3111 static int rbd_dev_v1_refresh(struct rbd_device *rbd_dev)
3112 {
3113         return rbd_dev_v1_header_read(rbd_dev);
3114 }
3115
3116 /*
3117  * Clear the rbd device's EXISTS flag if the snapshot it's mapped to
3118  * has disappeared from the (just updated) snapshot context.
3119  */
3120 static void rbd_exists_validate(struct rbd_device *rbd_dev)
3121 {
3122         u64 snap_id;
3123
3124         if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags))
3125                 return;
3126
3127         snap_id = rbd_dev->spec->snap_id;
3128         if (snap_id == CEPH_NOSNAP)
3129                 return;
3130
3131         if (rbd_dev_snap_index(rbd_dev, snap_id) == BAD_SNAP_INDEX)
3132                 clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
3133 }
3134
3135 static int rbd_dev_refresh(struct rbd_device *rbd_dev)
3136 {
3137         u64 mapping_size;
3138         int ret;
3139
3140         rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
3141         mapping_size = rbd_dev->mapping.size;
3142         mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
3143         if (rbd_dev->image_format == 1)
3144                 ret = rbd_dev_v1_refresh(rbd_dev);
3145         else
3146                 ret = rbd_dev_v2_refresh(rbd_dev);
3147
3148         /* If it's a mapped snapshot, validate its EXISTS flag */
3149
3150         rbd_exists_validate(rbd_dev);
3151         mutex_unlock(&ctl_mutex);
3152         if (mapping_size != rbd_dev->mapping.size) {
3153                 sector_t size;
3154
3155                 size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
3156                 dout("setting size to %llu sectors", (unsigned long long)size);
3157                 set_capacity(rbd_dev->disk, size);
3158                 revalidate_disk(rbd_dev->disk);
3159         }
3160
3161         return ret;
3162 }
3163
3164 static int rbd_init_disk(struct rbd_device *rbd_dev)
3165 {
3166         struct gendisk *disk;
3167         struct request_queue *q;
3168         u64 segment_size;
3169
3170         /* create gendisk info */
3171         disk = alloc_disk(RBD_MINORS_PER_MAJOR);
3172         if (!disk)
3173                 return -ENOMEM;
3174
3175         snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
3176                  rbd_dev->dev_id);
3177         disk->major = rbd_dev->major;
3178         disk->first_minor = 0;
3179         disk->fops = &rbd_bd_ops;
3180         disk->private_data = rbd_dev;
3181
3182         q = blk_init_queue(rbd_request_fn, &rbd_dev->lock);
3183         if (!q)
3184                 goto out_disk;
3185
3186         /* We use the default size, but let's be explicit about it. */
3187         blk_queue_physical_block_size(q, SECTOR_SIZE);
3188
3189         /* set io sizes to object size */
3190         segment_size = rbd_obj_bytes(&rbd_dev->header);
3191         blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE);
3192         blk_queue_max_segment_size(q, segment_size);
3193         blk_queue_io_min(q, segment_size);
3194         blk_queue_io_opt(q, segment_size);
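        /*
         * Illustrative numbers: with the common 4 MiB object size,
         * segment_size is 4194304 bytes, so the queue advertises
         * 8192-sector (4 MiB) maximum I/Os aligned to whole objects.
         */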
3195
3196         blk_queue_merge_bvec(q, rbd_merge_bvec);
3197         disk->queue = q;
3198
3199         q->queuedata = rbd_dev;
3200
3201         rbd_dev->disk = disk;
3202
3203         return 0;
3204 out_disk:
3205         put_disk(disk);
3206
3207         return -ENOMEM;
3208 }
3209
3210 /*
3211   sysfs
3212 */
3213
3214 static struct rbd_device *dev_to_rbd_dev(struct device *dev)
3215 {
3216         return container_of(dev, struct rbd_device, dev);
3217 }
3218
3219 static ssize_t rbd_size_show(struct device *dev,
3220                              struct device_attribute *attr, char *buf)
3221 {
3222         struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3223
3224         return sprintf(buf, "%llu\n",
3225                 (unsigned long long)rbd_dev->mapping.size);
3226 }
3227
3228 /*
3229  * Note this shows the features for whatever's mapped, which is not
3230  * necessarily the base image.
3231  */
3232 static ssize_t rbd_features_show(struct device *dev,
3233                              struct device_attribute *attr, char *buf)
3234 {
3235         struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3236
3237         return sprintf(buf, "0x%016llx\n",
3238                         (unsigned long long)rbd_dev->mapping.features);
3239 }
3240
3241 static ssize_t rbd_major_show(struct device *dev,
3242                               struct device_attribute *attr, char *buf)
3243 {
3244         struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3245
3246         if (rbd_dev->major)
3247                 return sprintf(buf, "%d\n", rbd_dev->major);
3248
3249         return sprintf(buf, "(none)\n");
3251 }
3252
3253 static ssize_t rbd_client_id_show(struct device *dev,
3254                                   struct device_attribute *attr, char *buf)
3255 {
3256         struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3257
3258         return sprintf(buf, "client%lld\n",
3259                         ceph_client_id(rbd_dev->rbd_client->client));
3260 }
3261
3262 static ssize_t rbd_pool_show(struct device *dev,
3263                              struct device_attribute *attr, char *buf)
3264 {
3265         struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3266
3267         return sprintf(buf, "%s\n", rbd_dev->spec->pool_name);
3268 }
3269
3270 static ssize_t rbd_pool_id_show(struct device *dev,
3271                              struct device_attribute *attr, char *buf)
3272 {
3273         struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3274
3275         return sprintf(buf, "%llu\n",
3276                         (unsigned long long) rbd_dev->spec->pool_id);
3277 }
3278
3279 static ssize_t rbd_name_show(struct device *dev,
3280                              struct device_attribute *attr, char *buf)
3281 {
3282         struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3283
3284         if (rbd_dev->spec->image_name)
3285                 return sprintf(buf, "%s\n", rbd_dev->spec->image_name);
3286
3287         return sprintf(buf, "(unknown)\n");
3288 }
3289
3290 static ssize_t rbd_image_id_show(struct device *dev,
3291                              struct device_attribute *attr, char *buf)
3292 {
3293         struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3294
3295         return sprintf(buf, "%s\n", rbd_dev->spec->image_id);
3296 }
3297
3298 /*
3299  * Shows the name of the currently-mapped snapshot (or
3300  * RBD_SNAP_HEAD_NAME for the base image).
3301  */
3302 static ssize_t rbd_snap_show(struct device *dev,
3303                              struct device_attribute *attr,
3304                              char *buf)
3305 {
3306         struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3307
3308         return sprintf(buf, "%s\n", rbd_dev->spec->snap_name);
3309 }
3310
3311 /*
3312  * For an rbd v2 image, shows the pool id, image id, and snapshot id
3313  * for the parent image.  If there is no parent, simply shows
3314  * "(no parent image)".
3315  */
3316 static ssize_t rbd_parent_show(struct device *dev,
3317                              struct device_attribute *attr,
3318                              char *buf)
3319 {
3320         struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3321         struct rbd_spec *spec = rbd_dev->parent_spec;
3322         int count;
3323         char *bufp = buf;
3324
3325         if (!spec)
3326                 return sprintf(buf, "(no parent image)\n");
3327
3328         count = sprintf(bufp, "pool_id %llu\npool_name %s\n",
3329                         (unsigned long long) spec->pool_id, spec->pool_name);
3330         if (count < 0)
3331                 return count;
3332         bufp += count;
3333
3334         count = sprintf(bufp, "image_id %s\nimage_name %s\n", spec->image_id,
3335                         spec->image_name ? spec->image_name : "(unknown)");
3336         if (count < 0)
3337                 return count;
3338         bufp += count;
3339
3340         count = sprintf(bufp, "snap_id %llu\nsnap_name %s\n",
3341                         (unsigned long long) spec->snap_id, spec->snap_name);
3342         if (count < 0)
3343                 return count;
3344         bufp += count;
3345
3346         count = sprintf(bufp, "overlap %llu\n", rbd_dev->parent_overlap);
3347         if (count < 0)
3348                 return count;
3349         bufp += count;
3350
3351         return (ssize_t) (bufp - buf);
3352 }
3353
3354 static ssize_t rbd_image_refresh(struct device *dev,
3355                                  struct device_attribute *attr,
3356                                  const char *buf,
3357                                  size_t size)
3358 {
3359         struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3360         int ret;
3361
3362         ret = rbd_dev_refresh(rbd_dev);
3363         if (ret)
3364                 rbd_warn(rbd_dev, "manual header refresh error (%d)\n", ret);
3365
3366         return ret < 0 ? ret : size;
3367 }
3368
3369 static DEVICE_ATTR(size, S_IRUGO, rbd_size_show, NULL);
3370 static DEVICE_ATTR(features, S_IRUGO, rbd_features_show, NULL);
3371 static DEVICE_ATTR(major, S_IRUGO, rbd_major_show, NULL);
3372 static DEVICE_ATTR(client_id, S_IRUGO, rbd_client_id_show, NULL);
3373 static DEVICE_ATTR(pool, S_IRUGO, rbd_pool_show, NULL);
3374 static DEVICE_ATTR(pool_id, S_IRUGO, rbd_pool_id_show, NULL);
3375 static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL);
3376 static DEVICE_ATTR(image_id, S_IRUGO, rbd_image_id_show, NULL);
3377 static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh);
3378 static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL);
3379 static DEVICE_ATTR(parent, S_IRUGO, rbd_parent_show, NULL);
3380
3381 static struct attribute *rbd_attrs[] = {
3382         &dev_attr_size.attr,
3383         &dev_attr_features.attr,
3384         &dev_attr_major.attr,
3385         &dev_attr_client_id.attr,
3386         &dev_attr_pool.attr,
3387         &dev_attr_pool_id.attr,
3388         &dev_attr_name.attr,
3389         &dev_attr_image_id.attr,
3390         &dev_attr_current_snap.attr,
3391         &dev_attr_parent.attr,
3392         &dev_attr_refresh.attr,
3393         NULL
3394 };
3395
3396 static struct attribute_group rbd_attr_group = {
3397         .attrs = rbd_attrs,
3398 };
3399
3400 static const struct attribute_group *rbd_attr_groups[] = {
3401         &rbd_attr_group,
3402         NULL
3403 };
3404
3405 static void rbd_sysfs_dev_release(struct device *dev)
3406 {
3407 }
3408
3409 static struct device_type rbd_device_type = {
3410         .name           = "rbd",
3411         .groups         = rbd_attr_groups,
3412         .release        = rbd_sysfs_dev_release,
3413 };
3414
3415 static struct rbd_spec *rbd_spec_get(struct rbd_spec *spec)
3416 {
3417         kref_get(&spec->kref);
3418
3419         return spec;
3420 }
3421
3422 static void rbd_spec_free(struct kref *kref);
3423 static void rbd_spec_put(struct rbd_spec *spec)
3424 {
3425         if (spec)
3426                 kref_put(&spec->kref, rbd_spec_free);
3427 }
3428
3429 static struct rbd_spec *rbd_spec_alloc(void)
3430 {
3431         struct rbd_spec *spec;
3432
3433         spec = kzalloc(sizeof (*spec), GFP_KERNEL);
3434         if (!spec)
3435                 return NULL;
3436         kref_init(&spec->kref);
3437
3438         return spec;
3439 }
3440
3441 static void rbd_spec_free(struct kref *kref)
3442 {
3443         struct rbd_spec *spec = container_of(kref, struct rbd_spec, kref);
3444
3445         kfree(spec->pool_name);
3446         kfree(spec->image_id);
3447         kfree(spec->image_name);
3448         kfree(spec->snap_name);
3449         kfree(spec);
3450 }
3451
3452 static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
3453                                 struct rbd_spec *spec)
3454 {
3455         struct rbd_device *rbd_dev;
3456
3457         rbd_dev = kzalloc(sizeof (*rbd_dev), GFP_KERNEL);
3458         if (!rbd_dev)
3459                 return NULL;
3460
3461         spin_lock_init(&rbd_dev->lock);
3462         rbd_dev->flags = 0;
3463         INIT_LIST_HEAD(&rbd_dev->node);
3464         init_rwsem(&rbd_dev->header_rwsem);
3465
3466         rbd_dev->spec = spec;
3467         rbd_dev->rbd_client = rbdc;
3468
3469         /* Initialize the layout used for all rbd requests */
3470
3471         rbd_dev->layout.fl_stripe_unit = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
3472         rbd_dev->layout.fl_stripe_count = cpu_to_le32(1);
3473         rbd_dev->layout.fl_object_size = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
3474         rbd_dev->layout.fl_pg_pool = cpu_to_le32((u32) spec->pool_id);
3475
3476         return rbd_dev;
3477 }
3478
3479 static void rbd_dev_destroy(struct rbd_device *rbd_dev)
3480 {
3481         rbd_put_client(rbd_dev->rbd_client);
3482         rbd_spec_put(rbd_dev->spec);
3483         kfree(rbd_dev);
3484 }
3485
3486 /*
3487  * Get the size and object order for an image snapshot, or if
3488  * snap_id is CEPH_NOSNAP, gets this information for the base
3489  * image.
3490  */
3491 static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
3492                                 u8 *order, u64 *snap_size)
3493 {
3494         __le64 snapid = cpu_to_le64(snap_id);
3495         int ret;
3496         struct {
3497                 u8 order;
3498                 __le64 size;
3499         } __attribute__ ((packed)) size_buf = { 0 };
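        /*
         * The "get_size" class method replies with the object order
         * (a power-of-two exponent; e.g. an order of 22 means 4 MiB
         * objects) followed by the little-endian image or snapshot
         * size in bytes, matching size_buf above.
         */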
3500
3501         ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3502                                 "rbd", "get_size",
3503                                 &snapid, sizeof (snapid),
3504                                 &size_buf, sizeof (size_buf));
3505         dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3506         if (ret < 0)
3507                 return ret;
3508         if (ret < sizeof (size_buf))
3509                 return -ERANGE;
3510
3511         if (order)
3512                 *order = size_buf.order;
3513         *snap_size = le64_to_cpu(size_buf.size);
3514
3515         dout("  snap_id 0x%016llx order = %u, snap_size = %llu\n",
3516                 (unsigned long long)snap_id, (unsigned int)size_buf.order,
3517                 (unsigned long long)*snap_size);
3518
3519         return 0;
3520 }
3521
3522 static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev)
3523 {
3524         return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
3525                                         &rbd_dev->header.obj_order,
3526                                         &rbd_dev->header.image_size);
3527 }
3528
3529 static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
3530 {
3531         void *reply_buf;
3532         int ret;
3533         void *p;
3534
3535         reply_buf = kzalloc(RBD_OBJ_PREFIX_LEN_MAX, GFP_KERNEL);
3536         if (!reply_buf)
3537                 return -ENOMEM;
3538
3539         ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3540                                 "rbd", "get_object_prefix", NULL, 0,
3541                                 reply_buf, RBD_OBJ_PREFIX_LEN_MAX);
3542         dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3543         if (ret < 0)
3544                 goto out;
3545
3546         p = reply_buf;
3547         rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
3548                                                 p + ret, NULL, GFP_NOIO);
3549         ret = 0;
3550
3551         if (IS_ERR(rbd_dev->header.object_prefix)) {
3552                 ret = PTR_ERR(rbd_dev->header.object_prefix);
3553                 rbd_dev->header.object_prefix = NULL;
3554         } else {
3555                 dout("  object_prefix = %s\n", rbd_dev->header.object_prefix);
3556         }
3557 out:
3558         kfree(reply_buf);
3559
3560         return ret;
3561 }
3562
3563 static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
3564                 u64 *snap_features)
3565 {
3566         __le64 snapid = cpu_to_le64(snap_id);
3567         struct {
3568                 __le64 features;
3569                 __le64 incompat;
3570         } __attribute__ ((packed)) features_buf = { 0 };
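        /*
         * "get_features" reports two masks: every feature the image
         * uses, and the subset (incompat) a client must understand
         * in order to map the image safely.  Only unknown incompat
         * bits cause the probe to fail below.
         */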
3571         u64 incompat;
3572         int ret;
3573
3574         ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3575                                 "rbd", "get_features",
3576                                 &snapid, sizeof (snapid),
3577                                 &features_buf, sizeof (features_buf));
3578         dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3579         if (ret < 0)
3580                 return ret;
3581         if (ret < sizeof (features_buf))
3582                 return -ERANGE;
3583
3584         incompat = le64_to_cpu(features_buf.incompat);
3585         if (incompat & ~RBD_FEATURES_SUPPORTED)
3586                 return -ENXIO;
3587
3588         *snap_features = le64_to_cpu(features_buf.features);
3589
3590         dout("  snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n",
3591                 (unsigned long long)snap_id,
3592                 (unsigned long long)*snap_features,
3593                 (unsigned long long)le64_to_cpu(features_buf.incompat));
3594
3595         return 0;
3596 }
3597
3598 static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
3599 {
3600         return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
3601                                                 &rbd_dev->header.features);
3602 }
3603
3604 static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
3605 {
3606         struct rbd_spec *parent_spec;
3607         size_t size;
3608         void *reply_buf = NULL;
3609         __le64 snapid;
3610         void *p;
3611         void *end;
3612         char *image_id;
3613         u64 overlap;
3614         int ret;
3615
3616         parent_spec = rbd_spec_alloc();
3617         if (!parent_spec)
3618                 return -ENOMEM;
3619
3620         size = sizeof (__le64) +                                /* pool_id */
3621                 sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX +        /* image_id */
3622                 sizeof (__le64) +                               /* snap_id */
3623                 sizeof (__le64);                                /* overlap */
3624         reply_buf = kmalloc(size, GFP_KERNEL);
3625         if (!reply_buf) {
3626                 ret = -ENOMEM;
3627                 goto out_err;
3628         }
3629
3630         snapid = cpu_to_le64(CEPH_NOSNAP);
3631         ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3632                                 "rbd", "get_parent",
3633                                 &snapid, sizeof (snapid),
3634                                 reply_buf, size);
3635         dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3636         if (ret < 0)
3637                 goto out_err;
3638
3639         p = reply_buf;
3640         end = reply_buf + ret;
3641         ret = -ERANGE;
3642         ceph_decode_64_safe(&p, end, parent_spec->pool_id, out_err);
3643         if (parent_spec->pool_id == CEPH_NOPOOL)
3644                 goto out;       /* No parent?  No problem. */
3645
3646         /* The ceph file layout needs to fit pool id in 32 bits */
3647
3648         ret = -EIO;
3649         if (parent_spec->pool_id > (u64)U32_MAX) {
3650                 rbd_warn(NULL, "parent pool id too large (%llu > %u)\n",
3651                         (unsigned long long)parent_spec->pool_id, U32_MAX);
3652                 goto out_err;
3653         }
3654
3655         image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
3656         if (IS_ERR(image_id)) {
3657                 ret = PTR_ERR(image_id);
3658                 goto out_err;
3659         }
3660         parent_spec->image_id = image_id;
3661         ceph_decode_64_safe(&p, end, parent_spec->snap_id, out_err);
3662         ceph_decode_64_safe(&p, end, overlap, out_err);
3663
3664         rbd_dev->parent_overlap = overlap;
3665         rbd_dev->parent_spec = parent_spec;
3666         parent_spec = NULL;     /* rbd_dev now owns this */
3667 out:
3668         ret = 0;
3669 out_err:
3670         kfree(reply_buf);
3671         rbd_spec_put(parent_spec);
3672
3673         return ret;
3674 }
3675
3676 static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
3677 {
3678         struct {
3679                 __le64 stripe_unit;
3680                 __le64 stripe_count;
3681         } __attribute__ ((packed)) striping_info_buf = { 0 };
3682         size_t size = sizeof (striping_info_buf);
3683         void *p;
3684         u64 obj_size;
3685         u64 stripe_unit;
3686         u64 stripe_count;
3687         int ret;
3688
3689         ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3690                                 "rbd", "get_stripe_unit_count", NULL, 0,
3691                                 (char *)&striping_info_buf, size);
3692         dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3693         if (ret < 0)
3694                 return ret;
3695         if (ret < size)
3696                 return -ERANGE;
3697
3698         /*
3699          * We don't actually support the "fancy striping" feature
3700          * (STRIPINGV2) yet, but if the striping sizes are the
3701          * defaults the behavior is the same as before.  So find
3702          * out, and only fail if the image has non-default values.
3703          */
3704         ret = -EINVAL;
3705         obj_size = (u64)1 << rbd_dev->header.obj_order;
3706         p = &striping_info_buf;
3707         stripe_unit = ceph_decode_64(&p);
3708         if (stripe_unit != obj_size) {
3709                 rbd_warn(rbd_dev, "unsupported stripe unit "
3710                                 "(got %llu want %llu)",
3711                                 stripe_unit, obj_size);
3712                 return -EINVAL;
3713         }
3714         stripe_count = ceph_decode_64(&p);
3715         if (stripe_count != 1) {
3716                 rbd_warn(rbd_dev, "unsupported stripe count "
3717                                 "(got %llu want 1)", stripe_count);
3718                 return -EINVAL;
3719         }
3720         rbd_dev->header.stripe_unit = stripe_unit;
3721         rbd_dev->header.stripe_count = stripe_count;
3722
3723         return 0;
3724 }
3725
3726 static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
3727 {
3728         size_t image_id_size;
3729         char *image_id;
3730         void *p;
3731         void *end;
3732         size_t size;
3733         void *reply_buf = NULL;
3734         size_t len = 0;
3735         char *image_name = NULL;
3736         int ret;
3737
3738         rbd_assert(!rbd_dev->spec->image_name);
3739
3740         len = strlen(rbd_dev->spec->image_id);
3741         image_id_size = sizeof (__le32) + len;
3742         image_id = kmalloc(image_id_size, GFP_KERNEL);
3743         if (!image_id)
3744                 return NULL;
3745
3746         p = image_id;
3747         end = image_id + image_id_size;
3748         ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32)len);
3749
3750         size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX;
3751         reply_buf = kmalloc(size, GFP_KERNEL);
3752         if (!reply_buf)
3753                 goto out;
3754
3755         ret = rbd_obj_method_sync(rbd_dev, RBD_DIRECTORY,
3756                                 "rbd", "dir_get_name",
3757                                 image_id, image_id_size,
3758                                 reply_buf, size);
3759         if (ret < 0)
3760                 goto out;
3761         p = reply_buf;
3762         end = reply_buf + ret;
3763
3764         image_name = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
3765         if (IS_ERR(image_name))
3766                 image_name = NULL;
3767         else
3768                 dout("%s: name is %s len is %zd\n", __func__, image_name, len);
3769 out:
3770         kfree(reply_buf);
3771         kfree(image_id);
3772
3773         return image_name;
3774 }
3775
3776 static u64 rbd_v1_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
3777 {
3778         struct ceph_snap_context *snapc = rbd_dev->header.snapc;
3779         const char *snap_name;
3780         u32 which = 0;
3781
3782         /* Skip over names until we find the one we are looking for */
3783
3784         snap_name = rbd_dev->header.snap_names;
3785         while (which < snapc->num_snaps) {
3786                 if (!strcmp(name, snap_name))
3787                         return snapc->snaps[which];
3788                 snap_name += strlen(snap_name) + 1;
3789                 which++;
3790         }
3791         return CEPH_NOSNAP;
3792 }
3793
3794 static u64 rbd_v2_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
3795 {
3796         struct ceph_snap_context *snapc = rbd_dev->header.snapc;
3797         u32 which;
3798         bool found = false;
3799         u64 snap_id;
3800
3801         for (which = 0; !found && which < snapc->num_snaps; which++) {
3802                 const char *snap_name;
3803
3804                 snap_id = snapc->snaps[which];
3805                 snap_name = rbd_dev_v2_snap_name(rbd_dev, snap_id);
3806                 if (IS_ERR(snap_name))
3807                         break;
3808                 found = !strcmp(name, snap_name);
3809                 kfree(snap_name);
3810         }
3811         return found ? snap_id : CEPH_NOSNAP;
3812 }
3813
3814 /*
3815  * Assumes name is never RBD_SNAP_HEAD_NAME; returns CEPH_NOSNAP if
3816  * no snapshot by that name is found, or if an error occurs.
3817  */
3818 static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
3819 {
3820         if (rbd_dev->image_format == 1)
3821                 return rbd_v1_snap_id_by_name(rbd_dev, name);
3822
3823         return rbd_v2_snap_id_by_name(rbd_dev, name);
3824 }
3825
3826 /*
3827  * When an rbd image has a parent image, it is identified by the
3828  * pool, image, and snapshot ids (not names).  This function fills
3829  * in the names for those ids.  (It's OK if we can't figure out the
3830  * name for an image id, but the pool and snapshot ids should always
3831  * exist and have names.)  All names in an rbd spec are dynamically
3832  * allocated.
3833  *
3834  * When an image being mapped (not a parent) is probed, we have the
3835  * pool name and pool id, image name and image id, and the snapshot
3836  * name.  The only thing we're missing is the snapshot id.
3837  */
3838 static int rbd_dev_spec_update(struct rbd_device *rbd_dev)
3839 {
3840         struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3841         struct rbd_spec *spec = rbd_dev->spec;
3842         const char *pool_name;
3843         const char *image_name;
3844         const char *snap_name;
3845         int ret;
3846
3847         /*
3848          * An image being mapped will have the pool name (etc.), but
3849          * we need to look up the snapshot id.
3850          */
3851         if (spec->pool_name) {
3852                 if (strcmp(spec->snap_name, RBD_SNAP_HEAD_NAME)) {
3853                         u64 snap_id;
3854
3855                         snap_id = rbd_snap_id_by_name(rbd_dev, spec->snap_name);
3856                         if (snap_id == CEPH_NOSNAP)
3857                                 return -ENOENT;
3858                         spec->snap_id = snap_id;
3859                 } else {
3860                         spec->snap_id = CEPH_NOSNAP;
3861                 }
3862
3863                 return 0;
3864         }
3865
3866         /* Get the pool name; we have to make our own copy of this */
3867
3868         pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, spec->pool_id);
3869         if (!pool_name) {
3870                 rbd_warn(rbd_dev, "no pool with id %llu", spec->pool_id);
3871                 return -EIO;
3872         }
3873         pool_name = kstrdup(pool_name, GFP_KERNEL);
3874         if (!pool_name)
3875                 return -ENOMEM;
3876
3877         /* Fetch the image name; tolerate failure here */
3878
3879         image_name = rbd_dev_image_name(rbd_dev);
3880         if (!image_name)
3881                 rbd_warn(rbd_dev, "unable to get image name");
3882
3883         /* Look up the snapshot name, and make a copy */
3884
3885         snap_name = rbd_snap_name(rbd_dev, spec->snap_id);
3886         if (!snap_name) {
3887                 ret = -ENOMEM;
3888                 goto out_err;
3889         }
3890
3891         spec->pool_name = pool_name;
3892         spec->image_name = image_name;
3893         spec->snap_name = snap_name;
3894
3895         return 0;
3896 out_err:
3897         kfree(image_name);
3898         kfree(pool_name);
3899
3900         return ret;
3901 }
3902
3903 static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
3904 {
3905         size_t size;
3906         int ret;
3907         void *reply_buf;
3908         void *p;
3909         void *end;
3910         u64 seq;
3911         u32 snap_count;
3912         struct ceph_snap_context *snapc;
3913         u32 i;
3914
3915         /*
3916          * We'll need room for the seq value (maximum snapshot id),
3917          * snapshot count, and array of that many snapshot ids.
3918          * For now we have a fixed upper limit on the number we're
3919          * prepared to receive.
3920          */
3921         size = sizeof (__le64) + sizeof (__le32) +
3922                         RBD_MAX_SNAP_COUNT * sizeof (__le64);
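        /*
         * With RBD_MAX_SNAP_COUNT of 510 this is
         * 8 + 4 + 510 * 8 = 4092 bytes, so a maximal snapshot
         * context reply fits within a single 4 KB page.
         */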
3923         reply_buf = kzalloc(size, GFP_KERNEL);
3924         if (!reply_buf)
3925                 return -ENOMEM;
3926
3927         ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3928                                 "rbd", "get_snapcontext", NULL, 0,
3929                                 reply_buf, size);
3930         dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3931         if (ret < 0)
3932                 goto out;
3933
3934         p = reply_buf;
3935         end = reply_buf + ret;
3936         ret = -ERANGE;
3937         ceph_decode_64_safe(&p, end, seq, out);
3938         ceph_decode_32_safe(&p, end, snap_count, out);
3939
3940         /*
3941          * Make sure the reported number of snapshot ids wouldn't go
3942          * beyond the end of our buffer.  But before checking that,
3943          * make sure the computed size of the snapshot context we
3944          * allocate is representable in a size_t.
3945          */
3946         if (snap_count > (SIZE_MAX - sizeof (struct ceph_snap_context))
3947                                  / sizeof (u64)) {
3948                 ret = -EINVAL;
3949                 goto out;
3950         }
3951         if (!ceph_has_room(&p, end, snap_count * sizeof (__le64)))
3952                 goto out;
3953         ret = 0;
3954
3955         snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
3956         if (!snapc) {
3957                 ret = -ENOMEM;
3958                 goto out;
3959         }
3960         snapc->seq = seq;
3961         for (i = 0; i < snap_count; i++)
3962                 snapc->snaps[i] = ceph_decode_64(&p);
3963
3964         ceph_put_snap_context(rbd_dev->header.snapc);
3965         rbd_dev->header.snapc = snapc;
3966
3967         dout("  snap context seq = %llu, snap_count = %u\n",
3968                 (unsigned long long)seq, (unsigned int)snap_count);
3969 out:
3970         kfree(reply_buf);
3971
3972         return ret;
3973 }
3974
3975 static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
3976                                         u64 snap_id)
3977 {
3978         size_t size;
3979         void *reply_buf;
3980         __le64 snapid;
3981         int ret;
3982         void *p;
3983         void *end;
3984         char *snap_name;
3985
3986         size = sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN;
3987         reply_buf = kmalloc(size, GFP_KERNEL);
3988         if (!reply_buf)
3989                 return ERR_PTR(-ENOMEM);
3990
3991         snapid = cpu_to_le64(snap_id);
3992         ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3993                                 "rbd", "get_snapshot_name",
3994                                 &snapid, sizeof (snapid),
3995                                 reply_buf, size);
3996         dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3997         if (ret < 0) {
3998                 snap_name = ERR_PTR(ret);
3999                 goto out;
4000         }
4001
4002         p = reply_buf;
4003         end = reply_buf + ret;
4004         snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
4005         if (IS_ERR(snap_name))
4006                 goto out;
4007
4008         dout("  snap_id 0x%016llx snap_name = %s\n",
4009                 (unsigned long long)snap_id, snap_name);
4010 out:
4011         kfree(reply_buf);
4012
4013         return snap_name;
4014 }
4015
4016 static int rbd_dev_v2_refresh(struct rbd_device *rbd_dev)
4017 {
4018         int ret;
4019
4020         down_write(&rbd_dev->header_rwsem);
4021
4022         ret = rbd_dev_v2_image_size(rbd_dev);
4023         if (ret)
4024                 goto out;
4025         if (rbd_dev->spec->snap_id == CEPH_NOSNAP &&
4026             rbd_dev->mapping.size != rbd_dev->header.image_size)
4027                 rbd_dev->mapping.size = rbd_dev->header.image_size;
4028
4029         ret = rbd_dev_v2_snap_context(rbd_dev);
4030         dout("rbd_dev_v2_snap_context returned %d\n", ret);
4033 out:
4034         up_write(&rbd_dev->header_rwsem);
4035
4036         return ret;
4037 }
4038
4039 static int rbd_bus_add_dev(struct rbd_device *rbd_dev)
4040 {
4041         struct device *dev;
4042         int ret;
4043
4044         mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
4045
4046         dev = &rbd_dev->dev;
4047         dev->bus = &rbd_bus_type;
4048         dev->type = &rbd_device_type;
4049         dev->parent = &rbd_root_dev;
4050         dev->release = rbd_dev_device_release;
4051         dev_set_name(dev, "%d", rbd_dev->dev_id);
4052         ret = device_register(dev);
4053
4054         mutex_unlock(&ctl_mutex);
4055
4056         return ret;
4057 }
4058
4059 static void rbd_bus_del_dev(struct rbd_device *rbd_dev)
4060 {
4061         device_unregister(&rbd_dev->dev);
4062 }
4063
4064 static atomic64_t rbd_dev_id_max = ATOMIC64_INIT(0);
4065
4066 /*
4067  * Get a unique rbd identifier for the given new rbd_dev, and add
4068  * the rbd_dev to the global list.  The minimum rbd id is 1.
4069  */
4070 static void rbd_dev_id_get(struct rbd_device *rbd_dev)
4071 {
4072         rbd_dev->dev_id = atomic64_inc_return(&rbd_dev_id_max);
4073
4074         spin_lock(&rbd_dev_list_lock);
4075         list_add_tail(&rbd_dev->node, &rbd_dev_list);
4076         spin_unlock(&rbd_dev_list_lock);
4077         dout("rbd_dev %p given dev id %llu\n", rbd_dev,
4078                 (unsigned long long) rbd_dev->dev_id);
4079 }
4080
4081 /*
4082  * Remove an rbd_dev from the global list, and record that its
4083  * identifier is no longer in use.
4084  */
4085 static void rbd_dev_id_put(struct rbd_device *rbd_dev)
4086 {
4087         struct list_head *tmp;
4088         int rbd_id = rbd_dev->dev_id;
4089         int max_id;
4090
4091         rbd_assert(rbd_id > 0);
4092
4093         dout("rbd_dev %p released dev id %llu\n", rbd_dev,
4094                 (unsigned long long) rbd_dev->dev_id);
4095         spin_lock(&rbd_dev_list_lock);
4096         list_del_init(&rbd_dev->node);
4097
4098         /*
4099          * If the id being "put" is not the current maximum, there
4100          * is nothing special we need to do.
4101          */
4102         if (rbd_id != atomic64_read(&rbd_dev_id_max)) {
4103                 spin_unlock(&rbd_dev_list_lock);
4104                 return;
4105         }
4106
4107         /*
4108          * We need to update the current maximum id.  Search the
4109          * list to find out what it is.  We're more likely to find
4110          * the maximum at the end, so search the list backward.
4111          */
4112         max_id = 0;
4113         list_for_each_prev(tmp, &rbd_dev_list) {
4114                 struct rbd_device *rbd_dev;
4115
4116                 rbd_dev = list_entry(tmp, struct rbd_device, node);
4117                 if (rbd_dev->dev_id > max_id)
4118                         max_id = rbd_dev->dev_id;
4119         }
4120         spin_unlock(&rbd_dev_list_lock);
4121
4122         /*
4123          * The max id could have been updated by rbd_dev_id_get(), in
4124          * which case it now accurately reflects the new maximum.
4125          * Be careful not to overwrite the maximum value in that
4126          * case.
4127          */
4128         atomic64_cmpxchg(&rbd_dev_id_max, rbd_id, max_id);
4129         dout("  max dev id has been reset\n");
4130 }
4131
4132 /*
4133  * Skips over white space at *buf, and updates *buf to point to the
4134  * first found non-space character (if any). Returns the length of
4135  * the token (string of non-white space characters) found.  Note
4136  * that *buf must be terminated with '\0'.
4137  */
4138 static inline size_t next_token(const char **buf)
4139 {
4140         /*
4141          * These are the characters that produce nonzero for
4142          * isspace() in the "C" and "POSIX" locales.
4143          */
4144         const char *spaces = " \f\n\r\t\v";
4145
4146         *buf += strspn(*buf, spaces);   /* Find start of token */
4147
4148         return strcspn(*buf, spaces);   /* Return token length */
4149 }
4150
4151 /*
4152  * Finds the next token in *buf, and if the provided token buffer is
4153  * big enough, copies the found token into it.  The result, if
4154  * copied, is guaranteed to be terminated with '\0'.  Note that *buf
4155  * must be terminated with '\0' on entry.
4156  *
4157  * Returns the length of the token found (not including the '\0').
4158  * Return value will be 0 if no token is found, and it will be >=
4159  * token_size if the token would not fit.
4160  *
4161  * The *buf pointer will be updated to point beyond the end of the
4162  * found token.  Note that this occurs even if the token buffer is
4163  * too small to hold it.
4164  */
4165 static inline size_t copy_token(const char **buf,
4166                                 char *token,
4167                                 size_t token_size)
4168 {
4169         size_t len;
4170
4171         len = next_token(buf);
4172         if (len < token_size) {
4173                 memcpy(token, *buf, len);
4174                 *(token + len) = '\0';
4175         }
4176         *buf += len;
4177
4178         return len;
4179 }
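/*
 * Illustrative example: with *buf pointing at "  rbd foo",
 * next_token() skips the two leading spaces (leaving *buf at
 * "rbd foo") and returns 3; copy_token() with a token buffer of at
 * least 4 bytes then copies out "rbd" and advances *buf to " foo".
 */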
4180
4181 /*
4182  * Finds the next token in *buf, dynamically allocates a buffer big
4183  * enough to hold a copy of it, and copies the token into the new
4184  * buffer.  The copy is guaranteed to be terminated with '\0'.  Note
4185  * that a duplicate buffer is created even for a zero-length token.
4186  *
4187  * Returns a pointer to the newly-allocated duplicate, or a null
4188  * pointer if memory for the duplicate was not available.  If
4189  * the lenp argument is a non-null pointer, the length of the token
4190  * (not including the '\0') is returned in *lenp.
4191  *
4192  * If successful, the *buf pointer will be updated to point beyond
4193  * the end of the found token.
4194  *
4195  * Note: uses GFP_KERNEL for allocation.
4196  */
4197 static inline char *dup_token(const char **buf, size_t *lenp)
4198 {
4199         char *dup;
4200         size_t len;
4201
4202         len = next_token(buf);
4203         dup = kmemdup(*buf, len + 1, GFP_KERNEL);
4204         if (!dup)
4205                 return NULL;
4206         *(dup + len) = '\0';
4207         *buf += len;
4208
4209         if (lenp)
4210                 *lenp = len;
4211
4212         return dup;
4213 }
4214
4215 /*
4216  * Parse the options provided for an "rbd add" (i.e., rbd image
4217  * mapping) request.  These arrive via a write to /sys/bus/rbd/add,
4218  * and the data written is passed here via a NUL-terminated buffer.
4219  * Returns 0 if successful or an error code otherwise.
4220  *
4221  * The information extracted from these options is recorded in
4222  * the other parameters which return dynamically-allocated
4223  * structures:
4224  *  ceph_opts
4225  *      The address of a pointer that will refer to a ceph options
4226  *      structure.  Caller must release the returned pointer using
4227  *      ceph_destroy_options() when it is no longer needed.
4228  *  rbd_opts
4229  *      Address of an rbd options pointer.  Fully initialized by
4230  *      this function; caller must release with kfree().
4231  *  spec
4232  *      Address of an rbd image specification pointer.  Fully
4233  *      initialized by this function based on parsed options.
4234  *      Caller must release with rbd_spec_put().
4235  *
4236  * The options passed take this form:
4237  *  <mon_addrs> <options> <pool_name> <image_name> [<snap_name>]
4238  * where:
4239  *  <mon_addrs>
4240  *      A comma-separated list of one or more monitor addresses.
4241  *      A monitor address is an ip address, optionally followed
4242  *      by a port number (separated by a colon).
4243  *        I.e.:  ip1[:port1][,ip2[:port2]...]
4244  *  <options>
4245  *      A comma-separated list of ceph and/or rbd options.
4246  *  <pool_name>
4247  *      The name of the rados pool containing the rbd image.
4248  *  <image_name>
4249  *      The name of the image in that pool to map.
4250  *  <snap_name>
4251  *      An optional snapshot name.  If provided, the mapping will
4252  *      present data from the image at the time that snapshot was
4253  *      created.  The image head is used if no snapshot name is
4254  *      provided.  Snapshot mappings are always read-only.
4255  */
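/*
 * For example (illustrative values only), writing
 *
 *   1.2.3.4:6789 name=admin rbd myimage mysnap
 *
 * to /sys/bus/rbd/add would map snapshot "mysnap" of image "myimage"
 * in pool "rbd", contacting the monitor at 1.2.3.4:6789 as the
 * "admin" ceph user.
 */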
4256 static int rbd_add_parse_args(const char *buf,
4257                                 struct ceph_options **ceph_opts,
4258                                 struct rbd_options **opts,
4259                                 struct rbd_spec **rbd_spec)
4260 {
4261         size_t len;
4262         char *options;
4263         const char *mon_addrs;
4264         char *snap_name;
4265         size_t mon_addrs_size;
4266         struct rbd_spec *spec = NULL;
4267         struct rbd_options *rbd_opts = NULL;
4268         struct ceph_options *copts;
4269         int ret;
4270
4271         /* The first four tokens are required */
4272
4273         len = next_token(&buf);
4274         if (!len) {
4275                 rbd_warn(NULL, "no monitor address(es) provided");
4276                 return -EINVAL;
4277         }
4278         mon_addrs = buf;
4279         mon_addrs_size = len + 1;
4280         buf += len;
4281
4282         ret = -EINVAL;
4283         options = dup_token(&buf, NULL);
4284         if (!options)
4285                 return -ENOMEM;
4286         if (!*options) {
4287                 rbd_warn(NULL, "no options provided");
4288                 goto out_err;
4289         }
4290
4291         spec = rbd_spec_alloc();
4292         if (!spec)
4293                 goto out_mem;
4294
4295         spec->pool_name = dup_token(&buf, NULL);
4296         if (!spec->pool_name)
4297                 goto out_mem;
4298         if (!*spec->pool_name) {
4299                 rbd_warn(NULL, "no pool name provided");
4300                 goto out_err;
4301         }
4302
4303         spec->image_name = dup_token(&buf, NULL);
4304         if (!spec->image_name)
4305                 goto out_mem;
4306         if (!*spec->image_name) {
4307                 rbd_warn(NULL, "no image name provided");
4308                 goto out_err;
4309         }
4310
4311         /*
4312          * Snapshot name is optional; default is to use "-"
4313          * (indicating the head/no snapshot).
4314          */
4315         len = next_token(&buf);
4316         if (!len) {
4317                 buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */
4318                 len = sizeof (RBD_SNAP_HEAD_NAME) - 1;
4319         } else if (len > RBD_MAX_SNAP_NAME_LEN) {
4320                 ret = -ENAMETOOLONG;
4321                 goto out_err;
4322         }
4323         snap_name = kmemdup(buf, len + 1, GFP_KERNEL);
4324         if (!snap_name)
4325                 goto out_mem;
4326         *(snap_name + len) = '\0';
4327         spec->snap_name = snap_name;
4328
4329         /* Initialize all rbd options to the defaults */
4330
4331         rbd_opts = kzalloc(sizeof (*rbd_opts), GFP_KERNEL);
4332         if (!rbd_opts)
4333                 goto out_mem;
4334
4335         rbd_opts->read_only = RBD_READ_ONLY_DEFAULT;
4336
4337         copts = ceph_parse_options(options, mon_addrs,
4338                                         mon_addrs + mon_addrs_size - 1,
4339                                         parse_rbd_opts_token, rbd_opts);
4340         if (IS_ERR(copts)) {
4341                 ret = PTR_ERR(copts);
4342                 goto out_err;
4343         }
4344         kfree(options);
4345
4346         *ceph_opts = copts;
4347         *opts = rbd_opts;
4348         *rbd_spec = spec;
4349
4350         return 0;
4351 out_mem:
4352         ret = -ENOMEM;
4353 out_err:
4354         kfree(rbd_opts);
4355         rbd_spec_put(spec);
4356         kfree(options);
4357
4358         return ret;
4359 }
4360
4361 /*
4362  * An rbd format 2 image has a unique identifier, distinct from the
4363  * name given to it by the user.  Internally, that identifier is
4364  * what's used to specify the names of objects related to the image.
4365  *
4366  * A special "rbd id" object is used to map an rbd image name to its
4367  * id.  If that object doesn't exist, then there is no v2 rbd image
4368  * with the supplied name.
4369  *
4370  * This function will record the given rbd_dev's image_id field if
4371  * it can be determined, and in that case will return 0.  If any
4372  * errors occur a negative errno will be returned and the rbd_dev's
4373  * image_id field will be unchanged (and should be NULL).
4374  */
4375 static int rbd_dev_image_id(struct rbd_device *rbd_dev)
4376 {
4377         int ret;
4378         size_t size;
4379         char *object_name;
4380         void *response;
4381         char *image_id;
4382
4383         /*
4384          * When probing a parent image, the image id is already
4385          * known (and the image name likely is not).  There's no
4386          * need to fetch the image id again in this case.  We
4387          * do still need to set the image format though.
4388          */
4389         if (rbd_dev->spec->image_id) {
4390                 rbd_dev->image_format = *rbd_dev->spec->image_id ? 2 : 1;
4391
4392                 return 0;
4393         }
4394
4395         /*
4396          * First, see if the format 2 image id file exists, and if
4397          * so, get the image's persistent id from it.
4398          */
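        /*
         * The id object's name is RBD_ID_PREFIX followed by the
         * image name; e.g. (illustrative) image "foo" is looked up
         * via an object named "rbd_id.foo", assuming the usual
         * RBD_ID_PREFIX of "rbd_id.".
         */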
4399         size = sizeof (RBD_ID_PREFIX) + strlen(rbd_dev->spec->image_name);
4400         object_name = kmalloc(size, GFP_NOIO);
4401         if (!object_name)
4402                 return -ENOMEM;
4403         sprintf(object_name, "%s%s", RBD_ID_PREFIX, rbd_dev->spec->image_name);
4404         dout("rbd id object name is %s\n", object_name);
4405
4406         /* Response will be an encoded string, which includes a length */
4407
4408         size = sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX;
4409         response = kzalloc(size, GFP_NOIO);
4410         if (!response) {
4411                 ret = -ENOMEM;
4412                 goto out;
4413         }
4414
4415         /* If it doesn't exist we'll assume it's a format 1 image */
4416
4417         ret = rbd_obj_method_sync(rbd_dev, object_name,
4418                                 "rbd", "get_id", NULL, 0,
4419                                 response, size);
4420         dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4421         if (ret == -ENOENT) {
4422                 image_id = kstrdup("", GFP_KERNEL);
4423                 ret = image_id ? 0 : -ENOMEM;
4424                 if (!ret)
4425                         rbd_dev->image_format = 1;
4426         } else if (ret > (int) sizeof (__le32)) {
4427                 void *p = response;
4428
4429                 image_id = ceph_extract_encoded_string(&p, p + ret,
4430                                                 NULL, GFP_NOIO);
4431                 ret = IS_ERR(image_id) ? PTR_ERR(image_id) : 0;
4432                 if (!ret)
4433                         rbd_dev->image_format = 2;
4434         } else {
4435                 ret = -EINVAL;
4436         }
4437
4438         if (!ret) {
4439                 rbd_dev->spec->image_id = image_id;
4440                 dout("image_id is %s\n", image_id);
4441         }
4442 out:
4443         kfree(response);
4444         kfree(object_name);
4445
4446         return ret;
4447 }
4448
4449 /* Undo whatever state changes are made by v1 or v2 image probe */
4450
4451 static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
4452 {
4453         struct rbd_image_header *header;
4454
4455         rbd_dev_remove_parent(rbd_dev);
4456         rbd_spec_put(rbd_dev->parent_spec);
4457         rbd_dev->parent_spec = NULL;
4458         rbd_dev->parent_overlap = 0;
4459
4460         /* Free dynamic fields from the header, then zero it out */
4461
4462         header = &rbd_dev->header;
4463         ceph_put_snap_context(header->snapc);
4464         kfree(header->snap_sizes);
4465         kfree(header->snap_names);
4466         kfree(header->object_prefix);
4467         memset(header, 0, sizeof (*header));
4468 }
4469
4470 static int rbd_dev_v1_probe(struct rbd_device *rbd_dev)
4471 {
4472         return rbd_dev_v1_header_read(rbd_dev);
4473 }
4474
4475 static int rbd_dev_v2_probe(struct rbd_device *rbd_dev)
4476 {
4477         int ret;
4478
4479         ret = rbd_dev_v2_image_size(rbd_dev);
4480         if (ret)
4481                 goto out_err;
4482
4483         /* Get the object prefix (a.k.a. block_name) for the image */
4484
4485         ret = rbd_dev_v2_object_prefix(rbd_dev);
4486         if (ret)
4487                 goto out_err;
4488
4489         /* Get and check the features for the image */
4490
4491         ret = rbd_dev_v2_features(rbd_dev);
4492         if (ret)
4493                 goto out_err;
4494
4495         /* If the image supports layering, get the parent info */
4496
4497         if (rbd_dev->header.features & RBD_FEATURE_LAYERING) {
4498                 ret = rbd_dev_v2_parent_info(rbd_dev);
4499                 if (ret)
4500                         goto out_err;
4501                 /*
4502                  * Print a warning if this image has a parent.
4503                  * Don't print it if the image now being probed
4504                  * is itself a parent.  We can tell at this point
4505                  * because we won't know its pool name yet (just its
4506                  * pool id).
4507                  */
4508                 if (rbd_dev->parent_spec && rbd_dev->spec->pool_name)
4509                         rbd_warn(rbd_dev, "WARNING: kernel layering "
4510                                         "is EXPERIMENTAL!");
4511         }
4512
4513         /* If the image supports fancy striping, get its parameters */
4514
4515         if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) {
4516                 ret = rbd_dev_v2_striping_info(rbd_dev);
4517                 if (ret < 0)
4518                         goto out_err;
4519         }
4520
4521         /* crypto and compression type aren't (yet) supported for v2 images */
4522
4523         rbd_dev->header.crypt_type = 0;
4524         rbd_dev->header.comp_type = 0;
4525
4526         /* Get the snapshot context, plus the header version */
4527
4528         ret = rbd_dev_v2_snap_context(rbd_dev);
4529         if (ret)
4530                 goto out_err;
4531
4532         return 0;
4533 out_err:
4534         rbd_dev->parent_overlap = 0;
4535         rbd_spec_put(rbd_dev->parent_spec);
4536         rbd_dev->parent_spec = NULL;
4537         kfree(rbd_dev->header_name);
4538         rbd_dev->header_name = NULL;
4539         kfree(rbd_dev->header.object_prefix);
4540         rbd_dev->header.object_prefix = NULL;
4541
4542         return ret;
4543 }

static int rbd_dev_probe_parent(struct rbd_device *rbd_dev)
{
        struct rbd_device *parent = NULL;
        struct rbd_spec *parent_spec;
        struct rbd_client *rbdc;
        int ret;

        if (!rbd_dev->parent_spec)
                return 0;
        /*
         * We need to pass a reference to the client and the parent
         * spec when creating the parent rbd_dev.  Images related by
         * parent/child relationships always share both.
         */
        parent_spec = rbd_spec_get(rbd_dev->parent_spec);
        rbdc = __rbd_get_client(rbd_dev->rbd_client);

        ret = -ENOMEM;
        parent = rbd_dev_create(rbdc, parent_spec);
        if (!parent)
                goto out_err;

        ret = rbd_dev_image_probe(parent, true);
        if (ret < 0)
                goto out_err;
        rbd_dev->parent = parent;

        return 0;
out_err:
        if (parent) {
                /* The parent rbd_dev owns the client and spec refs now */
                rbd_dev_destroy(parent);
        } else {
                rbd_put_client(rbdc);
                rbd_spec_put(parent_spec);
        }

        return ret;
}

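/*
 * Set up the Linux block device for a probed image: allocate a
 * device id and name (e.g. "rbd1"), grab a block major number,
 * create the gendisk, and announce the disk once everything is
 * in place.
 */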
static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
{
        int ret;

        /* generate unique id: find highest unique id, add one */
        rbd_dev_id_get(rbd_dev);

        /* Fill in the device name, now that we have its id. */
        BUILD_BUG_ON(DEV_NAME_LEN
                        < sizeof (RBD_DRV_NAME) + MAX_INT_FORMAT_WIDTH);
        sprintf(rbd_dev->name, "%s%d", RBD_DRV_NAME, rbd_dev->dev_id);

        /* Get our block major device number. */

        ret = register_blkdev(0, rbd_dev->name);
        if (ret < 0)
                goto err_out_id;
        rbd_dev->major = ret;

        /* Set up the blkdev mapping. */

        ret = rbd_init_disk(rbd_dev);
        if (ret)
                goto err_out_blkdev;

        ret = rbd_dev_mapping_set(rbd_dev);
        if (ret)
                goto err_out_disk;
        set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);

        ret = rbd_bus_add_dev(rbd_dev);
        if (ret)
                goto err_out_mapping;

        /* Everything's ready.  Announce the disk to the world. */

        set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
        add_disk(rbd_dev->disk);

        pr_info("%s: added with size 0x%llx\n", rbd_dev->disk->disk_name,
                (unsigned long long) rbd_dev->mapping.size);

        return ret;

err_out_mapping:
        rbd_dev_mapping_clear(rbd_dev);
err_out_disk:
        rbd_free_disk(rbd_dev);
err_out_blkdev:
        unregister_blkdev(rbd_dev->major, rbd_dev->name);
err_out_id:
        rbd_dev_id_put(rbd_dev);

        return ret;
}

static int rbd_dev_header_name(struct rbd_device *rbd_dev)
{
        struct rbd_spec *spec = rbd_dev->spec;
        size_t size;

        /* Record the header object name for this rbd image. */

        rbd_assert(rbd_image_format_valid(rbd_dev->image_format));

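        /*
         * Assuming the definitions in rbd_types.h (RBD_SUFFIX ".rbd",
         * RBD_HEADER_PREFIX "rbd_header."), a format 1 image named
         * "foo" uses header object "foo.rbd", while a format 2 image
         * with id "abc123" uses "rbd_header.abc123".
         */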
        if (rbd_dev->image_format == 1)
                size = strlen(spec->image_name) + sizeof (RBD_SUFFIX);
        else
                size = sizeof (RBD_HEADER_PREFIX) + strlen(spec->image_id);

        rbd_dev->header_name = kmalloc(size, GFP_KERNEL);
        if (!rbd_dev->header_name)
                return -ENOMEM;

        if (rbd_dev->image_format == 1)
                sprintf(rbd_dev->header_name, "%s%s",
                        spec->image_name, RBD_SUFFIX);
        else
                sprintf(rbd_dev->header_name, "%s%s",
                        RBD_HEADER_PREFIX, spec->image_id);
        return 0;
}

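/*
 * Release everything rbd_dev_image_probe() set up: undo the probe,
 * cancel the header object watch, and free the header name and
 * image id before destroying the device itself.
 */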
static void rbd_dev_image_release(struct rbd_device *rbd_dev)
{
        int ret;

        rbd_dev_unprobe(rbd_dev);
        ret = rbd_dev_header_watch_sync(rbd_dev, 0);
        if (ret)
                rbd_warn(rbd_dev, "failed to cancel watch event (%d)", ret);
        kfree(rbd_dev->header_name);
        rbd_dev->header_name = NULL;
        rbd_dev->image_format = 0;
        kfree(rbd_dev->spec->image_id);
        rbd_dev->spec->image_id = NULL;

        rbd_dev_destroy(rbd_dev);
}

/*
 * Probe for the existence of the header object for the given rbd
 * device.  For format 2 images this includes determining the image
 * id.
 */
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool read_only)
{
        int ret;
        int tmp;

        /*
         * Get the id from the image id object.  If it's not a
         * format 2 image, we'll get ENOENT back, and we'll assume
         * it's a format 1 image.
         */
        ret = rbd_dev_image_id(rbd_dev);
        if (ret)
                return ret;
        rbd_assert(rbd_dev->spec->image_id);
        rbd_assert(rbd_image_format_valid(rbd_dev->image_format));

        ret = rbd_dev_header_name(rbd_dev);
        if (ret)
                goto err_out_format;

        ret = rbd_dev_header_watch_sync(rbd_dev, 1);
        if (ret)
                goto out_header_name;

        if (rbd_dev->image_format == 1)
                ret = rbd_dev_v1_probe(rbd_dev);
        else
                ret = rbd_dev_v2_probe(rbd_dev);
        if (ret)
                goto err_out_watch;

        ret = rbd_dev_spec_update(rbd_dev);
        if (ret)
                goto err_out_probe;

        /* If we are mapping a snapshot it must be marked read-only */

        if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
                read_only = true;
        rbd_dev->mapping.read_only = read_only;

        ret = rbd_dev_probe_parent(rbd_dev);
        if (ret)
                goto err_out_probe;

        dout("discovered format %u image, header name is %s\n",
                rbd_dev->image_format, rbd_dev->header_name);

        return 0;
err_out_probe:
        rbd_dev_unprobe(rbd_dev);
err_out_watch:
        tmp = rbd_dev_header_watch_sync(rbd_dev, 0);
        if (tmp)
                rbd_warn(rbd_dev, "unable to tear down watch request");
out_header_name:
        kfree(rbd_dev->header_name);
        rbd_dev->header_name = NULL;
err_out_format:
        rbd_dev->image_format = 0;
        kfree(rbd_dev->spec->image_id);
        rbd_dev->spec->image_id = NULL;

        dout("probe failed, returning %d\n", ret);

        return ret;
}

static ssize_t rbd_add(struct bus_type *bus,
                       const char *buf,
                       size_t count)
{
        struct rbd_device *rbd_dev = NULL;
        struct ceph_options *ceph_opts = NULL;
        struct rbd_options *rbd_opts = NULL;
        struct rbd_spec *spec = NULL;
        struct rbd_client *rbdc;
        struct ceph_osd_client *osdc;
        bool read_only;
        int rc = -ENOMEM;

        if (!try_module_get(THIS_MODULE))
                return -ENODEV;

        /* parse add command */
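        /*
         * The buffer written to /sys/bus/rbd/add has the form
         * "<mon addrs> <options> <pool name> <image name> [<snap name>]",
         * for example "1.2.3.4:6789 name=admin rbd myimage"; see
         * Documentation/ABI/testing/sysfs-bus-rbd for details.
         */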
        rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
        if (rc < 0)
                goto err_out_module;
        read_only = rbd_opts->read_only;
        kfree(rbd_opts);
        rbd_opts = NULL;        /* done with this */

        rbdc = rbd_get_client(ceph_opts);
        if (IS_ERR(rbdc)) {
                rc = PTR_ERR(rbdc);
                goto err_out_args;
        }
        ceph_opts = NULL;       /* rbd_dev client now owns this */

        /* pick the pool */
        osdc = &rbdc->client->osdc;
        rc = ceph_pg_poolid_by_name(osdc->osdmap, spec->pool_name);
        if (rc < 0)
                goto err_out_client;
        spec->pool_id = (u64)rc;

        /* The ceph file layout needs to fit pool id in 32 bits */

        if (spec->pool_id > (u64)U32_MAX) {
                rbd_warn(NULL, "pool id too large (%llu > %u)",
                                (unsigned long long)spec->pool_id, U32_MAX);
                rc = -EIO;
                goto err_out_client;
        }

        rbd_dev = rbd_dev_create(rbdc, spec);
        if (!rbd_dev)
                goto err_out_client;
        rbdc = NULL;            /* rbd_dev now owns this */
        spec = NULL;            /* rbd_dev now owns this */

        rc = rbd_dev_image_probe(rbd_dev, read_only);
        if (rc < 0)
                goto err_out_rbd_dev;

        rc = rbd_dev_device_setup(rbd_dev);
        if (!rc)
                return count;

        /*
         * rbd_dev_image_release() destroys rbd_dev, so skip the
         * error labels that would tear it down a second time.
         */
        rbd_dev_image_release(rbd_dev);
        goto err_out_module;
err_out_rbd_dev:
        rbd_dev_destroy(rbd_dev);
err_out_client:
        rbd_put_client(rbdc);
err_out_args:
        if (ceph_opts)
                ceph_destroy_options(ceph_opts);
        kfree(rbd_opts);
        rbd_spec_put(spec);
err_out_module:
        module_put(THIS_MODULE);

        dout("Error adding device %s\n", buf);

        return (ssize_t)rc;
}

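/*
 * Look up a device by id in the global device list.  The list lock
 * is dropped before returning, so the caller is responsible for
 * keeping the device from disappearing (rbd_remove() does this by
 * holding ctl_mutex across the lookup and removal).
 */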
static struct rbd_device *__rbd_get_dev(unsigned long dev_id)
{
        struct list_head *tmp;
        struct rbd_device *rbd_dev;

        spin_lock(&rbd_dev_list_lock);
        list_for_each(tmp, &rbd_dev_list) {
                rbd_dev = list_entry(tmp, struct rbd_device, node);
                if (rbd_dev->dev_id == dev_id) {
                        spin_unlock(&rbd_dev_list_lock);
                        return rbd_dev;
                }
        }
        spin_unlock(&rbd_dev_list_lock);
        return NULL;
}

static void rbd_dev_device_release(struct device *dev)
{
        struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

        rbd_free_disk(rbd_dev);
        clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
        rbd_dev_mapping_clear(rbd_dev);
        unregister_blkdev(rbd_dev->major, rbd_dev->name);
        rbd_dev->major = 0;
        rbd_dev_id_put(rbd_dev);
}

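/*
 * Tear down an image's chain of parents, deepest ancestor first.
 * For a chain base <- mid <- child (child being the mapped device),
 * base is released on the first pass and mid on the second, after
 * which child no longer has a parent.
 */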
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev)
{
        while (rbd_dev->parent) {
                struct rbd_device *first = rbd_dev;
                struct rbd_device *second = first->parent;
                struct rbd_device *third;

                /*
                 * Follow to the parent with no grandparent and
                 * remove it.
                 */
                while (second && (third = second->parent)) {
                        first = second;
                        second = third;
                }
                rbd_assert(second);
                rbd_dev_image_release(second);
                first->parent = NULL;
                first->parent_overlap = 0;

                rbd_assert(first->parent_spec);
                rbd_spec_put(first->parent_spec);
                first->parent_spec = NULL;
        }
}

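/*
 * Handle a write to /sys/bus/rbd/remove.  The buffer holds the
 * decimal id of the device to tear down; for example,
 * "echo 2 > /sys/bus/rbd/remove" removes rbd2.
 */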
static ssize_t rbd_remove(struct bus_type *bus,
                          const char *buf,
                          size_t count)
{
        struct rbd_device *rbd_dev = NULL;
        int target_id;
        unsigned long ul;
        int ret;

        ret = kstrtoul(buf, 10, &ul);
        if (ret)
                return ret;

        /* convert to int; abort if we lost anything in the conversion */
        target_id = (int) ul;
        if (target_id != ul)
                return -EINVAL;

        mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);

        rbd_dev = __rbd_get_dev(target_id);
        if (!rbd_dev) {
                ret = -ENOENT;
                goto done;
        }

        spin_lock_irq(&rbd_dev->lock);
        if (rbd_dev->open_count)
                ret = -EBUSY;
        else
                set_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags);
        spin_unlock_irq(&rbd_dev->lock);
        if (ret < 0)
                goto done;
        ret = count;
        rbd_bus_del_dev(rbd_dev);
        rbd_dev_image_release(rbd_dev);
        module_put(THIS_MODULE);
done:
        mutex_unlock(&ctl_mutex);

        return ret;
}

/*
 * create control files in sysfs
 * /sys/bus/rbd/...
 */
static int rbd_sysfs_init(void)
{
        int ret;

        ret = device_register(&rbd_root_dev);
        if (ret < 0)
                return ret;

        ret = bus_register(&rbd_bus_type);
        if (ret < 0)
                device_unregister(&rbd_root_dev);

        return ret;
}

static void rbd_sysfs_cleanup(void)
{
        bus_unregister(&rbd_bus_type);
        device_unregister(&rbd_root_dev);
}

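/*
 * Create the slab caches used on the I/O path: one each for image
 * requests and object requests, plus one sized to hold a formatted
 * segment (object) name.
 */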
static int rbd_slab_init(void)
{
        rbd_assert(!rbd_img_request_cache);
        rbd_img_request_cache = kmem_cache_create("rbd_img_request",
                                        sizeof (struct rbd_img_request),
                                        __alignof__(struct rbd_img_request),
                                        0, NULL);
        if (!rbd_img_request_cache)
                return -ENOMEM;

        rbd_assert(!rbd_obj_request_cache);
        rbd_obj_request_cache = kmem_cache_create("rbd_obj_request",
                                        sizeof (struct rbd_obj_request),
                                        __alignof__(struct rbd_obj_request),
                                        0, NULL);
        if (!rbd_obj_request_cache)
                goto out_err;

        rbd_assert(!rbd_segment_name_cache);
        rbd_segment_name_cache = kmem_cache_create("rbd_segment_name",
                                        MAX_OBJ_NAME_SIZE + 1, 1, 0, NULL);
        if (rbd_segment_name_cache)
                return 0;
out_err:
        if (rbd_obj_request_cache) {
                kmem_cache_destroy(rbd_obj_request_cache);
                rbd_obj_request_cache = NULL;
        }

        kmem_cache_destroy(rbd_img_request_cache);
        rbd_img_request_cache = NULL;

        return -ENOMEM;
}

static void rbd_slab_exit(void)
{
        rbd_assert(rbd_segment_name_cache);
        kmem_cache_destroy(rbd_segment_name_cache);
        rbd_segment_name_cache = NULL;

        rbd_assert(rbd_obj_request_cache);
        kmem_cache_destroy(rbd_obj_request_cache);
        rbd_obj_request_cache = NULL;

        rbd_assert(rbd_img_request_cache);
        kmem_cache_destroy(rbd_img_request_cache);
        rbd_img_request_cache = NULL;
}

static int __init rbd_init(void)
{
        int rc;

        if (!libceph_compatible(NULL)) {
                rbd_warn(NULL, "libceph incompatibility (quitting)");

                return -EINVAL;
        }
        rc = rbd_slab_init();
        if (rc)
                return rc;
        rc = rbd_sysfs_init();
        if (rc)
                rbd_slab_exit();
        else
                pr_info("loaded " RBD_DRV_NAME_LONG "\n");

        return rc;
}

static void __exit rbd_exit(void)
{
        rbd_sysfs_cleanup();
        rbd_slab_exit();
}

module_init(rbd_init);
module_exit(rbd_exit);

MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
MODULE_DESCRIPTION("rados block device");

/* following authorship retained from original osdblk.c */
MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>");

MODULE_LICENSE("GPL");