/*
   rbd.c -- Export ceph rados objects as a Linux block device

   based on drivers/block/osdblk.c:

   Copyright 2009 Red Hat, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

   For usage instructions, please refer to:

                 Documentation/ABI/testing/sysfs-bus-rbd

 */
#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/mon_client.h>
#include <linux/ceph/decode.h>
#include <linux/parser.h>
#include <linux/bsearch.h>

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/module.h>

#include <linux/blkdev.h>
#include <linux/slab.h>

#include "rbd_types.h"
#define RBD_DEBUG	/* Activate rbd_assert() calls */

/*
 * The basic unit of block I/O is a sector.  It is interpreted in a
 * number of contexts in Linux (blk, bio, genhd), but the default is
 * universally 512 bytes.  These symbols are just slightly more
 * meaningful than the bare numbers they represent.
 */
#define	SECTOR_SHIFT	9
#define	SECTOR_SIZE	(1ULL << SECTOR_SHIFT)
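/*
 * Worked example (illustrative): an image with the common object order
 * of 22 (4 MiB objects) maps each object onto
 * 1 << (22 - SECTOR_SHIFT) = 8192 of these 512-byte sectors.
 */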
/*
 * Increment the given counter and return its updated value.
 * If the counter is already 0 it will not be incremented.
 * If the counter is already at its maximum value returns
 * -EINVAL without updating it.
 */
static int atomic_inc_return_safe(atomic_t *v)
{
	unsigned int counter;

	counter = (unsigned int)__atomic_add_unless(v, 1, 0);
	if (counter <= (unsigned int)INT_MAX)
		return (int)counter;

	atomic_dec(v);

	return -EINVAL;
}
/* Decrement the counter.  Return the resulting value, or -EINVAL */
static int atomic_dec_return_safe(atomic_t *v)
{
	int counter;

	counter = atomic_dec_return(v);
	if (counter >= 0)
		return counter;

	atomic_inc(v);

	return -EINVAL;
}
#define RBD_DRV_NAME "rbd"
#define RBD_DRV_NAME_LONG "rbd (rados block device)"

#define RBD_MINORS_PER_MAJOR	256		/* max minors per blkdev */

#define RBD_SNAP_DEV_NAME_PREFIX	"snap_"
#define RBD_MAX_SNAP_NAME_LEN \
			(NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))
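/*
 * Worked example (illustrative): sizeof (RBD_SNAP_DEV_NAME_PREFIX) is 6
 * (it counts the NUL), so with NAME_MAX of 255 the longest allowed
 * snapshot name is 255 - 5 = 250 bytes.
 */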
#define RBD_MAX_SNAP_COUNT	510	/* allows max snapc to fit in 4KB */

#define RBD_SNAP_HEAD_NAME	"-"

#define	BAD_SNAP_INDEX	U32_MAX		/* invalid index into snap array */

/* This allows a single page to hold an image name sent by OSD */
#define RBD_IMAGE_NAME_LEN_MAX	(PAGE_SIZE - sizeof (__le32) - 1)
#define RBD_IMAGE_ID_LEN_MAX	64

#define RBD_OBJ_PREFIX_LEN_MAX	64
/* Feature bits */

#define RBD_FEATURE_LAYERING	(1<<0)
#define RBD_FEATURE_STRIPINGV2	(1<<1)
#define RBD_FEATURES_ALL \
	    (RBD_FEATURE_LAYERING | RBD_FEATURE_STRIPINGV2)

/* Features supported by this (client software) implementation. */

#define RBD_FEATURES_SUPPORTED	(RBD_FEATURES_ALL)
/*
 * An RBD device name will be "rbd#", where the "rbd" comes from
 * RBD_DRV_NAME above, and # is a unique integer identifier.
 * MAX_INT_FORMAT_WIDTH is used in ensuring DEV_NAME_LEN is big
 * enough to hold all possible device names.
 */
#define DEV_NAME_LEN		32
#define MAX_INT_FORMAT_WIDTH	((5 * sizeof (int)) / 2 + 1)
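/*
 * Worked example (illustrative): for a 4-byte int the formula gives
 * (5 * 4) / 2 + 1 = 11, which covers the 10 decimal digits of INT_MAX
 * plus a sign, so "rbd%d" always fits comfortably in DEV_NAME_LEN.
 */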
/*
 * block device image metadata (in-memory version)
 */
struct rbd_image_header {
	/* These six fields never change for a given rbd image */
	char *object_prefix;
	__u8 obj_order;
	__u8 crypt_type;
	__u8 comp_type;
	u64 stripe_unit;
	u64 stripe_count;
	u64 features;		/* Might be changeable someday? */

	/* The remaining fields need to be updated occasionally */
	u64 image_size;
	struct ceph_snap_context *snapc;
	char *snap_names;	/* format 1 only */
	u64 *snap_sizes;	/* format 1 only */
};
/*
 * An rbd image specification.
 *
 * The tuple (pool_id, image_id, snap_id) is sufficient to uniquely
 * identify an image.  Each rbd_dev structure includes a pointer to
 * an rbd_spec structure that encapsulates this identity.
 *
 * Each of the id's in an rbd_spec has an associated name.  For a
 * user-mapped image, the names are supplied and the id's associated
 * with them are looked up.  For a layered image, a parent image is
 * defined by the tuple, and the names are looked up.
 *
 * An rbd_dev structure contains a parent_spec pointer which is
 * non-null if the image it represents is a child in a layered
 * image.  This pointer will refer to the rbd_spec structure used
 * by the parent rbd_dev for its own identity (i.e., the structure
 * is shared between the parent and child).
 *
 * Since these structures are populated once, during the discovery
 * phase of image construction, they are effectively immutable so
 * we make no effort to synchronize access to them.
 *
 * Note that code herein does not assume the image name is known (it
 * could be a null pointer).
 */
struct rbd_spec {
	u64		pool_id;
	const char	*pool_name;

	const char	*image_id;
	const char	*image_name;

	u64		snap_id;
	const char	*snap_name;

	struct kref	kref;
};
/*
 * an instance of the client.  multiple devices may share an rbd client.
 */
struct rbd_client {
	struct ceph_client	*client;
	struct kref		kref;
	struct list_head	node;
};
struct rbd_img_request;
typedef void (*rbd_img_callback_t)(struct rbd_img_request *);

#define	BAD_WHICH	U32_MAX		/* Good which or bad which, which? */

struct rbd_obj_request;
typedef void (*rbd_obj_callback_t)(struct rbd_obj_request *);
enum obj_request_type {
	OBJ_REQUEST_NODATA, OBJ_REQUEST_BIO, OBJ_REQUEST_PAGES
};

enum obj_req_flags {
	OBJ_REQ_DONE,		/* completion flag: not done = 0, done = 1 */
	OBJ_REQ_IMG_DATA,	/* object usage: standalone = 0, image = 1 */
	OBJ_REQ_KNOWN,		/* EXISTS flag valid: no = 0, yes = 1 */
	OBJ_REQ_EXISTS,		/* target exists: no = 0, yes = 1 */
};
struct rbd_obj_request {
	const char		*object_name;
	u64			offset;		/* object start byte */
	u64			length;		/* bytes from offset */
	unsigned long		flags;

	/*
	 * An object request associated with an image will have its
	 * img_data flag set; a standalone object request will not.
	 *
	 * A standalone object request will have which == BAD_WHICH
	 * and a null obj_request pointer.
	 *
	 * An object request initiated in support of a layered image
	 * object (to check for its existence before a write) will
	 * have which == BAD_WHICH and a non-null obj_request pointer.
	 *
	 * Finally, an object request for rbd image data will have
	 * which != BAD_WHICH, and will have a non-null img_request
	 * pointer.  The value of which will be in the range
	 * 0..(img_request->obj_request_count-1).
	 */
	union {
		struct rbd_obj_request	*obj_request;	/* STAT op */
		struct {
			struct rbd_img_request	*img_request;
			u64			img_offset;
			/* links for img_request->obj_requests list */
			struct list_head	links;
		};
	};
	u32			which;		/* posn image request list */

	enum obj_request_type	type;
	union {
		struct bio	*bio_list;
		struct {
			struct page	**pages;
			u32		page_count;
		};
	};
	struct page		**copyup_pages;
	u32			copyup_page_count;

	struct ceph_osd_request	*osd_req;

	u64			xferred;	/* bytes transferred */
	int			result;

	rbd_obj_callback_t	callback;
	struct completion	completion;

	struct kref		kref;
};
enum img_req_flags {
	IMG_REQ_WRITE,		/* I/O direction: read = 0, write = 1 */
	IMG_REQ_CHILD,		/* initiator: block = 0, child image = 1 */
	IMG_REQ_LAYERED,	/* ENOENT handling: normal = 0, layered = 1 */
};
struct rbd_img_request {
	struct rbd_device	*rbd_dev;
	u64			offset;	/* starting image byte offset */
	u64			length;	/* byte count from offset */
	unsigned long		flags;
	union {
		u64			snap_id;	/* for reads */
		struct ceph_snap_context *snapc;	/* for writes */
	};
	union {
		struct request		*rq;		/* block request */
		struct rbd_obj_request	*obj_request;	/* obj req initiator */
	};
	struct page		**copyup_pages;
	u32			copyup_page_count;
	spinlock_t		completion_lock;/* protects next_completion */
	u32			next_completion;
	rbd_img_callback_t	callback;
	u64			xferred;/* aggregate bytes transferred */
	int			result;	/* first nonzero obj_request result */

	u32			obj_request_count;
	struct list_head	obj_requests;	/* rbd_obj_request structs */

	struct kref		kref;
};
#define	for_each_obj_request(ireq, oreq) \
	list_for_each_entry(oreq, &(ireq)->obj_requests, links)
#define	for_each_obj_request_from(ireq, oreq) \
	list_for_each_entry_from(oreq, &(ireq)->obj_requests, links)
#define	for_each_obj_request_safe(ireq, oreq, n) \
	list_for_each_entry_safe_reverse(oreq, n, &(ireq)->obj_requests, links)
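/*
 * Usage sketch (illustrative only): the aggregate-transfer loop in
 * rbd_img_request_complete() below is the typical pattern, e.g.:
 *
 *	struct rbd_obj_request *obj_request;
 *	u64 xferred = 0;
 *
 *	for_each_obj_request(img_request, obj_request)
 *		xferred += obj_request->xferred;
 */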
struct rbd_mapping {
	u64			size;
	u64			features;
	bool			read_only;
};

/*
 * a single device
 */
struct rbd_device {
	int			dev_id;		/* blkdev unique id */

	int			major;		/* blkdev assigned major */
	struct gendisk		*disk;		/* blkdev's gendisk and rq */

	u32			image_format;	/* Either 1 or 2 */
	struct rbd_client	*rbd_client;

	char			name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */

	spinlock_t		lock;		/* queue, flags, open_count */

	struct rbd_image_header	header;
	unsigned long		flags;		/* possibly lock protected */
	struct rbd_spec		*spec;

	struct ceph_file_layout	layout;

	struct ceph_osd_event	*watch_event;
	struct rbd_obj_request	*watch_request;

	struct rbd_spec		*parent_spec;
	u64			parent_overlap;
	atomic_t		parent_ref;
	struct rbd_device	*parent;

	/* protects updating the header */
	struct rw_semaphore	header_rwsem;

	struct rbd_mapping	mapping;

	struct list_head	node;

	/* sysfs related */
	struct device		dev;
	unsigned long		open_count;	/* protected by lock */
};
/*
 * Flag bits for rbd_dev->flags.  If atomicity is required,
 * rbd_dev->lock is used to protect access.
 *
 * Currently, only the "removing" flag (which is coupled with the
 * "open_count" field) requires atomic access.
 */
enum rbd_dev_flags {
	RBD_DEV_FLAG_EXISTS,	/* mapped snapshot has not been deleted */
	RBD_DEV_FLAG_REMOVING,	/* this mapping is being removed */
};
static DEFINE_MUTEX(client_mutex);	/* Serialize client creation */

static LIST_HEAD(rbd_dev_list);    /* devices */
static DEFINE_SPINLOCK(rbd_dev_list_lock);

static LIST_HEAD(rbd_client_list);		/* clients */
static DEFINE_SPINLOCK(rbd_client_list_lock);

/* Slab caches for frequently-allocated structures */

static struct kmem_cache	*rbd_img_request_cache;
static struct kmem_cache	*rbd_obj_request_cache;
static struct kmem_cache	*rbd_segment_name_cache;
static int rbd_img_request_submit(struct rbd_img_request *img_request);

static void rbd_dev_device_release(struct device *dev);

static ssize_t rbd_add(struct bus_type *bus, const char *buf,
		       size_t count);
static ssize_t rbd_remove(struct bus_type *bus, const char *buf,
			  size_t count);
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping);
static void rbd_spec_put(struct rbd_spec *spec);

static BUS_ATTR(add, S_IWUSR, NULL, rbd_add);
static BUS_ATTR(remove, S_IWUSR, NULL, rbd_remove);
static struct attribute *rbd_bus_attrs[] = {
	&bus_attr_add.attr,
	&bus_attr_remove.attr,
	NULL,
};
ATTRIBUTE_GROUPS(rbd_bus);
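/*
 * Userspace sketch (illustrative; Documentation/ABI/testing/sysfs-bus-rbd
 * is authoritative): mapping and unmapping are driven by writes to these
 * bus attributes, roughly:
 *
 *	# echo "1.2.3.4:6789 name=admin,secret=<key> rbd myimage -" \
 *		> /sys/bus/rbd/add
 *	# echo <dev-id> > /sys/bus/rbd/remove
 */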
static struct bus_type rbd_bus_type = {
	.name		= "rbd",
	.bus_groups	= rbd_bus_groups,
};
static void rbd_root_dev_release(struct device *dev)
{
}

static struct device rbd_root_dev = {
	.init_name =    "rbd",
	.release =      rbd_root_dev_release,
};
static __printf(2, 3)
void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;

	if (!rbd_dev)
		printk(KERN_WARNING "%s: %pV\n", RBD_DRV_NAME, &vaf);
	else if (rbd_dev->disk)
		printk(KERN_WARNING "%s: %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->disk->disk_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_name)
		printk(KERN_WARNING "%s: image %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_id)
		printk(KERN_WARNING "%s: id %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_id, &vaf);
	else	/* punt */
		printk(KERN_WARNING "%s: rbd_dev %p: %pV\n",
			RBD_DRV_NAME, rbd_dev, &vaf);
	va_end(args);
}
#ifdef RBD_DEBUG
#define rbd_assert(expr)						\
		if (unlikely(!(expr))) {				\
			printk(KERN_ERR "\nAssertion failure in %s() "	\
						"at line %d:\n\n"	\
					"\trbd_assert(%s);\n\n",	\
					__func__, __LINE__, #expr);	\
			BUG();						\
		}
#else /* !RBD_DEBUG */
#  define rbd_assert(expr)	((void) 0)
#endif /* !RBD_DEBUG */
static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request);
static void rbd_img_parent_read(struct rbd_obj_request *obj_request);
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);

static int rbd_dev_refresh(struct rbd_device *rbd_dev);
static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev);
static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev);
static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id);
static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
		u8 *order, u64 *snap_size);
static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
		u64 *snap_features);
static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name);
static int rbd_open(struct block_device *bdev, fmode_t mode)
{
	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
	bool removing = false;

	if ((mode & FMODE_WRITE) && rbd_dev->mapping.read_only)
		return -EROFS;

	spin_lock_irq(&rbd_dev->lock);
	if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
		removing = true;
	else
		rbd_dev->open_count++;
	spin_unlock_irq(&rbd_dev->lock);
	if (removing)
		return -ENOENT;

	(void) get_device(&rbd_dev->dev);
	set_device_ro(bdev, rbd_dev->mapping.read_only);

	return 0;
}
static void rbd_release(struct gendisk *disk, fmode_t mode)
{
	struct rbd_device *rbd_dev = disk->private_data;
	unsigned long open_count_before;

	spin_lock_irq(&rbd_dev->lock);
	open_count_before = rbd_dev->open_count--;
	spin_unlock_irq(&rbd_dev->lock);
	rbd_assert(open_count_before > 0);

	put_device(&rbd_dev->dev);
}
static const struct block_device_operations rbd_bd_ops = {
	.owner			= THIS_MODULE,
	.open			= rbd_open,
	.release		= rbd_release,
};
/*
 * Initialize an rbd client instance.  Success or not, this function
 * consumes ceph_opts.  Caller holds client_mutex.
 */
static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;
	int ret = -ENOMEM;

	dout("%s:\n", __func__);
	rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL);
	if (!rbdc)
		goto out_opt;

	kref_init(&rbdc->kref);
	INIT_LIST_HEAD(&rbdc->node);

	rbdc->client = ceph_create_client(ceph_opts, rbdc, 0, 0);
	if (IS_ERR(rbdc->client))
		goto out_rbdc;
	ceph_opts = NULL;	/* Now rbdc->client is responsible for ceph_opts */

	ret = ceph_open_session(rbdc->client);
	if (ret < 0)
		goto out_client;

	spin_lock(&rbd_client_list_lock);
	list_add_tail(&rbdc->node, &rbd_client_list);
	spin_unlock(&rbd_client_list_lock);

	dout("%s: rbdc %p\n", __func__, rbdc);

	return rbdc;
out_client:
	ceph_destroy_client(rbdc->client);
out_rbdc:
	kfree(rbdc);
out_opt:
	if (ceph_opts)
		ceph_destroy_options(ceph_opts);
	dout("%s: error %d\n", __func__, ret);

	return ERR_PTR(ret);
}
static struct rbd_client *__rbd_get_client(struct rbd_client *rbdc)
{
	kref_get(&rbdc->kref);

	return rbdc;
}
/*
 * Find a ceph client with specific addr and configuration.  If
 * found, bump its reference count.
 */
static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
{
	struct rbd_client *client_node;
	bool found = false;

	if (ceph_opts->flags & CEPH_OPT_NOSHARE)
		return NULL;

	spin_lock(&rbd_client_list_lock);
	list_for_each_entry(client_node, &rbd_client_list, node) {
		if (!ceph_compare_options(ceph_opts, client_node->client)) {
			__rbd_get_client(client_node);

			found = true;
			break;
		}
	}
	spin_unlock(&rbd_client_list_lock);

	return found ? client_node : NULL;
}
/*
 * mount options
 */
enum {
	Opt_last_int,
	/* int args above */
	Opt_last_string,
	/* string args above */
	Opt_read_only,
	Opt_read_write,
	/* Boolean args above */
	Opt_last_bool,
};

static match_table_t rbd_opts_tokens = {
	/* int args above */
	/* string args above */
	{Opt_read_only, "read_only"},
	{Opt_read_only, "ro"},		/* Alternate spelling */
	{Opt_read_write, "read_write"},
	{Opt_read_write, "rw"},		/* Alternate spelling */
	/* Boolean args above */
	{-1, NULL}
};

struct rbd_options {
	bool	read_only;
};

#define RBD_READ_ONLY_DEFAULT	false
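/*
 * Example (illustrative): these tokens parse the per-mapping option
 * string, so "ro" and "read_only" are equivalent ways to request a
 * read-only mapping, while "rw"/"read_write" restate the
 * RBD_READ_ONLY_DEFAULT behavior explicitly.
 */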
static int parse_rbd_opts_token(char *c, void *private)
{
	struct rbd_options *rbd_opts = private;
	substring_t argstr[MAX_OPT_ARGS];
	int token, intval, ret;

	token = match_token(c, rbd_opts_tokens, argstr);
	if (token < 0)
		return -EINVAL;

	if (token < Opt_last_int) {
		ret = match_int(&argstr[0], &intval);
		if (ret < 0) {
			pr_err("bad mount option arg (not int) "
			       "at '%s'\n", c);
			return ret;
		}
		dout("got int token %d val %d\n", token, intval);
	} else if (token > Opt_last_int && token < Opt_last_string) {
		dout("got string token %d val %s\n", token,
		     argstr[0].from);
	} else if (token > Opt_last_string && token < Opt_last_bool) {
		dout("got Boolean token %d\n", token);
	} else {
		dout("got token %d\n", token);
	}

	switch (token) {
	case Opt_read_only:
		rbd_opts->read_only = true;
		break;
	case Opt_read_write:
		rbd_opts->read_only = false;
		break;
	default:
		rbd_assert(false);
		break;
	}
	return 0;
}
/*
 * Get a ceph client with specific addr and configuration, if one does
 * not exist create it.  Either way, ceph_opts is consumed by this
 * function.
 */
static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;

	mutex_lock_nested(&client_mutex, SINGLE_DEPTH_NESTING);
	rbdc = rbd_client_find(ceph_opts);
	if (rbdc)	/* using an existing client */
		ceph_destroy_options(ceph_opts);
	else
		rbdc = rbd_client_create(ceph_opts);
	mutex_unlock(&client_mutex);

	return rbdc;
}
/*
 * Destroy ceph client.  Takes and releases rbd_client_list_lock
 * itself while unlinking the client from the list.
 */
static void rbd_client_release(struct kref *kref)
{
	struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);

	dout("%s: rbdc %p\n", __func__, rbdc);
	spin_lock(&rbd_client_list_lock);
	list_del(&rbdc->node);
	spin_unlock(&rbd_client_list_lock);

	ceph_destroy_client(rbdc->client);
	kfree(rbdc);
}
/*
 * Drop reference to ceph client node. If it's not referenced anymore, release
 * it.
 */
static void rbd_put_client(struct rbd_client *rbdc)
{
	if (rbdc)
		kref_put(&rbdc->kref, rbd_client_release);
}
static bool rbd_image_format_valid(u32 image_format)
{
	return image_format == 1 || image_format == 2;
}
static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
{
	size_t size;
	u32 snap_count;

	/* The header has to start with the magic rbd header text */
	if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)))
		return false;

	/* The bio layer requires at least sector-sized I/O */

	if (ondisk->options.order < SECTOR_SHIFT)
		return false;

	/* If we use u64 in a few spots we may be able to loosen this */

	if (ondisk->options.order > 8 * sizeof (int) - 1)
		return false;

	/*
	 * The size of a snapshot header has to fit in a size_t, and
	 * that limits the number of snapshots.
	 */
	snap_count = le32_to_cpu(ondisk->snap_count);
	size = SIZE_MAX - sizeof (struct ceph_snap_context);
	if (snap_count > size / sizeof (__le64))
		return false;

	/*
	 * Not only that, but the size of the entire snapshot
	 * header must also be representable in a size_t.
	 */
	size -= snap_count * sizeof (__le64);
	if ((u64) size < le64_to_cpu(ondisk->snap_names_len))
		return false;

	return true;
}
/*
 * Fill an rbd image header with information from the given format 1
 * on-disk header.
 */
static int rbd_header_from_disk(struct rbd_device *rbd_dev,
				 struct rbd_image_header_ondisk *ondisk)
{
	struct rbd_image_header *header = &rbd_dev->header;
	bool first_time = header->object_prefix == NULL;
	struct ceph_snap_context *snapc;
	char *object_prefix = NULL;
	char *snap_names = NULL;
	u64 *snap_sizes = NULL;
	u32 snap_count;
	size_t size;
	int ret = -ENOMEM;
	u32 i;

	/* Allocate this now to avoid having to handle failure below */

	if (first_time) {
		size_t len;

		len = strnlen(ondisk->object_prefix,
				sizeof (ondisk->object_prefix));
		object_prefix = kmalloc(len + 1, GFP_KERNEL);
		if (!object_prefix)
			return -ENOMEM;
		memcpy(object_prefix, ondisk->object_prefix, len);
		object_prefix[len] = '\0';
	}

	/* Allocate the snapshot context and fill it in */

	snap_count = le32_to_cpu(ondisk->snap_count);
	snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
	if (!snapc)
		goto out_err;
	snapc->seq = le64_to_cpu(ondisk->snap_seq);
	if (snap_count) {
		struct rbd_image_snap_ondisk *snaps;
		u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);

		/* We'll keep a copy of the snapshot names... */

		if (snap_names_len > (u64)SIZE_MAX)
			goto out_2big;
		snap_names = kmalloc(snap_names_len, GFP_KERNEL);
		if (!snap_names)
			goto out_err;

		/* ...as well as the array of their sizes. */

		size = snap_count * sizeof (*header->snap_sizes);
		snap_sizes = kmalloc(size, GFP_KERNEL);
		if (!snap_sizes)
			goto out_err;

		/*
		 * Copy the names, and fill in each snapshot's id
		 * and size.
		 *
		 * Note that rbd_dev_v1_header_info() guarantees the
		 * ondisk buffer we're working with has
		 * snap_names_len bytes beyond the end of the
		 * snapshot id array, so this memcpy() is safe.
		 */
		memcpy(snap_names, &ondisk->snaps[snap_count], snap_names_len);
		snaps = ondisk->snaps;
		for (i = 0; i < snap_count; i++) {
			snapc->snaps[i] = le64_to_cpu(snaps[i].id);
			snap_sizes[i] = le64_to_cpu(snaps[i].image_size);
		}
	}

	/* We won't fail any more, fill in the header */

	if (first_time) {
		header->object_prefix = object_prefix;
		header->obj_order = ondisk->options.order;
		header->crypt_type = ondisk->options.crypt_type;
		header->comp_type = ondisk->options.comp_type;
		/* The rest aren't used for format 1 images */
		header->stripe_unit = 0;
		header->stripe_count = 0;
		header->features = 0;
	} else {
		ceph_put_snap_context(header->snapc);
		kfree(header->snap_names);
		kfree(header->snap_sizes);
	}

	/* The remaining fields always get updated (when we refresh) */

	header->image_size = le64_to_cpu(ondisk->image_size);
	header->snapc = snapc;
	header->snap_names = snap_names;
	header->snap_sizes = snap_sizes;

	/* Make sure mapping size is consistent with header info */

	if (rbd_dev->spec->snap_id == CEPH_NOSNAP || first_time)
		if (rbd_dev->mapping.size != header->image_size)
			rbd_dev->mapping.size = header->image_size;

	return 0;
out_2big:
	ret = -EIO;
out_err:
	kfree(snap_sizes);
	kfree(snap_names);
	ceph_put_snap_context(snapc);
	kfree(object_prefix);

	return ret;
}
static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which)
{
	const char *snap_name;

	rbd_assert(which < rbd_dev->header.snapc->num_snaps);

	/* Skip over names until we find the one we are looking for */

	snap_name = rbd_dev->header.snap_names;
	while (which--)
		snap_name += strlen(snap_name) + 1;

	return kstrdup(snap_name, GFP_KERNEL);
}
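/*
 * Layout example (illustrative): for a format 1 image, snap_names is
 * one buffer of NUL-terminated names packed back to back, e.g.
 * "mon\0tue\0wed\0"; finding entry 2 ("wed") means skipping
 * strlen("mon") + 1 and then strlen("tue") + 1 bytes, as above.
 */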
/*
 * Snapshot id comparison function for use with qsort()/bsearch().
 * Note that result is for snapshots in *descending* order.
 */
static int snapid_compare_reverse(const void *s1, const void *s2)
{
	u64 snap_id1 = *(u64 *)s1;
	u64 snap_id2 = *(u64 *)s2;

	if (snap_id1 < snap_id2)
		return 1;
	return snap_id1 == snap_id2 ? 0 : -1;
}
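/*
 * Worked example (illustrative): for snaps[] = { 9, 7, 3 } (descending,
 * as the osd keeps them), a key of 7 compares equal to the middle
 * element, so bsearch() in rbd_dev_snap_index() below reports index 1.
 * A key of 8 compares "after" 9 but "before" 7, so the search fails
 * and BAD_SNAP_INDEX is returned.
 */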
/*
 * Search a snapshot context to see if the given snapshot id is
 * present.
 *
 * Returns the position of the snapshot id in the array if it's found,
 * or BAD_SNAP_INDEX otherwise.
 *
 * Note: The snapshot array is kept sorted (by the osd) in
 * reverse order, highest snapshot id first.
 */
static u32 rbd_dev_snap_index(struct rbd_device *rbd_dev, u64 snap_id)
{
	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
	u64 *found;

	found = bsearch(&snap_id, &snapc->snaps, snapc->num_snaps,
				sizeof (snap_id), snapid_compare_reverse);

	return found ? (u32)(found - &snapc->snaps[0]) : BAD_SNAP_INDEX;
}
static const char *rbd_dev_v1_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id)
{
	u32 which;
	const char *snap_name;

	which = rbd_dev_snap_index(rbd_dev, snap_id);
	if (which == BAD_SNAP_INDEX)
		return ERR_PTR(-ENOENT);

	snap_name = _rbd_dev_v1_snap_name(rbd_dev, which);
	return snap_name ? snap_name : ERR_PTR(-ENOMEM);
}
static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
{
	if (snap_id == CEPH_NOSNAP)
		return RBD_SNAP_HEAD_NAME;

	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (rbd_dev->image_format == 1)
		return rbd_dev_v1_snap_name(rbd_dev, snap_id);

	return rbd_dev_v2_snap_name(rbd_dev, snap_id);
}
static int rbd_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
				u64 *snap_size)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (snap_id == CEPH_NOSNAP) {
		*snap_size = rbd_dev->header.image_size;
	} else if (rbd_dev->image_format == 1) {
		u32 which;

		which = rbd_dev_snap_index(rbd_dev, snap_id);
		if (which == BAD_SNAP_INDEX)
			return -ENOENT;

		*snap_size = rbd_dev->header.snap_sizes[which];
	} else {
		u64 size = 0;
		int ret;

		ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, NULL, &size);
		if (ret)
			return ret;

		*snap_size = size;
	}
	return 0;
}
static int rbd_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
			u64 *snap_features)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (snap_id == CEPH_NOSNAP) {
		*snap_features = rbd_dev->header.features;
	} else if (rbd_dev->image_format == 1) {
		*snap_features = 0;	/* No features for format 1 */
	} else {
		u64 features = 0;
		int ret;

		ret = _rbd_dev_v2_snap_features(rbd_dev, snap_id, &features);
		if (ret)
			return ret;

		*snap_features = features;
	}
	return 0;
}
static int rbd_dev_mapping_set(struct rbd_device *rbd_dev)
{
	u64 snap_id = rbd_dev->spec->snap_id;
	u64 size = 0;
	u64 features = 0;
	int ret;

	ret = rbd_snap_size(rbd_dev, snap_id, &size);
	if (ret)
		return ret;
	ret = rbd_snap_features(rbd_dev, snap_id, &features);
	if (ret)
		return ret;

	rbd_dev->mapping.size = size;
	rbd_dev->mapping.features = features;

	return 0;
}
static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev)
{
	rbd_dev->mapping.size = 0;
	rbd_dev->mapping.features = 0;
}
static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
{
	char *name;
	u64 segment;
	int ret;
	char *name_format;

	name = kmem_cache_alloc(rbd_segment_name_cache, GFP_NOIO);
	if (!name)
		return NULL;
	segment = offset >> rbd_dev->header.obj_order;
	name_format = "%s.%012llx";
	if (rbd_dev->image_format == 2)
		name_format = "%s.%016llx";
	ret = snprintf(name, MAX_OBJ_NAME_SIZE + 1, name_format,
			rbd_dev->header.object_prefix, segment);
	if (ret < 0 || ret > MAX_OBJ_NAME_SIZE) {
		pr_err("error formatting segment name for #%llu (%d)\n",
			segment, ret);
		kmem_cache_free(rbd_segment_name_cache, name);
		name = NULL;
	}

	return name;
}
static void rbd_segment_name_free(const char *name)
{
	/* The explicit cast here is needed to drop the const qualifier */

	kmem_cache_free(rbd_segment_name_cache, (void *)name);
}
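/*
 * Example (illustrative): a format 1 image with object prefix
 * "rb.0.123a.6b8b4567" names its second object
 * "rb.0.123a.6b8b4567.000000000001" (12 hex digits), while a format 2
 * image with prefix "rbd_data.123a" would use
 * "rbd_data.123a.0000000000000001" (16 hex digits).
 */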
static u64 rbd_segment_offset(struct rbd_device *rbd_dev, u64 offset)
{
	u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;

	return offset & (segment_size - 1);
}
static u64 rbd_segment_length(struct rbd_device *rbd_dev,
				u64 offset, u64 length)
{
	u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;

	offset &= segment_size - 1;

	rbd_assert(length <= U64_MAX - offset);
	if (offset + length > segment_size)
		length = segment_size - offset;

	return length;
}
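/*
 * Worked example (illustrative): with obj_order 22 (4 MiB segments), an
 * image offset of 0x500000 falls in segment 1 at within-segment offset
 * 0x100000, and a 4 MiB request starting there is truncated by
 * rbd_segment_length() to 0x300000 bytes, the remainder of the segment.
 */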
/*
 * returns the size of an object in the image
 */
static u64 rbd_obj_bytes(struct rbd_image_header *header)
{
	return 1 << header->obj_order;
}
static void bio_chain_put(struct bio *chain)
{
	struct bio *tmp;

	while (chain) {
		tmp = chain;
		chain = chain->bi_next;
		bio_put(tmp);
	}
}
/*
 * zeros a bio chain, starting at specific offset
 */
static void zero_bio_chain(struct bio *chain, int start_ofs)
{
	struct bio_vec bv;
	struct bvec_iter iter;
	unsigned long flags;
	void *buf;
	int pos = 0;

	while (chain) {
		bio_for_each_segment(bv, chain, iter) {
			if (pos + bv.bv_len > start_ofs) {
				int remainder = max(start_ofs - pos, 0);
				buf = bvec_kmap_irq(&bv, &flags);
				memset(buf + remainder, 0,
				       bv.bv_len - remainder);
				flush_dcache_page(bv.bv_page);
				bvec_kunmap_irq(buf, &flags);
			}
			pos += bv.bv_len;
		}

		chain = chain->bi_next;
	}
}
/*
 * similar to zero_bio_chain(), zeros data defined by a page array,
 * starting at the given byte offset from the start of the array and
 * continuing up to the given end offset.  The pages array is
 * assumed to be big enough to hold all bytes up to the end.
 */
static void zero_pages(struct page **pages, u64 offset, u64 end)
{
	struct page **page = &pages[offset >> PAGE_SHIFT];

	rbd_assert(end > offset);
	rbd_assert(end - offset <= (u64)SIZE_MAX);
	while (offset < end) {
		size_t page_offset;
		size_t length;
		unsigned long flags;
		void *kaddr;

		page_offset = offset & ~PAGE_MASK;
		length = min_t(size_t, PAGE_SIZE - page_offset, end - offset);
		local_irq_save(flags);
		kaddr = kmap_atomic(*page);
		memset(kaddr + page_offset, 0, length);
		flush_dcache_page(*page);
		kunmap_atomic(kaddr);
		local_irq_restore(flags);

		offset += length;
		page++;
	}
}
/*
 * Clone a portion of a bio, starting at the given byte offset
 * and continuing for the number of bytes indicated.
 */
static struct bio *bio_clone_range(struct bio *bio_src,
					unsigned int offset,
					unsigned int len,
					gfp_t gfpmask)
{
	struct bio *bio;

	bio = bio_clone(bio_src, gfpmask);
	if (!bio)
		return NULL;	/* ENOMEM */

	bio_advance(bio, offset);
	bio->bi_iter.bi_size = len;

	return bio;
}
/*
 * Clone a portion of a bio chain, starting at the given byte offset
 * into the first bio in the source chain and continuing for the
 * number of bytes indicated.  The result is another bio chain of
 * exactly the given length, or a null pointer on error.
 *
 * The bio_src and offset parameters are both in-out.  On entry they
 * refer to the first source bio and the offset into that bio where
 * the start of data to be cloned is located.
 *
 * On return, bio_src is updated to refer to the bio in the source
 * chain that contains the first un-cloned byte, and *offset will
 * contain the offset of that byte within that bio.
 */
static struct bio *bio_chain_clone_range(struct bio **bio_src,
					unsigned int *offset,
					unsigned int len,
					gfp_t gfpmask)
{
	struct bio *bi = *bio_src;
	unsigned int off = *offset;
	struct bio *chain = NULL;
	struct bio **end;

	/* Build up a chain of clone bios up to the limit */

	if (!bi || off >= bi->bi_iter.bi_size || !len)
		return NULL;		/* Nothing to clone */

	end = &chain;
	while (len) {
		unsigned int bi_size;
		struct bio *bio;

		if (!bi) {
			rbd_warn(NULL, "bio_chain exhausted with %u left", len);
			goto out_err;	/* EINVAL; ran out of bio's */
		}
		bi_size = min_t(unsigned int, bi->bi_iter.bi_size - off, len);
		bio = bio_clone_range(bi, off, bi_size, gfpmask);
		if (!bio)
			goto out_err;	/* ENOMEM */

		*end = bio;
		end = &bio->bi_next;

		off += bi_size;
		if (off == bi->bi_iter.bi_size) {
			bi = bi->bi_next;
			off = 0;
		}
		len -= bi_size;
	}
	*bio_src = bi;
	*offset = off;

	return chain;
out_err:
	bio_chain_put(chain);

	return NULL;
}
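/*
 * Worked example (illustrative): cloning 6K starting 2K into a chain of
 * two 4K bios yields a two-clone chain (the last 2K of the first bio,
 * then all 4K of the second); on return *bio_src points past the second
 * source bio and *offset is 0, ready for a subsequent clone.
 */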
/*
 * The default/initial value for all object request flags is 0.  For
 * each flag, once its value is set to 1 it is never reset to 0
 * again.
 */
static void obj_request_img_data_set(struct rbd_obj_request *obj_request)
{
	if (test_and_set_bit(OBJ_REQ_IMG_DATA, &obj_request->flags)) {
		struct rbd_device *rbd_dev;

		rbd_dev = obj_request->img_request->rbd_dev;
		rbd_warn(rbd_dev, "obj_request %p already marked img_data\n",
			obj_request);
	}
}
static bool obj_request_img_data_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_IMG_DATA, &obj_request->flags) != 0;
}
static void obj_request_done_set(struct rbd_obj_request *obj_request)
{
	if (test_and_set_bit(OBJ_REQ_DONE, &obj_request->flags)) {
		struct rbd_device *rbd_dev = NULL;

		if (obj_request_img_data_test(obj_request))
			rbd_dev = obj_request->img_request->rbd_dev;
		rbd_warn(rbd_dev, "obj_request %p already marked done\n",
			obj_request);
	}
}
static bool obj_request_done_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_DONE, &obj_request->flags) != 0;
}
/*
 * This sets the KNOWN flag after (possibly) setting the EXISTS
 * flag.  The latter is set based on the "exists" value provided.
 *
 * Note that for our purposes once an object exists it never goes
 * away again.  It's possible that the responses from two existence
 * checks are separated by the creation of the target object, and
 * the first ("doesn't exist") response arrives *after* the second
 * ("does exist").  In that case we ignore the second one.
 */
static void obj_request_existence_set(struct rbd_obj_request *obj_request,
				bool exists)
{
	if (exists)
		set_bit(OBJ_REQ_EXISTS, &obj_request->flags);
	set_bit(OBJ_REQ_KNOWN, &obj_request->flags);
	smp_mb();
}
static bool obj_request_known_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_KNOWN, &obj_request->flags) != 0;
}

static bool obj_request_exists_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_EXISTS, &obj_request->flags) != 0;
}
static void rbd_obj_request_get(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p (was %d)\n", __func__, obj_request,
		atomic_read(&obj_request->kref.refcount));
	kref_get(&obj_request->kref);
}

static void rbd_obj_request_destroy(struct kref *kref);
static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request != NULL);
	dout("%s: obj %p (was %d)\n", __func__, obj_request,
		atomic_read(&obj_request->kref.refcount));
	kref_put(&obj_request->kref, rbd_obj_request_destroy);
}
static bool img_request_child_test(struct rbd_img_request *img_request);
static void rbd_parent_request_destroy(struct kref *kref);
static void rbd_img_request_destroy(struct kref *kref);
static void rbd_img_request_put(struct rbd_img_request *img_request)
{
	rbd_assert(img_request != NULL);
	dout("%s: img %p (was %d)\n", __func__, img_request,
		atomic_read(&img_request->kref.refcount));
	if (img_request_child_test(img_request))
		kref_put(&img_request->kref, rbd_parent_request_destroy);
	else
		kref_put(&img_request->kref, rbd_img_request_destroy);
}
static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request->img_request == NULL);

	/* Image request now owns object's original reference */
	obj_request->img_request = img_request;
	obj_request->which = img_request->obj_request_count;
	rbd_assert(!obj_request_img_data_test(obj_request));
	obj_request_img_data_set(obj_request);
	rbd_assert(obj_request->which != BAD_WHICH);
	img_request->obj_request_count++;
	list_add_tail(&obj_request->links, &img_request->obj_requests);
	dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
		obj_request->which);
}
static inline void rbd_img_obj_request_del(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request->which != BAD_WHICH);

	dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
		obj_request->which);
	list_del(&obj_request->links);
	rbd_assert(img_request->obj_request_count > 0);
	img_request->obj_request_count--;
	rbd_assert(obj_request->which == img_request->obj_request_count);
	obj_request->which = BAD_WHICH;
	rbd_assert(obj_request_img_data_test(obj_request));
	rbd_assert(obj_request->img_request == img_request);
	obj_request->img_request = NULL;
	obj_request->callback = NULL;
	rbd_obj_request_put(obj_request);
}
static bool obj_request_type_valid(enum obj_request_type type)
{
	switch (type) {
	case OBJ_REQUEST_NODATA:
	case OBJ_REQUEST_BIO:
	case OBJ_REQUEST_PAGES:
		return true;
	default:
		return false;
	}
}
static int rbd_obj_request_submit(struct ceph_osd_client *osdc,
				struct rbd_obj_request *obj_request)
{
	dout("%s: osdc %p obj %p\n", __func__, osdc, obj_request);

	return ceph_osdc_start_request(osdc, obj_request->osd_req, false);
}
static void rbd_img_request_complete(struct rbd_img_request *img_request)
{

	dout("%s: img %p\n", __func__, img_request);

	/*
	 * If no error occurred, compute the aggregate transfer
	 * count for the image request.  We could instead use
	 * atomic64_cmpxchg() to update it as each object request
	 * completes; not clear which way is better off hand.
	 */
	if (!img_request->result) {
		struct rbd_obj_request *obj_request;
		u64 xferred = 0;

		for_each_obj_request(img_request, obj_request)
			xferred += obj_request->xferred;
		img_request->xferred = xferred;
	}

	if (img_request->callback)
		img_request->callback(img_request);
	else
		rbd_img_request_put(img_request);
}
/* Caller is responsible for rbd_obj_request_destroy(obj_request) */

static int rbd_obj_request_wait(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p\n", __func__, obj_request);

	return wait_for_completion_interruptible(&obj_request->completion);
}
/*
 * The default/initial value for all image request flags is 0.  Each
 * is conditionally set to 1 at image request initialization time
 * and currently never changes thereafter.
 */
static void img_request_write_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_WRITE, &img_request->flags);
	smp_mb();
}

static bool img_request_write_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_WRITE, &img_request->flags) != 0;
}

static void img_request_child_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_CHILD, &img_request->flags);
	smp_mb();
}

static void img_request_child_clear(struct rbd_img_request *img_request)
{
	clear_bit(IMG_REQ_CHILD, &img_request->flags);
	smp_mb();
}

static bool img_request_child_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_CHILD, &img_request->flags) != 0;
}

static void img_request_layered_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_LAYERED, &img_request->flags);
	smp_mb();
}

static void img_request_layered_clear(struct rbd_img_request *img_request)
{
	clear_bit(IMG_REQ_LAYERED, &img_request->flags);
	smp_mb();
}

static bool img_request_layered_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_LAYERED, &img_request->flags) != 0;
}
static void
rbd_img_obj_request_read_callback(struct rbd_obj_request *obj_request)
{
	u64 xferred = obj_request->xferred;
	u64 length = obj_request->length;

	dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
		obj_request, obj_request->img_request, obj_request->result,
		xferred, length);
	/*
	 * ENOENT means a hole in the image.  We zero-fill the entire
	 * length of the request.  A short read also implies zero-fill
	 * to the end of the request.  An error requires the whole
	 * length of the request to be reported finished with an error
	 * to the block layer.  In each case we update the xferred
	 * count to indicate the whole request was satisfied.
	 */
	rbd_assert(obj_request->type != OBJ_REQUEST_NODATA);
	if (obj_request->result == -ENOENT) {
		if (obj_request->type == OBJ_REQUEST_BIO)
			zero_bio_chain(obj_request->bio_list, 0);
		else
			zero_pages(obj_request->pages, 0, length);
		obj_request->result = 0;
	} else if (xferred < length && !obj_request->result) {
		if (obj_request->type == OBJ_REQUEST_BIO)
			zero_bio_chain(obj_request->bio_list, xferred);
		else
			zero_pages(obj_request->pages, xferred, length);
	}
	obj_request->xferred = length;
	obj_request_done_set(obj_request);
}
static void rbd_obj_request_complete(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p cb %p\n", __func__, obj_request,
		obj_request->callback);
	if (obj_request->callback)
		obj_request->callback(obj_request);
	else
		complete_all(&obj_request->completion);
}
static void rbd_osd_trivial_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p\n", __func__, obj_request);
	obj_request_done_set(obj_request);
}
static void rbd_osd_read_callback(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request = NULL;
	struct rbd_device *rbd_dev = NULL;
	bool layered = false;

	if (obj_request_img_data_test(obj_request)) {
		img_request = obj_request->img_request;
		layered = img_request && img_request_layered_test(img_request);
		rbd_dev = img_request->rbd_dev;
	}

	dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
		obj_request, img_request, obj_request->result,
		obj_request->xferred, obj_request->length);
	if (layered && obj_request->result == -ENOENT &&
			obj_request->img_offset < rbd_dev->parent_overlap)
		rbd_img_parent_read(obj_request);
	else if (img_request)
		rbd_img_obj_request_read_callback(obj_request);
	else
		obj_request_done_set(obj_request);
}
static void rbd_osd_write_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p result %d %llu\n", __func__, obj_request,
		obj_request->result, obj_request->length);
	/*
	 * There is no such thing as a successful short write.  Set
	 * it to our originally-requested length.
	 */
	obj_request->xferred = obj_request->length;
	obj_request_done_set(obj_request);
}
/*
 * For a simple stat call there's nothing to do.  We'll do more if
 * this is part of a write sequence for a layered image.
 */
static void rbd_osd_stat_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p\n", __func__, obj_request);
	obj_request_done_set(obj_request);
}
static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
				struct ceph_msg *msg)
{
	struct rbd_obj_request *obj_request = osd_req->r_priv;
	u16 opcode;

	dout("%s: osd_req %p msg %p\n", __func__, osd_req, msg);
	rbd_assert(osd_req == obj_request->osd_req);
	if (obj_request_img_data_test(obj_request)) {
		rbd_assert(obj_request->img_request);
		rbd_assert(obj_request->which != BAD_WHICH);
	} else {
		rbd_assert(obj_request->which == BAD_WHICH);
	}

	if (osd_req->r_result < 0)
		obj_request->result = osd_req->r_result;

	BUG_ON(osd_req->r_num_ops > 2);

	/*
	 * We support a 64-bit length, but ultimately it has to be
	 * passed to blk_end_request(), which takes an unsigned int.
	 */
	obj_request->xferred = osd_req->r_reply_op_len[0];
	rbd_assert(obj_request->xferred < (u64)UINT_MAX);
	opcode = osd_req->r_ops[0].op;
	switch (opcode) {
	case CEPH_OSD_OP_READ:
		rbd_osd_read_callback(obj_request);
		break;
	case CEPH_OSD_OP_WRITE:
		rbd_osd_write_callback(obj_request);
		break;
	case CEPH_OSD_OP_STAT:
		rbd_osd_stat_callback(obj_request);
		break;
	case CEPH_OSD_OP_CALL:
	case CEPH_OSD_OP_NOTIFY_ACK:
	case CEPH_OSD_OP_WATCH:
		rbd_osd_trivial_callback(obj_request);
		break;
	default:
		rbd_warn(NULL, "%s: unsupported op %hu\n",
			obj_request->object_name, (unsigned short) opcode);
		break;
	}

	if (obj_request_done_test(obj_request))
		rbd_obj_request_complete(obj_request);
}
static void rbd_osd_req_format_read(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request = obj_request->img_request;
	struct ceph_osd_request *osd_req = obj_request->osd_req;
	u64 snap_id;

	rbd_assert(osd_req != NULL);

	snap_id = img_request ? img_request->snap_id : CEPH_NOSNAP;
	ceph_osdc_build_request(osd_req, obj_request->offset,
			NULL, snap_id, NULL);
}
static void rbd_osd_req_format_write(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request = obj_request->img_request;
	struct ceph_osd_request *osd_req = obj_request->osd_req;
	struct ceph_snap_context *snapc;
	struct timespec mtime = CURRENT_TIME;

	rbd_assert(osd_req != NULL);

	snapc = img_request ? img_request->snapc : NULL;
	ceph_osdc_build_request(osd_req, obj_request->offset,
			snapc, CEPH_NOSNAP, &mtime);
}
static struct ceph_osd_request *rbd_osd_req_create(
					struct rbd_device *rbd_dev,
					bool write_request,
					struct rbd_obj_request *obj_request)
{
	struct ceph_snap_context *snapc = NULL;
	struct ceph_osd_client *osdc;
	struct ceph_osd_request *osd_req;

	if (obj_request_img_data_test(obj_request)) {
		struct rbd_img_request *img_request = obj_request->img_request;

		rbd_assert(write_request ==
				img_request_write_test(img_request));
		if (write_request)
			snapc = img_request->snapc;
	}

	/* Allocate and initialize the request, for the single op */

	osdc = &rbd_dev->rbd_client->client->osdc;
	osd_req = ceph_osdc_alloc_request(osdc, snapc, 1, false, GFP_ATOMIC);
	if (!osd_req)
		return NULL;	/* ENOMEM */

	if (write_request)
		osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
	else
		osd_req->r_flags = CEPH_OSD_FLAG_READ;

	osd_req->r_callback = rbd_osd_req_callback;
	osd_req->r_priv = obj_request;

	osd_req->r_oid_len = strlen(obj_request->object_name);
	rbd_assert(osd_req->r_oid_len < sizeof (osd_req->r_oid));
	memcpy(osd_req->r_oid, obj_request->object_name, osd_req->r_oid_len);

	osd_req->r_file_layout = rbd_dev->layout;	/* struct */

	return osd_req;
}
/*
 * Create a copyup osd request based on the information in the
 * object request supplied.  A copyup request has two osd ops,
 * a copyup method call, and a "normal" write request.
 */
static struct ceph_osd_request *
rbd_osd_req_create_copyup(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	struct ceph_snap_context *snapc;
	struct rbd_device *rbd_dev;
	struct ceph_osd_client *osdc;
	struct ceph_osd_request *osd_req;

	rbd_assert(obj_request_img_data_test(obj_request));
	img_request = obj_request->img_request;
	rbd_assert(img_request);
	rbd_assert(img_request_write_test(img_request));

	/* Allocate and initialize the request, for the two ops */

	snapc = img_request->snapc;
	rbd_dev = img_request->rbd_dev;
	osdc = &rbd_dev->rbd_client->client->osdc;
	osd_req = ceph_osdc_alloc_request(osdc, snapc, 2, false, GFP_ATOMIC);
	if (!osd_req)
		return NULL;	/* ENOMEM */

	osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
	osd_req->r_callback = rbd_osd_req_callback;
	osd_req->r_priv = obj_request;

	osd_req->r_oid_len = strlen(obj_request->object_name);
	rbd_assert(osd_req->r_oid_len < sizeof (osd_req->r_oid));
	memcpy(osd_req->r_oid, obj_request->object_name, osd_req->r_oid_len);

	osd_req->r_file_layout = rbd_dev->layout;	/* struct */

	return osd_req;
}
static void rbd_osd_req_destroy(struct ceph_osd_request *osd_req)
{
	ceph_osdc_put_request(osd_req);
}
/* object_name is assumed to be a non-null pointer and NUL-terminated */

static struct rbd_obj_request *rbd_obj_request_create(const char *object_name,
						u64 offset, u64 length,
						enum obj_request_type type)
{
	struct rbd_obj_request *obj_request;
	size_t size;
	char *name;

	rbd_assert(obj_request_type_valid(type));

	size = strlen(object_name) + 1;
	name = kmalloc(size, GFP_KERNEL);
	if (!name)
		return NULL;

	obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_KERNEL);
	if (!obj_request) {
		kfree(name);
		return NULL;
	}

	obj_request->object_name = memcpy(name, object_name, size);
	obj_request->offset = offset;
	obj_request->length = length;
	obj_request->flags = 0;
	obj_request->which = BAD_WHICH;
	obj_request->type = type;
	INIT_LIST_HEAD(&obj_request->links);
	init_completion(&obj_request->completion);
	kref_init(&obj_request->kref);

	dout("%s: \"%s\" %llu/%llu %d -> obj %p\n", __func__, object_name,
		offset, length, (int)type, obj_request);

	return obj_request;
}
static void rbd_obj_request_destroy(struct kref *kref)
{
	struct rbd_obj_request *obj_request;

	obj_request = container_of(kref, struct rbd_obj_request, kref);

	dout("%s: obj %p\n", __func__, obj_request);

	rbd_assert(obj_request->img_request == NULL);
	rbd_assert(obj_request->which == BAD_WHICH);

	if (obj_request->osd_req)
		rbd_osd_req_destroy(obj_request->osd_req);

	rbd_assert(obj_request_type_valid(obj_request->type));
	switch (obj_request->type) {
	case OBJ_REQUEST_NODATA:
		break;		/* Nothing to do */
	case OBJ_REQUEST_BIO:
		if (obj_request->bio_list)
			bio_chain_put(obj_request->bio_list);
		break;
	case OBJ_REQUEST_PAGES:
		if (obj_request->pages)
			ceph_release_page_vector(obj_request->pages,
						obj_request->page_count);
		break;
	}

	kfree(obj_request->object_name);
	obj_request->object_name = NULL;
	kmem_cache_free(rbd_obj_request_cache, obj_request);
}
/* It's OK to call this for a device with no parent */

static void rbd_spec_put(struct rbd_spec *spec);
static void rbd_dev_unparent(struct rbd_device *rbd_dev)
{
	rbd_dev_remove_parent(rbd_dev);
	rbd_spec_put(rbd_dev->parent_spec);
	rbd_dev->parent_spec = NULL;
	rbd_dev->parent_overlap = 0;
}
/*
 * Parent image reference counting is used to determine when an
 * image's parent fields can be safely torn down--after there are no
 * more in-flight requests to the parent image.  When the last
 * reference is dropped, cleaning them up is safe.
 */
static void rbd_dev_parent_put(struct rbd_device *rbd_dev)
{
	int counter;

	if (!rbd_dev->parent_spec)
		return;

	counter = atomic_dec_return_safe(&rbd_dev->parent_ref);
	if (counter > 0)
		return;

	/* Last reference; clean up parent data structures */

	if (!counter)
		rbd_dev_unparent(rbd_dev);
	else
		rbd_warn(rbd_dev, "parent reference underflow\n");
}
/*
 * If an image has a non-zero parent overlap, get a reference to its
 * parent.
 *
 * We must get the reference before checking for the overlap to
 * coordinate properly with zeroing the parent overlap in
 * rbd_dev_v2_parent_info() when an image gets flattened.  We
 * drop it again if there is no overlap.
 *
 * Returns true if the rbd device has a parent with a non-zero
 * overlap and a reference for it was successfully taken, or
 * false otherwise.
 */
static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
{
	int counter;

	if (!rbd_dev->parent_spec)
		return false;

	counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
	if (counter > 0 && rbd_dev->parent_overlap)
		return true;

	/* Image was flattened, but parent is not yet torn down */

	if (counter < 0)
		rbd_warn(rbd_dev, "parent reference overflow\n");

	return false;
}
/*
 * Caller is responsible for filling in the list of object requests
 * that comprises the image request, and the Linux request pointer
 * (if there is one).
 */
static struct rbd_img_request *rbd_img_request_create(
					struct rbd_device *rbd_dev,
					u64 offset, u64 length,
					bool write_request)
{
	struct rbd_img_request *img_request;

	img_request = kmem_cache_alloc(rbd_img_request_cache, GFP_ATOMIC);
	if (!img_request)
		return NULL;

	if (write_request) {
		down_read(&rbd_dev->header_rwsem);
		ceph_get_snap_context(rbd_dev->header.snapc);
		up_read(&rbd_dev->header_rwsem);
	}

	img_request->rq = NULL;
	img_request->rbd_dev = rbd_dev;
	img_request->offset = offset;
	img_request->length = length;
	img_request->flags = 0;
	if (write_request) {
		img_request_write_set(img_request);
		img_request->snapc = rbd_dev->header.snapc;
	} else {
		img_request->snap_id = rbd_dev->spec->snap_id;
	}
	if (rbd_dev_parent_get(rbd_dev))
		img_request_layered_set(img_request);
	spin_lock_init(&img_request->completion_lock);
	img_request->next_completion = 0;
	img_request->callback = NULL;
	img_request->result = 0;
	img_request->obj_request_count = 0;
	INIT_LIST_HEAD(&img_request->obj_requests);
	kref_init(&img_request->kref);

	dout("%s: rbd_dev %p %s %llu/%llu -> img %p\n", __func__, rbd_dev,
		write_request ? "write" : "read", offset, length,
		img_request);

	return img_request;
}
static void rbd_img_request_destroy(struct kref *kref)
{
	struct rbd_img_request *img_request;
	struct rbd_obj_request *obj_request;
	struct rbd_obj_request *next_obj_request;

	img_request = container_of(kref, struct rbd_img_request, kref);

	dout("%s: img %p\n", __func__, img_request);

	for_each_obj_request_safe(img_request, obj_request, next_obj_request)
		rbd_img_obj_request_del(img_request, obj_request);
	rbd_assert(img_request->obj_request_count == 0);

	if (img_request_layered_test(img_request)) {
		img_request_layered_clear(img_request);
		rbd_dev_parent_put(img_request->rbd_dev);
	}

	if (img_request_write_test(img_request))
		ceph_put_snap_context(img_request->snapc);

	kmem_cache_free(rbd_img_request_cache, img_request);
}
static struct rbd_img_request *rbd_parent_request_create(
					struct rbd_obj_request *obj_request,
					u64 img_offset, u64 length)
{
	struct rbd_img_request *parent_request;
	struct rbd_device *rbd_dev;

	rbd_assert(obj_request->img_request);
	rbd_dev = obj_request->img_request->rbd_dev;

	parent_request = rbd_img_request_create(rbd_dev->parent,
						img_offset, length, false);
	if (!parent_request)
		return NULL;

	img_request_child_set(parent_request);
	rbd_obj_request_get(obj_request);
	parent_request->obj_request = obj_request;

	return parent_request;
}
static void rbd_parent_request_destroy(struct kref *kref)
{
	struct rbd_img_request *parent_request;
	struct rbd_obj_request *orig_request;

	parent_request = container_of(kref, struct rbd_img_request, kref);
	orig_request = parent_request->obj_request;

	parent_request->obj_request = NULL;
	rbd_obj_request_put(orig_request);
	img_request_child_clear(parent_request);

	rbd_img_request_destroy(kref);
}
static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	unsigned int xferred;
	int result;
	bool more;

	rbd_assert(obj_request_img_data_test(obj_request));
	img_request = obj_request->img_request;

	rbd_assert(obj_request->xferred <= (u64)UINT_MAX);
	xferred = (unsigned int)obj_request->xferred;
	result = obj_request->result;
	if (result) {
		struct rbd_device *rbd_dev = img_request->rbd_dev;

		rbd_warn(rbd_dev, "%s %llx at %llx (%llx)\n",
			img_request_write_test(img_request) ? "write" : "read",
			obj_request->length, obj_request->img_offset,
			obj_request->offset);
		rbd_warn(rbd_dev, "  result %d xferred %x\n",
			result, xferred);
		if (!img_request->result)
			img_request->result = result;
	}

	/* Image object requests don't own their page array */

	if (obj_request->type == OBJ_REQUEST_PAGES) {
		obj_request->pages = NULL;
		obj_request->page_count = 0;
	}

	if (img_request_child_test(img_request)) {
		rbd_assert(img_request->obj_request != NULL);
		more = obj_request->which < img_request->obj_request_count - 1;
	} else {
		rbd_assert(img_request->rq != NULL);
		more = blk_end_request(img_request->rq, result, xferred);
	}

	return more;
}
static void rbd_img_obj_callback(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	u32 which = obj_request->which;
	bool more = true;

	rbd_assert(obj_request_img_data_test(obj_request));
	img_request = obj_request->img_request;

	dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
	rbd_assert(img_request != NULL);
	rbd_assert(img_request->obj_request_count > 0);
	rbd_assert(which != BAD_WHICH);
	rbd_assert(which < img_request->obj_request_count);
	rbd_assert(which >= img_request->next_completion);

	spin_lock_irq(&img_request->completion_lock);
	if (which != img_request->next_completion)
		goto out;

	for_each_obj_request_from(img_request, obj_request) {
		rbd_assert(more);
		rbd_assert(which < img_request->obj_request_count);

		if (!obj_request_done_test(obj_request))
			break;
		more = rbd_img_obj_end_request(obj_request);
		which++;
	}

	rbd_assert(more ^ (which == img_request->obj_request_count));
	img_request->next_completion = which;
out:
	spin_unlock_irq(&img_request->completion_lock);

	if (!more)
		rbd_img_request_complete(img_request);
}
/*
 * Split up an image request into one or more object requests, each
 * to a different object.  The "type" parameter indicates whether
 * "data_desc" is the pointer to the head of a list of bio
 * structures, or the base of a page array.  In either case this
 * function assumes data_desc describes memory sufficient to hold
 * all data described by the image request.
 */
static int rbd_img_request_fill(struct rbd_img_request *img_request,
					enum obj_request_type type,
					void *data_desc)
{
	struct rbd_device *rbd_dev = img_request->rbd_dev;
	struct rbd_obj_request *obj_request = NULL;
	struct rbd_obj_request *next_obj_request;
	bool write_request = img_request_write_test(img_request);
	struct bio *bio_list = NULL;
	unsigned int bio_offset = 0;
	struct page **pages = NULL;
	u64 img_offset;
	u64 resid;
	u16 opcode;

	dout("%s: img %p type %d data_desc %p\n", __func__, img_request,
		(int)type, data_desc);

	opcode = write_request ? CEPH_OSD_OP_WRITE : CEPH_OSD_OP_READ;
	img_offset = img_request->offset;
	resid = img_request->length;
	rbd_assert(resid > 0);

	if (type == OBJ_REQUEST_BIO) {
		bio_list = data_desc;
		rbd_assert(img_offset ==
			   bio_list->bi_iter.bi_sector << SECTOR_SHIFT);
	} else {
		rbd_assert(type == OBJ_REQUEST_PAGES);
		pages = data_desc;
	}

	while (resid) {
		struct ceph_osd_request *osd_req;
		const char *object_name;
		u64 offset;
		u64 length;

		object_name = rbd_segment_name(rbd_dev, img_offset);
		if (!object_name)
			goto out_unwind;
		offset = rbd_segment_offset(rbd_dev, img_offset);
		length = rbd_segment_length(rbd_dev, img_offset, resid);
		obj_request = rbd_obj_request_create(object_name,
						offset, length, type);
		/* object request has its own copy of the object name */
		rbd_segment_name_free(object_name);
		if (!obj_request)
			goto out_unwind;

		/*
		 * set obj_request->img_request before creating the
		 * osd_request so that it gets the right snapc
		 */
		rbd_img_obj_request_add(img_request, obj_request);

		if (type == OBJ_REQUEST_BIO) {
			unsigned int clone_size;

			rbd_assert(length <= (u64)UINT_MAX);
			clone_size = (unsigned int)length;
			obj_request->bio_list =
					bio_chain_clone_range(&bio_list,
								&bio_offset,
								clone_size,
								GFP_ATOMIC);
			if (!obj_request->bio_list)
				goto out_partial;
		} else {
			unsigned int page_count;

			obj_request->pages = pages;
			page_count = (u32)calc_pages_for(offset, length);
			obj_request->page_count = page_count;
			if ((offset + length) & ~PAGE_MASK)
				page_count--;	/* more on last page */
			pages += page_count;
		}

		osd_req = rbd_osd_req_create(rbd_dev, write_request,
						obj_request);
		if (!osd_req)
			goto out_partial;
		obj_request->osd_req = osd_req;
		obj_request->callback = rbd_img_obj_callback;

		osd_req_op_extent_init(osd_req, 0, opcode, offset, length,
						0, 0);
		if (type == OBJ_REQUEST_BIO)
			osd_req_op_extent_osd_data_bio(osd_req, 0,
					obj_request->bio_list, length);
		else
			osd_req_op_extent_osd_data_pages(osd_req, 0,
					obj_request->pages, length,
					offset & ~PAGE_MASK, false, false);

		if (write_request)
			rbd_osd_req_format_write(obj_request);
		else
			rbd_osd_req_format_read(obj_request);

		obj_request->img_offset = img_offset;

		img_offset += length;
		resid -= length;
	}

	return 0;

out_partial:
	rbd_obj_request_put(obj_request);
out_unwind:
	for_each_obj_request_safe(img_request, obj_request, next_obj_request)
		rbd_obj_request_put(obj_request);

	return -ENOMEM;
}
static void
rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	struct rbd_device *rbd_dev;
	struct page **pages;
	u32 page_count;

	rbd_assert(obj_request->type == OBJ_REQUEST_BIO);
	rbd_assert(obj_request_img_data_test(obj_request));
	img_request = obj_request->img_request;
	rbd_assert(img_request);

	rbd_dev = img_request->rbd_dev;
	rbd_assert(rbd_dev);

	pages = obj_request->copyup_pages;
	rbd_assert(pages != NULL);
	obj_request->copyup_pages = NULL;
	page_count = obj_request->copyup_page_count;
	rbd_assert(page_count);
	obj_request->copyup_page_count = 0;
	ceph_release_page_vector(pages, page_count);

	/*
	 * We want the transfer count to reflect the size of the
	 * original write request.  There is no such thing as a
	 * successful short write, so if the request was successful
	 * we can just set it to the originally-requested length.
	 */
	if (!obj_request->result)
		obj_request->xferred = obj_request->length;

	/* Finish up with the normal image object callback */

	rbd_img_obj_callback(obj_request);
}
2260 rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)
2262 struct rbd_obj_request *orig_request;
2263 struct ceph_osd_request *osd_req;
2264 struct ceph_osd_client *osdc;
2265 struct rbd_device *rbd_dev;
2266 struct page **pages;
2273 rbd_assert(img_request_child_test(img_request));
2275 /* First get what we need from the image request */
2277 pages = img_request->copyup_pages;
2278 rbd_assert(pages != NULL);
2279 img_request->copyup_pages = NULL;
2280 page_count = img_request->copyup_page_count;
2281 rbd_assert(page_count);
2282 img_request->copyup_page_count = 0;
2284 orig_request = img_request->obj_request;
2285 rbd_assert(orig_request != NULL);
2286 rbd_assert(obj_request_type_valid(orig_request->type));
2287 img_result = img_request->result;
2288 parent_length = img_request->length;
2289 rbd_assert(parent_length == img_request->xferred);
2290 rbd_img_request_put(img_request);
2292 rbd_assert(orig_request->img_request);
2293 rbd_dev = orig_request->img_request->rbd_dev;
2294 rbd_assert(rbd_dev);
2297 * If the overlap has become 0 (most likely because the
2298 * image has been flattened) we need to free the pages
2299 * and re-submit the original write request.
2301 if (!rbd_dev->parent_overlap) {
2302 struct ceph_osd_client *osdc;
2304 ceph_release_page_vector(pages, page_count);
2305 osdc = &rbd_dev->rbd_client->client->osdc;
2306 img_result = rbd_obj_request_submit(osdc, orig_request);
2315 * The original osd request is of no use to us any more.
2316 * We need a new one that can hold the two ops in a copyup
2317 * request. Allocate the new copyup osd request for the
2318 * original request, and release the old one.
2320 img_result = -ENOMEM;
2321 osd_req = rbd_osd_req_create_copyup(orig_request);
2324 rbd_osd_req_destroy(orig_request->osd_req);
2325 orig_request->osd_req = osd_req;
2326 orig_request->copyup_pages = pages;
2327 orig_request->copyup_page_count = page_count;
2329 /* Initialize the copyup op */
2331 osd_req_op_cls_init(osd_req, 0, CEPH_OSD_OP_CALL, "rbd", "copyup");
2332 osd_req_op_cls_request_data_pages(osd_req, 0, pages, parent_length, 0,
2335 /* Then the original write request op */
2337 offset = orig_request->offset;
2338 length = orig_request->length;
2339 osd_req_op_extent_init(osd_req, 1, CEPH_OSD_OP_WRITE,
2340 offset, length, 0, 0);
2341 if (orig_request->type == OBJ_REQUEST_BIO)
2342 osd_req_op_extent_osd_data_bio(osd_req, 1,
2343 orig_request->bio_list, length);
2345 osd_req_op_extent_osd_data_pages(osd_req, 1,
2346 orig_request->pages, length,
2347 offset & ~PAGE_MASK, false, false);
2349 rbd_osd_req_format_write(orig_request);
2351 /* All set, send it off. */
2353 orig_request->callback = rbd_img_obj_copyup_callback;
2354 osdc = &rbd_dev->rbd_client->client->osdc;
2355 img_result = rbd_obj_request_submit(osdc, orig_request);
2359 /* Record the error code and complete the request */
2361 orig_request->result = img_result;
2362 orig_request->xferred = 0;
2363 obj_request_done_set(orig_request);
2364 rbd_obj_request_complete(orig_request);
2368 * Read from the parent image the range of data that covers the
2369 * entire target of the given object request. This is used for
2370 * satisfying a layered image write request when the target of an
2371 * object request from the image request does not exist.
2373 * A page array big enough to hold the returned data is allocated
2374 * and supplied to rbd_img_request_fill() as the "data descriptor."
2375 * When the read completes, this page array will be transferred to
2376 * the original object request for the copyup operation.
2378 * If an error occurs, record it as the result of the original
2379 * object request and mark it done so it gets completed.
2381 static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request)
2383 struct rbd_img_request *img_request = NULL;
2384 struct rbd_img_request *parent_request = NULL;
2385 struct rbd_device *rbd_dev;
2388 struct page **pages = NULL;
2392 rbd_assert(obj_request_img_data_test(obj_request));
2393 rbd_assert(obj_request_type_valid(obj_request->type));
2395 img_request = obj_request->img_request;
2396 rbd_assert(img_request != NULL);
2397 rbd_dev = img_request->rbd_dev;
2398 rbd_assert(rbd_dev->parent != NULL);
2401 * Determine the byte range covered by the object in the
2402 * child image to which the original request was to be sent.
2404 img_offset = obj_request->img_offset - obj_request->offset;
2405 length = (u64)1 << rbd_dev->header.obj_order;
2408 * There is no defined parent data beyond the parent
2409 * overlap, so limit what we read at that boundary if necessary.
2412 if (img_offset + length > rbd_dev->parent_overlap) {
2413 rbd_assert(img_offset < rbd_dev->parent_overlap);
2414 length = rbd_dev->parent_overlap - img_offset;
2418 * Allocate a page array big enough to receive the data read from the parent.
2421 page_count = (u32)calc_pages_for(0, length);
2422 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2423 if (IS_ERR(pages)) {
2424 result = PTR_ERR(pages);
2430 parent_request = rbd_parent_request_create(obj_request,
2431 img_offset, length);
2432 if (!parent_request)
2435 result = rbd_img_request_fill(parent_request, OBJ_REQUEST_PAGES, pages);
2438 parent_request->copyup_pages = pages;
2439 parent_request->copyup_page_count = page_count;
2441 parent_request->callback = rbd_img_obj_parent_read_full_callback;
2442 result = rbd_img_request_submit(parent_request);
2446 parent_request->copyup_pages = NULL;
2447 parent_request->copyup_page_count = 0;
2448 parent_request->obj_request = NULL;
2449 rbd_obj_request_put(obj_request);
2452 ceph_release_page_vector(pages, page_count);
2454 rbd_img_request_put(parent_request);
2455 obj_request->result = result;
2456 obj_request->xferred = 0;
2457 obj_request_done_set(obj_request);
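/*
 * Illustrative sketch (hypothetical helper): the parent read issued above
 * covers the whole object containing the write, clipped to the parent
 * overlap beyond which no parent data is defined. Assumes the caller has
 * already verified img_offset < parent_overlap, as asserted above.
 */
static inline u64 example_parent_read_length(u64 img_offset, u8 obj_order,
					     u64 parent_overlap)
{
	u64 length = (u64)1 << obj_order;

	if (img_offset + length > parent_overlap)
		length = parent_overlap - img_offset;

	return length;
}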
2462 static void rbd_img_obj_exists_callback(struct rbd_obj_request *obj_request)
2464 struct rbd_obj_request *orig_request;
2465 struct rbd_device *rbd_dev;
2468 rbd_assert(!obj_request_img_data_test(obj_request));
2471 * All we need from the object request is the original
2472 * request and the result of the STAT op. Grab those, then
2473 * we're done with the request.
2475 orig_request = obj_request->obj_request;
2476 obj_request->obj_request = NULL;
2477 rbd_obj_request_put(orig_request);
2478 rbd_assert(orig_request);
2479 rbd_assert(orig_request->img_request);
2481 result = obj_request->result;
2482 obj_request->result = 0;
2484 dout("%s: obj %p for obj %p result %d %llu/%llu\n", __func__,
2485 obj_request, orig_request, result,
2486 obj_request->xferred, obj_request->length);
2487 rbd_obj_request_put(obj_request);
2490 * If the overlap has become 0 (most likely because the
2491 * image has been flattened) we need to free the pages
2492 * and re-submit the original write request.
2494 rbd_dev = orig_request->img_request->rbd_dev;
2495 if (!rbd_dev->parent_overlap) {
2496 struct ceph_osd_client *osdc;
2498 osdc = &rbd_dev->rbd_client->client->osdc;
2499 result = rbd_obj_request_submit(osdc, orig_request);
2505 * Our only purpose here is to determine whether the object
2506 * exists, and we don't want to treat the non-existence as
2507 * an error. If something else comes back, transfer the
2508 * error to the original request and complete it now.
2511 obj_request_existence_set(orig_request, true);
2512 } else if (result == -ENOENT) {
2513 obj_request_existence_set(orig_request, false);
2514 } else if (result) {
2515 orig_request->result = result;
2520 * Resubmit the original request now that we have recorded
2521 * whether the target object exists.
2523 orig_request->result = rbd_img_obj_request_submit(orig_request);
2525 if (orig_request->result)
2526 rbd_obj_request_complete(orig_request);
2529 static int rbd_img_obj_exists_submit(struct rbd_obj_request *obj_request)
2531 struct rbd_obj_request *stat_request;
2532 struct rbd_device *rbd_dev;
2533 struct ceph_osd_client *osdc;
2534 struct page **pages = NULL;
2540 * The response data for a STAT call consists of:
2541 *     le64 length;
2542 *     struct {
2543 *         le32 tv_sec;
2544 *         le32 tv_nsec;
2545 *     } mtime;
2547 size = sizeof (__le64) + sizeof (__le32) + sizeof (__le32);
2548 page_count = (u32)calc_pages_for(0, size);
2549 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2551 return PTR_ERR(pages);
2554 stat_request = rbd_obj_request_create(obj_request->object_name, 0, 0,
2559 rbd_obj_request_get(obj_request);
2560 stat_request->obj_request = obj_request;
2561 stat_request->pages = pages;
2562 stat_request->page_count = page_count;
2564 rbd_assert(obj_request->img_request);
2565 rbd_dev = obj_request->img_request->rbd_dev;
2566 stat_request->osd_req = rbd_osd_req_create(rbd_dev, false,
2568 if (!stat_request->osd_req)
2570 stat_request->callback = rbd_img_obj_exists_callback;
2572 osd_req_op_init(stat_request->osd_req, 0, CEPH_OSD_OP_STAT);
2573 osd_req_op_raw_data_in_pages(stat_request->osd_req, 0, pages, size, 0,
2575 rbd_osd_req_format_read(stat_request);
2577 osdc = &rbd_dev->rbd_client->client->osdc;
2578 ret = rbd_obj_request_submit(osdc, stat_request);
2581 rbd_obj_request_put(obj_request);
2586 static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request)
2588 struct rbd_img_request *img_request;
2589 struct rbd_device *rbd_dev;
2592 rbd_assert(obj_request_img_data_test(obj_request));
2594 img_request = obj_request->img_request;
2595 rbd_assert(img_request);
2596 rbd_dev = img_request->rbd_dev;
2599 * Only writes to layered images need special handling.
2600 * Reads and non-layered writes are simple object requests.
2601 * Layered writes that start beyond the end of the overlap
2602 * with the parent have no parent data, so they too are
2603 * simple object requests. Finally, if the target object is
2604 * known to already exist, its parent data has already been
2605 * copied, so a write to the object can also be handled as a
2606 * simple object request.
2608 if (!img_request_write_test(img_request) ||
2609 !img_request_layered_test(img_request) ||
2610 rbd_dev->parent_overlap <= obj_request->img_offset ||
2611 ((known = obj_request_known_test(obj_request)) &&
2612 obj_request_exists_test(obj_request))) {
2614 struct rbd_device *rbd_dev;
2615 struct ceph_osd_client *osdc;
2617 rbd_dev = obj_request->img_request->rbd_dev;
2618 osdc = &rbd_dev->rbd_client->client->osdc;
2620 return rbd_obj_request_submit(osdc, obj_request);
2624 * It's a layered write. The target object might exist but
2625 * we may not know that yet. If we know it doesn't exist,
2626 * start by reading the data for the full target object from
2627 * the parent so we can use it for a copyup to the target.
2630 return rbd_img_obj_parent_read_full(obj_request);
2632 /* We don't know whether the target exists. Go find out. */
2634 return rbd_img_obj_exists_submit(obj_request);
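/*
 * Illustrative restatement (hypothetical helper) of the test above: only
 * a layered write that starts inside the parent overlap, and whose target
 * object is not already known to exist, takes the copyup path.
 */
static inline bool example_needs_parent_handling(bool write_request,
						 bool layered, bool known,
						 bool exists, u64 img_offset,
						 u64 parent_overlap)
{
	return write_request && layered &&
	       img_offset < parent_overlap &&
	       !(known && exists);
}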
2637 static int rbd_img_request_submit(struct rbd_img_request *img_request)
2639 struct rbd_obj_request *obj_request;
2640 struct rbd_obj_request *next_obj_request;
2642 dout("%s: img %p\n", __func__, img_request);
2643 for_each_obj_request_safe(img_request, obj_request, next_obj_request) {
2646 ret = rbd_img_obj_request_submit(obj_request);
2654 static void rbd_img_parent_read_callback(struct rbd_img_request *img_request)
2656 struct rbd_obj_request *obj_request;
2657 struct rbd_device *rbd_dev;
2662 rbd_assert(img_request_child_test(img_request));
2664 /* First get what we need from the image request and release it */
2666 obj_request = img_request->obj_request;
2667 img_xferred = img_request->xferred;
2668 img_result = img_request->result;
2669 rbd_img_request_put(img_request);
2672 * If the overlap has become 0 (most likely because the
2673 * image has been flattened) we need to re-submit the original request.
2676 rbd_assert(obj_request);
2677 rbd_assert(obj_request->img_request);
2678 rbd_dev = obj_request->img_request->rbd_dev;
2679 if (!rbd_dev->parent_overlap) {
2680 struct ceph_osd_client *osdc;
2682 osdc = &rbd_dev->rbd_client->client->osdc;
2683 img_result = rbd_obj_request_submit(osdc, obj_request);
2688 obj_request->result = img_result;
2689 if (obj_request->result)
2693 * We need to zero anything beyond the parent overlap
2694 * boundary. Since rbd_img_obj_request_read_callback()
2695 * will zero anything beyond the end of a short read, an
2696 * easy way to do this is to pretend the data from the
2697 * parent came up short--ending at the overlap boundary.
2699 rbd_assert(obj_request->img_offset < U64_MAX - obj_request->length);
2700 obj_end = obj_request->img_offset + obj_request->length;
2701 if (obj_end > rbd_dev->parent_overlap) {
2704 if (obj_request->img_offset < rbd_dev->parent_overlap)
2705 xferred = rbd_dev->parent_overlap -
2706 obj_request->img_offset;
2708 obj_request->xferred = min(img_xferred, xferred);
2710 obj_request->xferred = img_xferred;
2713 rbd_img_obj_request_read_callback(obj_request);
2714 rbd_obj_request_complete(obj_request);
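/*
 * Illustrative sketch (hypothetical helper) of the clamping above: pretend
 * a parent read "came up short" at the overlap boundary, so that the read
 * callback zero-fills everything past it.
 */
static inline u64 example_clamp_xferred(u64 img_offset, u64 img_xferred,
					u64 parent_overlap)
{
	u64 xferred = 0;

	if (img_offset < parent_overlap)
		xferred = parent_overlap - img_offset;

	return min(img_xferred, xferred);
}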
2717 static void rbd_img_parent_read(struct rbd_obj_request *obj_request)
2719 struct rbd_img_request *img_request;
2722 rbd_assert(obj_request_img_data_test(obj_request));
2723 rbd_assert(obj_request->img_request != NULL);
2724 rbd_assert(obj_request->result == (s32) -ENOENT);
2725 rbd_assert(obj_request_type_valid(obj_request->type));
2727 /* rbd_read_finish(obj_request, obj_request->length); */
2728 img_request = rbd_parent_request_create(obj_request,
2729 obj_request->img_offset,
2730 obj_request->length);
2735 if (obj_request->type == OBJ_REQUEST_BIO)
2736 result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
2737 obj_request->bio_list);
2739 result = rbd_img_request_fill(img_request, OBJ_REQUEST_PAGES,
2740 obj_request->pages);
2744 img_request->callback = rbd_img_parent_read_callback;
2745 result = rbd_img_request_submit(img_request);
2752 rbd_img_request_put(img_request);
2753 obj_request->result = result;
2754 obj_request->xferred = 0;
2755 obj_request_done_set(obj_request);
2758 static int rbd_obj_notify_ack_sync(struct rbd_device *rbd_dev, u64 notify_id)
2760 struct rbd_obj_request *obj_request;
2761 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2764 obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
2765 OBJ_REQUEST_NODATA);
2770 obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, obj_request);
2771 if (!obj_request->osd_req)
2774 osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_NOTIFY_ACK,
2776 rbd_osd_req_format_read(obj_request);
2778 ret = rbd_obj_request_submit(osdc, obj_request);
2781 ret = rbd_obj_request_wait(obj_request);
2783 rbd_obj_request_put(obj_request);
2788 static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
2790 struct rbd_device *rbd_dev = (struct rbd_device *)data;
2796 dout("%s: \"%s\" notify_id %llu opcode %u\n", __func__,
2797 rbd_dev->header_name, (unsigned long long)notify_id,
2798 (unsigned int)opcode);
2799 ret = rbd_dev_refresh(rbd_dev);
2801 rbd_warn(rbd_dev, "header refresh error (%d)\n", ret);
2803 rbd_obj_notify_ack_sync(rbd_dev, notify_id);
2807 * Request sync osd watch/unwatch. The value of "start" determines
2808 * whether a watch request is being initiated or torn down.
2810 static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev, bool start)
2812 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2813 struct rbd_obj_request *obj_request;
2816 rbd_assert(start ^ !!rbd_dev->watch_event);
2817 rbd_assert(start ^ !!rbd_dev->watch_request);
2820 ret = ceph_osdc_create_event(osdc, rbd_watch_cb, rbd_dev,
2821 &rbd_dev->watch_event);
2824 rbd_assert(rbd_dev->watch_event != NULL);
2828 obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
2829 OBJ_REQUEST_NODATA);
2833 obj_request->osd_req = rbd_osd_req_create(rbd_dev, true, obj_request);
2834 if (!obj_request->osd_req)
2838 ceph_osdc_set_request_linger(osdc, obj_request->osd_req);
2840 ceph_osdc_unregister_linger_request(osdc,
2841 rbd_dev->watch_request->osd_req);
2843 osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_WATCH,
2844 rbd_dev->watch_event->cookie, 0, start ? 1 : 0);
2845 rbd_osd_req_format_write(obj_request);
2847 ret = rbd_obj_request_submit(osdc, obj_request);
2850 ret = rbd_obj_request_wait(obj_request);
2853 ret = obj_request->result;
2858 * A watch request is set to linger, so the underlying osd
2859 * request won't go away until we unregister it. We retain
2860 * a pointer to the object request during that time (in
2861 * rbd_dev->watch_request), so we'll keep a reference to
2862 * it. We'll drop that reference (below) after we've unregistered it.
2866 rbd_dev->watch_request = obj_request;
2871 /* We have successfully torn down the watch request */
2873 rbd_obj_request_put(rbd_dev->watch_request);
2874 rbd_dev->watch_request = NULL;
2876 /* Cancel the event if we're tearing down, or on error */
2877 ceph_osdc_cancel_event(rbd_dev->watch_event);
2878 rbd_dev->watch_event = NULL;
2880 rbd_obj_request_put(obj_request);
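/*
 * Usage sketch (hypothetical caller, not in the original source): a watch
 * is established once after probing and torn down before the device goes
 * away; rbd_watch_cb() handles notifies in between.
 */
static int example_watch_lifecycle(struct rbd_device *rbd_dev)
{
	int ret;

	ret = rbd_dev_header_watch_sync(rbd_dev, true);	/* start */
	if (ret)
		return ret;

	/* ... mapped lifetime: header refreshes arrive via notify ... */

	return rbd_dev_header_watch_sync(rbd_dev, false);	/* tear down */
}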
2886 * Synchronous osd object method call. Returns the number of bytes
2887 * returned in the inbound buffer, or a negative error code.
2889 static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
2890 const char *object_name,
2891 const char *class_name,
2892 const char *method_name,
2893 const void *outbound,
2894 size_t outbound_size,
2896 size_t inbound_size)
2898 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2899 struct rbd_obj_request *obj_request;
2900 struct page **pages;
2905 * Method calls are ultimately read operations. The result
2906 * should be placed into the inbound buffer provided. They
2907 * also supply outbound data--parameters for the object
2908 * method. Currently if this is present it will be a snapshot id.
2911 page_count = (u32)calc_pages_for(0, inbound_size);
2912 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2914 return PTR_ERR(pages);
2917 obj_request = rbd_obj_request_create(object_name, 0, inbound_size,
2922 obj_request->pages = pages;
2923 obj_request->page_count = page_count;
2925 obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, obj_request);
2926 if (!obj_request->osd_req)
2929 osd_req_op_cls_init(obj_request->osd_req, 0, CEPH_OSD_OP_CALL,
2930 class_name, method_name);
2931 if (outbound_size) {
2932 struct ceph_pagelist *pagelist;
2934 pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS);
2938 ceph_pagelist_init(pagelist);
2939 ceph_pagelist_append(pagelist, outbound, outbound_size);
2940 osd_req_op_cls_request_data_pagelist(obj_request->osd_req, 0,
2943 osd_req_op_cls_response_data_pages(obj_request->osd_req, 0,
2944 obj_request->pages, inbound_size,
2946 rbd_osd_req_format_read(obj_request);
2948 ret = rbd_obj_request_submit(osdc, obj_request);
2951 ret = rbd_obj_request_wait(obj_request);
2955 ret = obj_request->result;
2959 rbd_assert(obj_request->xferred < (u64)INT_MAX);
2960 ret = (int)obj_request->xferred;
2961 ceph_copy_from_page_vector(pages, inbound, 0, obj_request->xferred);
2964 rbd_obj_request_put(obj_request);
2966 ceph_release_page_vector(pages, page_count);
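/*
 * Usage sketch (hypothetical wrapper, mirroring the callers further
 * below): a synchronous class method call with no outbound parameters.
 */
static int example_get_object_prefix(struct rbd_device *rbd_dev,
				     void *inbound, size_t inbound_size)
{
	return rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
				   "rbd", "get_object_prefix",
				   NULL, 0, inbound, inbound_size);
}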
2971 static void rbd_request_fn(struct request_queue *q)
2972 __releases(q->queue_lock) __acquires(q->queue_lock)
2974 struct rbd_device *rbd_dev = q->queuedata;
2975 bool read_only = rbd_dev->mapping.read_only;
2979 while ((rq = blk_fetch_request(q))) {
2980 bool write_request = rq_data_dir(rq) == WRITE;
2981 struct rbd_img_request *img_request;
2985 /* Ignore any non-FS requests that filter through. */
2987 if (rq->cmd_type != REQ_TYPE_FS) {
2988 dout("%s: non-fs request type %d\n", __func__,
2989 (int) rq->cmd_type);
2990 __blk_end_request_all(rq, 0);
2994 /* Ignore/skip any zero-length requests */
2996 offset = (u64) blk_rq_pos(rq) << SECTOR_SHIFT;
2997 length = (u64) blk_rq_bytes(rq);
3000 dout("%s: zero-length request\n", __func__);
3001 __blk_end_request_all(rq, 0);
3005 spin_unlock_irq(q->queue_lock);
3007 /* Disallow writes to a read-only device */
3009 if (write_request) {
3013 rbd_assert(rbd_dev->spec->snap_id == CEPH_NOSNAP);
3017 * Quit early if the mapped snapshot no longer
3018 * exists. It's still possible the snapshot will
3019 * have disappeared by the time our request arrives
3020 * at the osd, but there's no sense in sending it if we already know.
3023 if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags)) {
3024 dout("request for non-existent snapshot");
3025 rbd_assert(rbd_dev->spec->snap_id != CEPH_NOSNAP);
3031 if (offset && length > U64_MAX - offset + 1) {
3032 rbd_warn(rbd_dev, "bad request range (%llu~%llu)\n",
3034 goto end_request; /* Shouldn't happen */
3038 if (offset + length > rbd_dev->mapping.size) {
3039 rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)\n",
3040 offset, length, rbd_dev->mapping.size);
3045 img_request = rbd_img_request_create(rbd_dev, offset, length,
3050 img_request->rq = rq;
3052 result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
3055 result = rbd_img_request_submit(img_request);
3057 rbd_img_request_put(img_request);
3059 spin_lock_irq(q->queue_lock);
3061 rbd_warn(rbd_dev, "%s %llx at %llx result %d\n",
3062 write_request ? "write" : "read",
3063 length, offset, result);
3065 __blk_end_request_all(rq, result);
3071 * a queue callback. Ensures that we don't create a bio that spans
3072 * multiple osd objects. One exception would be single-page bios,
3073 * which we handle later in bio_chain_clone_range().
3075 static int rbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bmd,
3076 struct bio_vec *bvec)
3078 struct rbd_device *rbd_dev = q->queuedata;
3079 sector_t sector_offset;
3080 sector_t sectors_per_obj;
3081 sector_t obj_sector_offset;
3085 * Find how far into its rbd object the bio start sector falls.
3086 * The partition-relative start sector is first offset to be relative to the enclosing device.
3089 sector_offset = get_start_sect(bmd->bi_bdev) + bmd->bi_sector;
3090 sectors_per_obj = 1 << (rbd_dev->header.obj_order - SECTOR_SHIFT);
3091 obj_sector_offset = sector_offset & (sectors_per_obj - 1);
3094 * Compute the number of bytes from that offset to the end
3095 * of the object. Account for what's already used by the bio.
3097 ret = (int) (sectors_per_obj - obj_sector_offset) << SECTOR_SHIFT;
3098 if (ret > bmd->bi_size)
3099 ret -= bmd->bi_size;
3104 * Don't send back more than was asked for. And if the bio
3105 * was empty, let the whole thing through because: "Note
3106 * that a block device *must* allow a single page to be
3107 * added to an empty bio."
3109 rbd_assert(bvec->bv_len <= PAGE_SIZE);
3110 if (ret > (int) bvec->bv_len || !bmd->bi_size)
3111 ret = (int) bvec->bv_len;
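/*
 * Illustrative sketch (hypothetical helper) of the arithmetic above:
 * bytes remaining between a device sector and the end of the rbd object
 * that contains it, assuming objects of 1 << obj_order bytes (small
 * enough for the result to fit an unsigned int).
 */
static inline unsigned int example_bytes_to_object_end(sector_t sector,
						       u8 obj_order)
{
	sector_t sectors_per_obj = 1 << (obj_order - SECTOR_SHIFT);
	sector_t obj_off = sector & (sectors_per_obj - 1);

	return (unsigned int)((sectors_per_obj - obj_off) << SECTOR_SHIFT);
}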
3116 static void rbd_free_disk(struct rbd_device *rbd_dev)
3118 struct gendisk *disk = rbd_dev->disk;
3123 rbd_dev->disk = NULL;
3124 if (disk->flags & GENHD_FL_UP) {
3127 blk_cleanup_queue(disk->queue);
3132 static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
3133 const char *object_name,
3134 u64 offset, u64 length, void *buf)
3137 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3138 struct rbd_obj_request *obj_request;
3139 struct page **pages = NULL;
3144 page_count = (u32) calc_pages_for(offset, length);
3145 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
3147 ret = PTR_ERR(pages);
3150 obj_request = rbd_obj_request_create(object_name, offset, length,
3155 obj_request->pages = pages;
3156 obj_request->page_count = page_count;
3158 obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, obj_request);
3159 if (!obj_request->osd_req)
3162 osd_req_op_extent_init(obj_request->osd_req, 0, CEPH_OSD_OP_READ,
3163 offset, length, 0, 0);
3164 osd_req_op_extent_osd_data_pages(obj_request->osd_req, 0,
3166 obj_request->length,
3167 obj_request->offset & ~PAGE_MASK,
3169 rbd_osd_req_format_read(obj_request);
3171 ret = rbd_obj_request_submit(osdc, obj_request);
3174 ret = rbd_obj_request_wait(obj_request);
3178 ret = obj_request->result;
3182 rbd_assert(obj_request->xferred <= (u64) SIZE_MAX);
3183 size = (size_t) obj_request->xferred;
3184 ceph_copy_from_page_vector(pages, buf, 0, size);
3185 rbd_assert(size <= (size_t)INT_MAX);
3189 rbd_obj_request_put(obj_request);
3191 ceph_release_page_vector(pages, page_count);
3197 * Read the complete header for the given rbd device. On successful
3198 * return, the rbd_dev->header field will contain up-to-date
3199 * information about the image.
3201 static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev)
3203 struct rbd_image_header_ondisk *ondisk = NULL;
3210 * The complete header will include an array of its 64-bit
3211 * snapshot ids, followed by the names of those snapshots as
3212 * a contiguous block of NUL-terminated strings. Note that
3213 * the number of snapshots could change by the time we read
3214 * it in, in which case we re-read it.
3221 size = sizeof (*ondisk);
3222 size += snap_count * sizeof (struct rbd_image_snap_ondisk);
3224 ondisk = kmalloc(size, GFP_KERNEL);
3228 ret = rbd_obj_read_sync(rbd_dev, rbd_dev->header_name,
3232 if ((size_t)ret < size) {
3234 rbd_warn(rbd_dev, "short header read (want %zd got %d)",
3238 if (!rbd_dev_ondisk_valid(ondisk)) {
3240 rbd_warn(rbd_dev, "invalid header");
3244 names_size = le64_to_cpu(ondisk->snap_names_len);
3245 want_count = snap_count;
3246 snap_count = le32_to_cpu(ondisk->snap_count);
3247 } while (snap_count != want_count);
3249 ret = rbd_header_from_disk(rbd_dev, ondisk);
3257 * Clear the rbd device's EXISTS flag if the snapshot it's mapped to
3258 * has disappeared from the (just updated) snapshot context.
3260 static void rbd_exists_validate(struct rbd_device *rbd_dev)
3264 if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags))
3267 snap_id = rbd_dev->spec->snap_id;
3268 if (snap_id == CEPH_NOSNAP)
3271 if (rbd_dev_snap_index(rbd_dev, snap_id) == BAD_SNAP_INDEX)
3272 clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
3275 static void rbd_dev_update_size(struct rbd_device *rbd_dev)
3281 * Don't hold the lock while doing disk operations,
3282 * or lock ordering will conflict with the bdev mutex via:
3283 * rbd_add() -> blkdev_get() -> rbd_open()
3285 spin_lock_irq(&rbd_dev->lock);
3286 removing = test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags);
3287 spin_unlock_irq(&rbd_dev->lock);
3289 * If the device is being removed, rbd_dev->disk has
3290 * been destroyed, so don't try to update its size.
3293 size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
3294 dout("setting size to %llu sectors", (unsigned long long)size);
3295 set_capacity(rbd_dev->disk, size);
3296 revalidate_disk(rbd_dev->disk);
3300 static int rbd_dev_refresh(struct rbd_device *rbd_dev)
3305 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
3306 down_write(&rbd_dev->header_rwsem);
3307 mapping_size = rbd_dev->mapping.size;
3308 if (rbd_dev->image_format == 1)
3309 ret = rbd_dev_v1_header_info(rbd_dev);
3311 ret = rbd_dev_v2_header_info(rbd_dev);
3313 /* If it's a mapped snapshot, validate its EXISTS flag */
3315 rbd_exists_validate(rbd_dev);
3316 up_write(&rbd_dev->header_rwsem);
3318 if (mapping_size != rbd_dev->mapping.size) {
3319 rbd_dev_update_size(rbd_dev);
3325 static int rbd_init_disk(struct rbd_device *rbd_dev)
3327 struct gendisk *disk;
3328 struct request_queue *q;
3331 /* create gendisk info */
3332 disk = alloc_disk(RBD_MINORS_PER_MAJOR);
3336 snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
3338 disk->major = rbd_dev->major;
3339 disk->first_minor = 0;
3340 disk->fops = &rbd_bd_ops;
3341 disk->private_data = rbd_dev;
3343 q = blk_init_queue(rbd_request_fn, &rbd_dev->lock);
3347 /* We use the default size, but let's be explicit about it. */
3348 blk_queue_physical_block_size(q, SECTOR_SIZE);
3350 /* set io sizes to object size */
3351 segment_size = rbd_obj_bytes(&rbd_dev->header);
3352 blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE);
3353 blk_queue_max_segment_size(q, segment_size);
3354 blk_queue_io_min(q, segment_size);
3355 blk_queue_io_opt(q, segment_size);
3357 blk_queue_merge_bvec(q, rbd_merge_bvec);
3360 q->queuedata = rbd_dev;
3362 rbd_dev->disk = disk;
3375 static struct rbd_device *dev_to_rbd_dev(struct device *dev)
3377 return container_of(dev, struct rbd_device, dev);
3380 static ssize_t rbd_size_show(struct device *dev,
3381 struct device_attribute *attr, char *buf)
3383 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3385 return sprintf(buf, "%llu\n",
3386 (unsigned long long)rbd_dev->mapping.size);
3390 * Note this shows the features for whatever's mapped, which is not
3391 * necessarily the base image.
3393 static ssize_t rbd_features_show(struct device *dev,
3394 struct device_attribute *attr, char *buf)
3396 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3398 return sprintf(buf, "0x%016llx\n",
3399 (unsigned long long)rbd_dev->mapping.features);
3402 static ssize_t rbd_major_show(struct device *dev,
3403 struct device_attribute *attr, char *buf)
3405 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3408 return sprintf(buf, "%d\n", rbd_dev->major);
3410 return sprintf(buf, "(none)\n");
3414 static ssize_t rbd_client_id_show(struct device *dev,
3415 struct device_attribute *attr, char *buf)
3417 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3419 return sprintf(buf, "client%lld\n",
3420 ceph_client_id(rbd_dev->rbd_client->client));
3423 static ssize_t rbd_pool_show(struct device *dev,
3424 struct device_attribute *attr, char *buf)
3426 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3428 return sprintf(buf, "%s\n", rbd_dev->spec->pool_name);
3431 static ssize_t rbd_pool_id_show(struct device *dev,
3432 struct device_attribute *attr, char *buf)
3434 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3436 return sprintf(buf, "%llu\n",
3437 (unsigned long long) rbd_dev->spec->pool_id);
3440 static ssize_t rbd_name_show(struct device *dev,
3441 struct device_attribute *attr, char *buf)
3443 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3445 if (rbd_dev->spec->image_name)
3446 return sprintf(buf, "%s\n", rbd_dev->spec->image_name);
3448 return sprintf(buf, "(unknown)\n");
3451 static ssize_t rbd_image_id_show(struct device *dev,
3452 struct device_attribute *attr, char *buf)
3454 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3456 return sprintf(buf, "%s\n", rbd_dev->spec->image_id);
3460 * Shows the name of the currently-mapped snapshot (or
3461 * RBD_SNAP_HEAD_NAME for the base image).
3463 static ssize_t rbd_snap_show(struct device *dev,
3464 struct device_attribute *attr,
3467 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3469 return sprintf(buf, "%s\n", rbd_dev->spec->snap_name);
3473 * For an rbd v2 image, shows the pool id, image id, and snapshot id
3474 * for the parent image. If there is no parent, simply shows
3475 * "(no parent image)".
3477 static ssize_t rbd_parent_show(struct device *dev,
3478 struct device_attribute *attr,
3481 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3482 struct rbd_spec *spec = rbd_dev->parent_spec;
3487 return sprintf(buf, "(no parent image)\n");
3489 count = sprintf(bufp, "pool_id %llu\npool_name %s\n",
3490 (unsigned long long) spec->pool_id, spec->pool_name);
3495 count = sprintf(bufp, "image_id %s\nimage_name %s\n", spec->image_id,
3496 spec->image_name ? spec->image_name : "(unknown)");
3501 count = sprintf(bufp, "snap_id %llu\nsnap_name %s\n",
3502 (unsigned long long) spec->snap_id, spec->snap_name);
3507 count = sprintf(bufp, "overlap %llu\n", rbd_dev->parent_overlap);
3512 return (ssize_t) (bufp - buf);
3515 static ssize_t rbd_image_refresh(struct device *dev,
3516 struct device_attribute *attr,
3520 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3523 ret = rbd_dev_refresh(rbd_dev);
3525 rbd_warn(rbd_dev, ": manual header refresh error (%d)\n", ret);
3527 return ret < 0 ? ret : size;
3530 static DEVICE_ATTR(size, S_IRUGO, rbd_size_show, NULL);
3531 static DEVICE_ATTR(features, S_IRUGO, rbd_features_show, NULL);
3532 static DEVICE_ATTR(major, S_IRUGO, rbd_major_show, NULL);
3533 static DEVICE_ATTR(client_id, S_IRUGO, rbd_client_id_show, NULL);
3534 static DEVICE_ATTR(pool, S_IRUGO, rbd_pool_show, NULL);
3535 static DEVICE_ATTR(pool_id, S_IRUGO, rbd_pool_id_show, NULL);
3536 static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL);
3537 static DEVICE_ATTR(image_id, S_IRUGO, rbd_image_id_show, NULL);
3538 static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh);
3539 static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL);
3540 static DEVICE_ATTR(parent, S_IRUGO, rbd_parent_show, NULL);
3542 static struct attribute *rbd_attrs[] = {
3543 &dev_attr_size.attr,
3544 &dev_attr_features.attr,
3545 &dev_attr_major.attr,
3546 &dev_attr_client_id.attr,
3547 &dev_attr_pool.attr,
3548 &dev_attr_pool_id.attr,
3549 &dev_attr_name.attr,
3550 &dev_attr_image_id.attr,
3551 &dev_attr_current_snap.attr,
3552 &dev_attr_parent.attr,
3553 &dev_attr_refresh.attr,
3557 static struct attribute_group rbd_attr_group = {
3561 static const struct attribute_group *rbd_attr_groups[] = {
3566 static void rbd_sysfs_dev_release(struct device *dev)
3570 static struct device_type rbd_device_type = {
3572 .groups = rbd_attr_groups,
3573 .release = rbd_sysfs_dev_release,
3576 static struct rbd_spec *rbd_spec_get(struct rbd_spec *spec)
3578 kref_get(&spec->kref);
3583 static void rbd_spec_free(struct kref *kref);
3584 static void rbd_spec_put(struct rbd_spec *spec)
3587 kref_put(&spec->kref, rbd_spec_free);
3590 static struct rbd_spec *rbd_spec_alloc(void)
3592 struct rbd_spec *spec;
3594 spec = kzalloc(sizeof (*spec), GFP_KERNEL);
3597 kref_init(&spec->kref);
3602 static void rbd_spec_free(struct kref *kref)
3604 struct rbd_spec *spec = container_of(kref, struct rbd_spec, kref);
3606 kfree(spec->pool_name);
3607 kfree(spec->image_id);
3608 kfree(spec->image_name);
3609 kfree(spec->snap_name);
3613 static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
3614 struct rbd_spec *spec)
3616 struct rbd_device *rbd_dev;
3618 rbd_dev = kzalloc(sizeof (*rbd_dev), GFP_KERNEL);
3622 spin_lock_init(&rbd_dev->lock);
3624 atomic_set(&rbd_dev->parent_ref, 0);
3625 INIT_LIST_HEAD(&rbd_dev->node);
3626 init_rwsem(&rbd_dev->header_rwsem);
3628 rbd_dev->spec = spec;
3629 rbd_dev->rbd_client = rbdc;
3631 /* Initialize the layout used for all rbd requests */
3633 rbd_dev->layout.fl_stripe_unit = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
3634 rbd_dev->layout.fl_stripe_count = cpu_to_le32(1);
3635 rbd_dev->layout.fl_object_size = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
3636 rbd_dev->layout.fl_pg_pool = cpu_to_le32((u32) spec->pool_id);
3641 static void rbd_dev_destroy(struct rbd_device *rbd_dev)
3643 rbd_put_client(rbd_dev->rbd_client);
3644 rbd_spec_put(rbd_dev->spec);
3649 * Get the size and object order for an image snapshot, or if
3650 * snap_id is CEPH_NOSNAP, gets this information for the base
3653 static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
3654 u8 *order, u64 *snap_size)
3656 __le64 snapid = cpu_to_le64(snap_id);
3661 } __attribute__ ((packed)) size_buf = { 0 };
3663 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3665 &snapid, sizeof (snapid),
3666 &size_buf, sizeof (size_buf));
3667 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3670 if (ret < sizeof (size_buf))
3674 *order = size_buf.order;
3675 dout(" order %u", (unsigned int)*order);
3677 *snap_size = le64_to_cpu(size_buf.size);
3679 dout(" snap_id 0x%016llx snap_size = %llu\n",
3680 (unsigned long long)snap_id,
3681 (unsigned long long)*snap_size);
3686 static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev)
3688 return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
3689 &rbd_dev->header.obj_order,
3690 &rbd_dev->header.image_size);
3693 static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
3699 reply_buf = kzalloc(RBD_OBJ_PREFIX_LEN_MAX, GFP_KERNEL);
3703 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3704 "rbd", "get_object_prefix", NULL, 0,
3705 reply_buf, RBD_OBJ_PREFIX_LEN_MAX);
3706 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3711 rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
3712 p + ret, NULL, GFP_NOIO);
3715 if (IS_ERR(rbd_dev->header.object_prefix)) {
3716 ret = PTR_ERR(rbd_dev->header.object_prefix);
3717 rbd_dev->header.object_prefix = NULL;
3719 dout(" object_prefix = %s\n", rbd_dev->header.object_prefix);
3727 static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
3730 __le64 snapid = cpu_to_le64(snap_id);
3734 } __attribute__ ((packed)) features_buf = { 0 };
3738 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3739 "rbd", "get_features",
3740 &snapid, sizeof (snapid),
3741 &features_buf, sizeof (features_buf));
3742 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3745 if (ret < sizeof (features_buf))
3748 incompat = le64_to_cpu(features_buf.incompat);
3749 if (incompat & ~RBD_FEATURES_SUPPORTED)
3752 *snap_features = le64_to_cpu(features_buf.features);
3754 dout(" snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n",
3755 (unsigned long long)snap_id,
3756 (unsigned long long)*snap_features,
3757 (unsigned long long)le64_to_cpu(features_buf.incompat));
3762 static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
3764 return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
3765 &rbd_dev->header.features);
3768 static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
3770 struct rbd_spec *parent_spec;
3772 void *reply_buf = NULL;
3782 parent_spec = rbd_spec_alloc();
3786 size = sizeof (__le64) + /* pool_id */
3787 sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX + /* image_id */
3788 sizeof (__le64) + /* snap_id */
3789 sizeof (__le64); /* overlap */
3790 reply_buf = kmalloc(size, GFP_KERNEL);
3796 snapid = cpu_to_le64(CEPH_NOSNAP);
3797 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3798 "rbd", "get_parent",
3799 &snapid, sizeof (snapid),
3801 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3806 end = reply_buf + ret;
3808 ceph_decode_64_safe(&p, end, pool_id, out_err);
3809 if (pool_id == CEPH_NOPOOL) {
3811 * Either the parent never existed, or we have
3812 * record of it but the image got flattened so it no
3813 * longer has a parent. When the parent of a
3814 * layered image disappears we immediately set the
3815 * overlap to 0. The effect of this is that all new
3816 * requests will be treated as if the image had no parent.
3819 if (rbd_dev->parent_overlap) {
3820 rbd_dev->parent_overlap = 0;
3822 rbd_dev_parent_put(rbd_dev);
3823 pr_info("%s: clone image has been flattened\n",
3824 rbd_dev->disk->disk_name);
3827 goto out; /* No parent? No problem. */
3830 /* The ceph file layout needs to fit pool id in 32 bits */
3833 if (pool_id > (u64)U32_MAX) {
3834 rbd_warn(NULL, "parent pool id too large (%llu > %u)\n",
3835 (unsigned long long)pool_id, U32_MAX);
3839 image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
3840 if (IS_ERR(image_id)) {
3841 ret = PTR_ERR(image_id);
3844 ceph_decode_64_safe(&p, end, snap_id, out_err);
3845 ceph_decode_64_safe(&p, end, overlap, out_err);
3848 * The parent won't change (except when the clone is
3849 * flattened, which is already handled). So we only need to
3850 * record the parent spec if we have not already done so.
3852 if (!rbd_dev->parent_spec) {
3853 parent_spec->pool_id = pool_id;
3854 parent_spec->image_id = image_id;
3855 parent_spec->snap_id = snap_id;
3856 rbd_dev->parent_spec = parent_spec;
3857 parent_spec = NULL; /* rbd_dev now owns this */
3861 * We always update the parent overlap. If it's zero we
3862 * treat it specially.
3864 rbd_dev->parent_overlap = overlap;
3868 /* A null parent_spec indicates it's the initial probe */
3872 * The overlap has become zero, so the clone
3873 * must have been resized down to 0 at some
3874 * point. Treat this the same as a flatten.
3876 rbd_dev_parent_put(rbd_dev);
3877 pr_info("%s: clone image now standalone\n",
3878 rbd_dev->disk->disk_name);
3881 * For the initial probe, if we find the
3882 * overlap is zero we just pretend there was no parent image.
3885 rbd_warn(rbd_dev, "ignoring parent of "
3886 "clone with overlap 0\n");
3893 rbd_spec_put(parent_spec);
3898 static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
3902 __le64 stripe_count;
3903 } __attribute__ ((packed)) striping_info_buf = { 0 };
3904 size_t size = sizeof (striping_info_buf);
3911 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3912 "rbd", "get_stripe_unit_count", NULL, 0,
3913 (char *)&striping_info_buf, size);
3914 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3921 * We don't actually support the "fancy striping" feature
3922 * (STRIPINGV2) yet, but if the striping sizes are the
3923 * defaults the behavior is the same as before. So find
3924 * out, and only fail if the image has non-default values.
3927 obj_size = (u64)1 << rbd_dev->header.obj_order;
3928 p = &striping_info_buf;
3929 stripe_unit = ceph_decode_64(&p);
3930 if (stripe_unit != obj_size) {
3931 rbd_warn(rbd_dev, "unsupported stripe unit "
3932 "(got %llu want %llu)",
3933 stripe_unit, obj_size);
3936 stripe_count = ceph_decode_64(&p);
3937 if (stripe_count != 1) {
3938 rbd_warn(rbd_dev, "unsupported stripe count "
3939 "(got %llu want 1)", stripe_count);
3942 rbd_dev->header.stripe_unit = stripe_unit;
3943 rbd_dev->header.stripe_count = stripe_count;
3948 static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
3950 size_t image_id_size;
3955 void *reply_buf = NULL;
3957 char *image_name = NULL;
3960 rbd_assert(!rbd_dev->spec->image_name);
3962 len = strlen(rbd_dev->spec->image_id);
3963 image_id_size = sizeof (__le32) + len;
3964 image_id = kmalloc(image_id_size, GFP_KERNEL);
3969 end = image_id + image_id_size;
3970 ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32)len);
3972 size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX;
3973 reply_buf = kmalloc(size, GFP_KERNEL);
3977 ret = rbd_obj_method_sync(rbd_dev, RBD_DIRECTORY,
3978 "rbd", "dir_get_name",
3979 image_id, image_id_size,
3984 end = reply_buf + ret;
3986 image_name = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
3987 if (IS_ERR(image_name))
3990 dout("%s: name is %s len is %zd\n", __func__, image_name, len);
3998 static u64 rbd_v1_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
4000 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
4001 const char *snap_name;
4004 /* Skip over names until we find the one we are looking for */
4006 snap_name = rbd_dev->header.snap_names;
4007 while (which < snapc->num_snaps) {
4008 if (!strcmp(name, snap_name))
4009 return snapc->snaps[which];
4010 snap_name += strlen(snap_name) + 1;
4016 static u64 rbd_v2_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
4018 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
4023 for (which = 0; !found && which < snapc->num_snaps; which++) {
4024 const char *snap_name;
4026 snap_id = snapc->snaps[which];
4027 snap_name = rbd_dev_v2_snap_name(rbd_dev, snap_id);
4028 if (IS_ERR(snap_name)) {
4029 /* ignore no-longer existing snapshots */
4030 if (PTR_ERR(snap_name) == -ENOENT)
4035 found = !strcmp(name, snap_name);
4038 return found ? snap_id : CEPH_NOSNAP;
4042 * Assumes name is never RBD_SNAP_HEAD_NAME; returns CEPH_NOSNAP if
4043 * no snapshot by that name is found, or if an error occurs.
4045 static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
4047 if (rbd_dev->image_format == 1)
4048 return rbd_v1_snap_id_by_name(rbd_dev, name);
4050 return rbd_v2_snap_id_by_name(rbd_dev, name);
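/*
 * Usage sketch (hypothetical caller): turn a user-supplied snapshot name
 * into an id, treating CEPH_NOSNAP as "not found".
 */
static int example_resolve_snap_name(struct rbd_device *rbd_dev,
				     const char *name, u64 *snap_id)
{
	u64 id = rbd_snap_id_by_name(rbd_dev, name);

	if (id == CEPH_NOSNAP)
		return -ENOENT;

	*snap_id = id;
	return 0;
}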
4054 * When an rbd image has a parent image, it is identified by the
4055 * pool, image, and snapshot ids (not names). This function fills
4056 * in the names for those ids. (It's OK if we can't figure out the
4057 * name for an image id, but the pool and snapshot ids should always
4058 * exist and have names.) All names in an rbd spec are dynamically
4061 * When an image being mapped (not a parent) is probed, we have the
4062 * pool name and pool id, image name and image id, and the snapshot
4063 * name. The only thing we're missing is the snapshot id.
4065 static int rbd_dev_spec_update(struct rbd_device *rbd_dev)
4067 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4068 struct rbd_spec *spec = rbd_dev->spec;
4069 const char *pool_name;
4070 const char *image_name;
4071 const char *snap_name;
4075 * An image being mapped will have the pool name (etc.), but
4076 * we need to look up the snapshot id.
4078 if (spec->pool_name) {
4079 if (strcmp(spec->snap_name, RBD_SNAP_HEAD_NAME)) {
4082 snap_id = rbd_snap_id_by_name(rbd_dev, spec->snap_name);
4083 if (snap_id == CEPH_NOSNAP)
4085 spec->snap_id = snap_id;
4087 spec->snap_id = CEPH_NOSNAP;
4093 /* Get the pool name; we have to make our own copy of this */
4095 pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, spec->pool_id);
4097 rbd_warn(rbd_dev, "no pool with id %llu", spec->pool_id);
4100 pool_name = kstrdup(pool_name, GFP_KERNEL);
4104 /* Fetch the image name; tolerate failure here */
4106 image_name = rbd_dev_image_name(rbd_dev);
4108 rbd_warn(rbd_dev, "unable to get image name");
4110 /* Look up the snapshot name, and make a copy */
4112 snap_name = rbd_snap_name(rbd_dev, spec->snap_id);
4113 if (IS_ERR(snap_name)) {
4114 ret = PTR_ERR(snap_name);
4118 spec->pool_name = pool_name;
4119 spec->image_name = image_name;
4120 spec->snap_name = snap_name;
4130 static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
4139 struct ceph_snap_context *snapc;
4143 * We'll need room for the seq value (maximum snapshot id),
4144 * snapshot count, and array of that many snapshot ids.
4145 * For now we have a fixed upper limit on the number we're
4146 * prepared to receive.
4148 size = sizeof (__le64) + sizeof (__le32) +
4149 RBD_MAX_SNAP_COUNT * sizeof (__le64);
4150 reply_buf = kzalloc(size, GFP_KERNEL);
4154 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
4155 "rbd", "get_snapcontext", NULL, 0,
4157 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4162 end = reply_buf + ret;
4164 ceph_decode_64_safe(&p, end, seq, out);
4165 ceph_decode_32_safe(&p, end, snap_count, out);
4168 * Make sure the reported number of snapshot ids wouldn't go
4169 * beyond the end of our buffer. But before checking that,
4170 * make sure the computed size of the snapshot context we
4171 * allocate is representable in a size_t.
4173 if (snap_count > (SIZE_MAX - sizeof (struct ceph_snap_context))
4178 if (!ceph_has_room(&p, end, snap_count * sizeof (__le64)))
4182 snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
4188 for (i = 0; i < snap_count; i++)
4189 snapc->snaps[i] = ceph_decode_64(&p);
4191 ceph_put_snap_context(rbd_dev->header.snapc);
4192 rbd_dev->header.snapc = snapc;
4194 dout(" snap context seq = %llu, snap_count = %u\n",
4195 (unsigned long long)seq, (unsigned int)snap_count);
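/*
 * Worked example (illustrative): with the reply laid out as le64 seq,
 * le32 snap_count, then snap_count le64 snapshot ids, RBD_MAX_SNAP_COUNT
 * keeps everything within one 4KB page: (4096 - 8 - 4) / 8 = 510 ids.
 */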
4202 static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
4213 size = sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN;
4214 reply_buf = kmalloc(size, GFP_KERNEL);
4216 return ERR_PTR(-ENOMEM);
4218 snapid = cpu_to_le64(snap_id);
4219 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
4220 "rbd", "get_snapshot_name",
4221 &snapid, sizeof (snapid),
4223 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4225 snap_name = ERR_PTR(ret);
4230 end = reply_buf + ret;
4231 snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
4232 if (IS_ERR(snap_name))
4235 dout(" snap_id 0x%016llx snap_name = %s\n",
4236 (unsigned long long)snap_id, snap_name);
4243 static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev)
4245 bool first_time = rbd_dev->header.object_prefix == NULL;
4248 ret = rbd_dev_v2_image_size(rbd_dev);
4253 ret = rbd_dev_v2_header_onetime(rbd_dev);
4259 * If the image supports layering, get the parent info. We
4260 * need to probe the first time regardless. Thereafter we
4261 * only need to do so if there's a parent, to see if it has
4262 * disappeared due to the mapped image getting flattened.
4264 if (rbd_dev->header.features & RBD_FEATURE_LAYERING &&
4265 (first_time || rbd_dev->parent_spec)) {
4268 ret = rbd_dev_v2_parent_info(rbd_dev);
4273 * Print a warning if this is the initial probe and
4274 * the image has a parent. Don't print it if the
4275 * image now being probed is itself a parent. We
4276 * can tell at this point because we won't know its
4277 * pool name yet (just its pool id).
4279 warn = rbd_dev->parent_spec && rbd_dev->spec->pool_name;
4280 if (first_time && warn)
4281 rbd_warn(rbd_dev, "WARNING: kernel layering "
4282 "is EXPERIMENTAL!");
4285 if (rbd_dev->spec->snap_id == CEPH_NOSNAP)
4286 if (rbd_dev->mapping.size != rbd_dev->header.image_size)
4287 rbd_dev->mapping.size = rbd_dev->header.image_size;
4289 ret = rbd_dev_v2_snap_context(rbd_dev);
4290 dout("rbd_dev_v2_snap_context returned %d\n", ret);
4295 static int rbd_bus_add_dev(struct rbd_device *rbd_dev)
4300 dev = &rbd_dev->dev;
4301 dev->bus = &rbd_bus_type;
4302 dev->type = &rbd_device_type;
4303 dev->parent = &rbd_root_dev;
4304 dev->release = rbd_dev_device_release;
4305 dev_set_name(dev, "%d", rbd_dev->dev_id);
4306 ret = device_register(dev);
4311 static void rbd_bus_del_dev(struct rbd_device *rbd_dev)
4313 device_unregister(&rbd_dev->dev);
4316 static atomic64_t rbd_dev_id_max = ATOMIC64_INIT(0);
4319 * Get a unique rbd identifier for the given new rbd_dev, and add
4320 * the rbd_dev to the global list. The minimum rbd id is 1.
4322 static void rbd_dev_id_get(struct rbd_device *rbd_dev)
4324 rbd_dev->dev_id = atomic64_inc_return(&rbd_dev_id_max);
4326 spin_lock(&rbd_dev_list_lock);
4327 list_add_tail(&rbd_dev->node, &rbd_dev_list);
4328 spin_unlock(&rbd_dev_list_lock);
4329 dout("rbd_dev %p given dev id %llu\n", rbd_dev,
4330 (unsigned long long) rbd_dev->dev_id);
4334 * Remove an rbd_dev from the global list, and record that its
4335 * identifier is no longer in use.
4337 static void rbd_dev_id_put(struct rbd_device *rbd_dev)
4339 struct list_head *tmp;
4340 int rbd_id = rbd_dev->dev_id;
4343 rbd_assert(rbd_id > 0);
4345 dout("rbd_dev %p released dev id %llu\n", rbd_dev,
4346 (unsigned long long) rbd_dev->dev_id);
4347 spin_lock(&rbd_dev_list_lock);
4348 list_del_init(&rbd_dev->node);
4351 * If the id being "put" is not the current maximum, there
4352 * is nothing special we need to do.
4354 if (rbd_id != atomic64_read(&rbd_dev_id_max)) {
4355 spin_unlock(&rbd_dev_list_lock);
4360 * We need to update the current maximum id. Search the
4361 * list to find out what it is. We're more likely to find
4362 * the maximum at the end, so search the list backward.
4365 list_for_each_prev(tmp, &rbd_dev_list) {
4366 struct rbd_device *rbd_dev;
4368 rbd_dev = list_entry(tmp, struct rbd_device, node);
4369 if (rbd_dev->dev_id > max_id)
4370 max_id = rbd_dev->dev_id;
4372 spin_unlock(&rbd_dev_list_lock);
4375 * The max id could have been updated by rbd_dev_id_get(), in
4376 * which case it now accurately reflects the new maximum.
4377 * Be careful not to overwrite the maximum value in that case.
4380 atomic64_cmpxchg(&rbd_dev_id_max, rbd_id, max_id);
4381 dout(" max dev id has been reset\n");
4385 * Skips over white space at *buf, and updates *buf to point to the
4386 * first found non-space character (if any). Returns the length of
4387 * the token (string of non-white space characters) found. Note
4388 * that *buf must be terminated with '\0'.
4390 static inline size_t next_token(const char **buf)
4393 * These are the characters that produce nonzero for
4394 * isspace() in the "C" and "POSIX" locales.
4396 const char *spaces = " \f\n\r\t\v";
4398 *buf += strspn(*buf, spaces); /* Find start of token */
4400 return strcspn(*buf, spaces); /* Return token length */
4404 * Finds the next token in *buf, and if the provided token buffer is
4405 * big enough, copies the found token into it. The result, if
4406 * copied, is guaranteed to be terminated with '\0'. Note that *buf
4407 * must be terminated with '\0' on entry.
4409 * Returns the length of the token found (not including the '\0').
4410 * Return value will be 0 if no token is found, and it will be >=
4411 * token_size if the token would not fit.
4413 * The *buf pointer will be updated to point beyond the end of the
4414 * found token. Note that this occurs even if the token buffer is
4415 * too small to hold it.
4417 static inline size_t copy_token(const char **buf,
4423 len = next_token(buf);
4424 if (len < token_size) {
4425 memcpy(token, *buf, len);
4426 *(token + len) = '\0';
4434 * Finds the next token in *buf, dynamically allocates a buffer big
4435 * enough to hold a copy of it, and copies the token into the new
4436 * buffer. The copy is guaranteed to be terminated with '\0'. Note
4437 * that a duplicate buffer is created even for a zero-length token.
4439 * Returns a pointer to the newly-allocated duplicate, or a null
4440 * pointer if memory for the duplicate was not available. If
4441 * the lenp argument is a non-null pointer, the length of the token
4442 * (not including the '\0') is returned in *lenp.
4444 * If successful, the *buf pointer will be updated to point beyond
4445 * the end of the found token.
4447 * Note: uses GFP_KERNEL for allocation.
4449 static inline char *dup_token(const char **buf, size_t *lenp)
4454 len = next_token(buf);
4455 dup = kmemdup(*buf, len + 1, GFP_KERNEL);
4458 *(dup + len) = '\0';
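/*
 * Usage sketch (hypothetical, not in the original source): walking a
 * NUL-terminated buffer with next_token(), advancing past each token.
 */
static void example_tokenize(const char *buf)
{
	size_t len;

	while ((len = next_token(&buf)) != 0) {
		pr_info("token: %.*s\n", (int)len, buf);
		buf += len;		/* step past the token just found */
	}
}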
4468 * Parse the options provided for an "rbd add" (i.e., rbd image
4469 * mapping) request. These arrive via a write to /sys/bus/rbd/add,
4470 * and the data written is passed here via a NUL-terminated buffer.
4471 * Returns 0 if successful or an error code otherwise.
4473 * The information extracted from these options is recorded in
4474 * the other parameters which return dynamically-allocated
4477 * The address of a pointer that will refer to a ceph options
4478 * structure. Caller must release the returned pointer using
4479 * ceph_destroy_options() when it is no longer needed.
4481 * Address of an rbd options pointer. Fully initialized by
4482 * this function; caller must release with kfree().
4484 * Address of an rbd image specification pointer. Fully
4485 * initialized by this function based on parsed options.
4486 * Caller must release with rbd_spec_put().
4488 * The options passed take this form:
4489 * <mon_addrs> <options> <pool_name> <image_name> [<snap_name>]
4492 * A comma-separated list of one or more monitor addresses.
4493 * A monitor address is an ip address, optionally followed
4494 * by a port number (separated by a colon).
4495 * I.e.: ip1[:port1][,ip2[:port2]...]
4497 * A comma-separated list of ceph and/or rbd options.
4499 * The name of the rados pool containing the rbd image.
4501 * The name of the image in that pool to map.
4503 * An optional snapshot name. If provided, the mapping will
4504 * present data from the image at the time that snapshot was
4505 * created. The image head is used if no snapshot name is
4506 * provided. Snapshot mappings are always read-only.
4508 static int rbd_add_parse_args(const char *buf,
4509 struct ceph_options **ceph_opts,
4510 struct rbd_options **opts,
4511 struct rbd_spec **rbd_spec)
4515 const char *mon_addrs;
4517 size_t mon_addrs_size;
4518 struct rbd_spec *spec = NULL;
4519 struct rbd_options *rbd_opts = NULL;
4520 struct ceph_options *copts;
4523 /* The first four tokens are required */
4525 len = next_token(&buf);
4527 rbd_warn(NULL, "no monitor address(es) provided");
4531 mon_addrs_size = len + 1;
4535 options = dup_token(&buf, NULL);
4539 rbd_warn(NULL, "no options provided");
4543 spec = rbd_spec_alloc();
4547 spec->pool_name = dup_token(&buf, NULL);
4548 if (!spec->pool_name)
4550 if (!*spec->pool_name) {
4551 rbd_warn(NULL, "no pool name provided");
4555 spec->image_name = dup_token(&buf, NULL);
4556 if (!spec->image_name)
4558 if (!*spec->image_name) {
4559 rbd_warn(NULL, "no image name provided");
4564 * Snapshot name is optional; default is to use "-"
4565 * (indicating the head/no snapshot).
4567 len = next_token(&buf);
4569 buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */
4570 len = sizeof (RBD_SNAP_HEAD_NAME) - 1;
4571 } else if (len > RBD_MAX_SNAP_NAME_LEN) {
4572 ret = -ENAMETOOLONG;
4575 snap_name = kmemdup(buf, len + 1, GFP_KERNEL);
4578 *(snap_name + len) = '\0';
4579 spec->snap_name = snap_name;
4581 /* Initialize all rbd options to the defaults */
4583 rbd_opts = kzalloc(sizeof (*rbd_opts), GFP_KERNEL);
4587 rbd_opts->read_only = RBD_READ_ONLY_DEFAULT;
4589 copts = ceph_parse_options(options, mon_addrs,
4590 mon_addrs + mon_addrs_size - 1,
4591 parse_rbd_opts_token, rbd_opts);
4592 if (IS_ERR(copts)) {
4593 ret = PTR_ERR(copts);
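/*
 * Example mapping string for the parser above (illustrative values only):
 *
 *	1.2.3.4:6789 name=admin,secret=<key> rbd myimage mysnap
 *
 * i.e. <mon_addrs> <options> <pool_name> <image_name> [<snap_name>]
 */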
/*
 * An rbd format 2 image has a unique identifier, distinct from the
 * name given to it by the user.  Internally, that identifier is
 * what's used to specify the names of objects related to the image.
 *
 * A special "rbd id" object is used to map an rbd image name to its
 * id.  If that object doesn't exist, then there is no v2 rbd image
 * with the supplied name.
 *
 * This function will record the given rbd_dev's image_id field if
 * it can be determined, and in that case will return 0.  If any
 * errors occur a negative errno will be returned and the rbd_dev's
 * image_id field will be unchanged (and should be NULL).
 */
static int rbd_dev_image_id(struct rbd_device *rbd_dev)
{
        int ret;
        size_t size;
        char *object_name;
        void *response;
        char *image_id;

        /*
         * When probing a parent image, the image id is already
         * known (and the image name likely is not).  There's no
         * need to fetch the image id again in this case.  We
         * do still need to set the image format though.
         */
        if (rbd_dev->spec->image_id) {
                rbd_dev->image_format = *rbd_dev->spec->image_id ? 2 : 1;
                return 0;
        }

        /*
         * First, see if the format 2 image id file exists, and if
         * so, get the image's persistent id from it.
         */
        size = sizeof (RBD_ID_PREFIX) + strlen(rbd_dev->spec->image_name);
        object_name = kmalloc(size, GFP_NOIO);
        if (!object_name)
                return -ENOMEM;
        sprintf(object_name, "%s%s", RBD_ID_PREFIX, rbd_dev->spec->image_name);
        dout("rbd id object name is %s\n", object_name);

        /* Response will be an encoded string, which includes a length */

        size = sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX;
        response = kzalloc(size, GFP_NOIO);
        if (!response) {
                ret = -ENOMEM;
                goto out;
        }

        /* If it doesn't exist we'll assume it's a format 1 image */

        ret = rbd_obj_method_sync(rbd_dev, object_name,
                                "rbd", "get_id", NULL, 0,
                                response, RBD_IMAGE_ID_LEN_MAX);
        dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
        if (ret == -ENOENT) {
                image_id = kstrdup("", GFP_KERNEL);
                ret = image_id ? 0 : -ENOMEM;
                if (!ret)
                        rbd_dev->image_format = 1;
        } else if (ret > sizeof (__le32)) {
                void *p = response;

                image_id = ceph_extract_encoded_string(&p, p + ret,
                                                NULL, GFP_NOIO);
                ret = IS_ERR(image_id) ? PTR_ERR(image_id) : 0;
                if (!ret)
                        rbd_dev->image_format = 2;
        } else {
                ret = -EINVAL;
        }

        if (!ret) {
                rbd_dev->spec->image_id = image_id;
                dout("image_id is %s\n", image_id);
        }
out:
        kfree(response);
        kfree(object_name);

        return ret;
}
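/*
 * Worked example (hypothetical image name): for a format 2 image
 * named "myimage", the id object queried above is named
 * "rbd_id.myimage" (RBD_ID_PREFIX followed by the image name).
 * Its "get_id" class method returns the persistent image id as a
 * length-prefixed encoded string, decoded here with
 * ceph_extract_encoded_string().  A missing id object (-ENOENT)
 * means the name refers to a format 1 image, which has no separate
 * id; the empty string is recorded in that case.
 */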
/*
 * Undo whatever state changes are made by v1 or v2 header info
 * routines.
 */
static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
{
        struct rbd_image_header *header;

        /* Drop parent reference unless it's already been done (or none) */

        if (rbd_dev->parent_overlap)
                rbd_dev_parent_put(rbd_dev);

        /* Free dynamic fields from the header, then zero it out */

        header = &rbd_dev->header;
        ceph_put_snap_context(header->snapc);
        kfree(header->snap_sizes);
        kfree(header->snap_names);
        kfree(header->object_prefix);
        memset(header, 0, sizeof (*header));
}
static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev)
{
        int ret;

        ret = rbd_dev_v2_object_prefix(rbd_dev);
        if (ret)
                goto out_err;
        /*
         * Get and check the features for the image.  Currently the
         * features are assumed to never change.
         */
        ret = rbd_dev_v2_features(rbd_dev);
        if (ret)
                goto out_err;
        /* If the image supports fancy striping, get its parameters */
        if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) {
                ret = rbd_dev_v2_striping_info(rbd_dev);
                if (ret < 0)
                        goto out_err;
        }
        /* No support for crypto and compression type format 2 images */

        return 0;
out_err:
        rbd_dev->header.features = 0;
        kfree(rbd_dev->header.object_prefix);
        rbd_dev->header.object_prefix = NULL;

        return ret;
}
static int rbd_dev_probe_parent(struct rbd_device *rbd_dev)
{
        struct rbd_device *parent = NULL;
        struct rbd_spec *parent_spec;
        struct rbd_client *rbdc;
        int ret;

        if (!rbd_dev->parent_spec)
                return 0;
        /*
         * We need to pass a reference to the client and the parent
         * spec when creating the parent rbd_dev.  Images related by
         * parent/child relationships always share both.
         */
        parent_spec = rbd_spec_get(rbd_dev->parent_spec);
        rbdc = __rbd_get_client(rbd_dev->rbd_client);

        ret = -ENOMEM;
        parent = rbd_dev_create(rbdc, parent_spec);
        if (!parent)
                goto out_err;

        ret = rbd_dev_image_probe(parent, false);
        if (ret < 0)
                goto out_err;
        rbd_dev->parent = parent;
        atomic_set(&rbd_dev->parent_ref, 1);

        return 0;
out_err:
        if (parent) {
                rbd_dev_unparent(rbd_dev);
                kfree(rbd_dev->header_name);
                rbd_dev_destroy(parent);
        } else {
                rbd_put_client(rbdc);
                rbd_spec_put(parent_spec);
        }

        return ret;
}
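/*
 * Layered images form a chain: a clone's parent may itself be a
 * clone.  rbd_dev_probe_parent() recurses, via rbd_dev_image_probe()
 * with mapping == false, until an image with no parent_spec is
 * reached.  With hypothetical names:
 *
 *   mapped image "child" -> parent "base@snap" -> (no parent)
 *
 * each rbd_dev in the chain holds its own reference to the shared
 * client and to its parent's spec.
 */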
static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
{
        int ret;

        /* generate unique id: find highest unique id, add one */
        rbd_dev_id_get(rbd_dev);

        /* Fill in the device name, now that we have its id. */
        BUILD_BUG_ON(DEV_NAME_LEN
                        < sizeof (RBD_DRV_NAME) + MAX_INT_FORMAT_WIDTH);
        sprintf(rbd_dev->name, "%s%d", RBD_DRV_NAME, rbd_dev->dev_id);

        /* Get our block major device number. */

        ret = register_blkdev(0, rbd_dev->name);
        if (ret < 0)
                goto err_out_id;
        rbd_dev->major = ret;

        /* Set up the blkdev mapping. */

        ret = rbd_init_disk(rbd_dev);
        if (ret)
                goto err_out_blkdev;

        ret = rbd_dev_mapping_set(rbd_dev);
        if (ret)
                goto err_out_disk;
        set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);

        ret = rbd_bus_add_dev(rbd_dev);
        if (ret)
                goto err_out_mapping;

        /* Everything's ready.  Announce the disk to the world. */

        set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
        add_disk(rbd_dev->disk);

        pr_info("%s: added with size 0x%llx\n", rbd_dev->disk->disk_name,
                (unsigned long long) rbd_dev->mapping.size);

        return ret;

err_out_mapping:
        rbd_dev_mapping_clear(rbd_dev);
err_out_disk:
        rbd_free_disk(rbd_dev);
err_out_blkdev:
        unregister_blkdev(rbd_dev->major, rbd_dev->name);
err_out_id:
        rbd_dev_id_put(rbd_dev);
        rbd_dev_mapping_clear(rbd_dev);

        return ret;
}
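/*
 * Illustrative outcome (device ids are assigned dynamically): the
 * first mapped image gets dev_id 0, so its name is "rbd0" and its
 * block device typically appears as /dev/rbd0.  For a hypothetical
 * 4 GiB mapping, the pr_info() above would log:
 *
 *   rbd0: added with size 0x100000000
 */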
static int rbd_dev_header_name(struct rbd_device *rbd_dev)
{
        struct rbd_spec *spec = rbd_dev->spec;
        size_t size;

        /* Record the header object name for this rbd image. */

        rbd_assert(rbd_image_format_valid(rbd_dev->image_format));

        if (rbd_dev->image_format == 1)
                size = strlen(spec->image_name) + sizeof (RBD_SUFFIX);
        else
                size = sizeof (RBD_HEADER_PREFIX) + strlen(spec->image_id);

        rbd_dev->header_name = kmalloc(size, GFP_KERNEL);
        if (!rbd_dev->header_name)
                return -ENOMEM;

        if (rbd_dev->image_format == 1)
                sprintf(rbd_dev->header_name, "%s%s",
                        spec->image_name, RBD_SUFFIX);
        else
                sprintf(rbd_dev->header_name, "%s%s",
                        RBD_HEADER_PREFIX, spec->image_id);

        return 0;
}
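/*
 * Examples (hypothetical names): a format 1 image named "myimage"
 * gets header object "myimage.rbd" (RBD_SUFFIX appended to the
 * image name), while a format 2 image whose id is "abc123" gets
 * header object "rbd_header.abc123" (RBD_HEADER_PREFIX followed by
 * the image id rather than the user-visible name).
 */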
static void rbd_dev_image_release(struct rbd_device *rbd_dev)
{
        rbd_dev_unprobe(rbd_dev);
        kfree(rbd_dev->header_name);
        rbd_dev->header_name = NULL;
        rbd_dev->image_format = 0;
        kfree(rbd_dev->spec->image_id);
        rbd_dev->spec->image_id = NULL;

        rbd_dev_destroy(rbd_dev);
}
/*
 * Probe for the existence of the header object for the given rbd
 * device.  If this image is the one being mapped (i.e., not a
 * parent), initiate a watch on its header object before using that
 * object to get detailed information about the rbd image.
 */
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping)
{
        int ret;
        int tmp;

        /*
         * Get the id from the image id object.  Unless there's an
         * error, rbd_dev->spec->image_id will be filled in with
         * a dynamically-allocated string, and rbd_dev->image_format
         * will be set to either 1 or 2.
         */
        ret = rbd_dev_image_id(rbd_dev);
        if (ret)
                return ret;
        rbd_assert(rbd_dev->spec->image_id);
        rbd_assert(rbd_image_format_valid(rbd_dev->image_format));

        ret = rbd_dev_header_name(rbd_dev);
        if (ret)
                goto err_out_format;

        if (mapping) {
                ret = rbd_dev_header_watch_sync(rbd_dev, true);
                if (ret)
                        goto out_header_name;
        }

        if (rbd_dev->image_format == 1)
                ret = rbd_dev_v1_header_info(rbd_dev);
        else
                ret = rbd_dev_v2_header_info(rbd_dev);
        if (ret)
                goto err_out_watch;

        ret = rbd_dev_spec_update(rbd_dev);
        if (ret)
                goto err_out_probe;

        ret = rbd_dev_probe_parent(rbd_dev);
        if (ret)
                goto err_out_probe;

        dout("discovered format %u image, header name is %s\n",
                rbd_dev->image_format, rbd_dev->header_name);

        return 0;
err_out_probe:
        rbd_dev_unprobe(rbd_dev);
err_out_watch:
        if (mapping) {
                tmp = rbd_dev_header_watch_sync(rbd_dev, false);
                if (tmp)
                        rbd_warn(rbd_dev, "unable to tear down watch request (%d)\n",
                                tmp);
        }
out_header_name:
        kfree(rbd_dev->header_name);
        rbd_dev->header_name = NULL;
err_out_format:
        rbd_dev->image_format = 0;
        kfree(rbd_dev->spec->image_id);
        rbd_dev->spec->image_id = NULL;

        dout("probe failed, returning %d\n", ret);

        return ret;
}
static ssize_t rbd_add(struct bus_type *bus,
                       const char *buf,
                       size_t count)
{
        struct rbd_device *rbd_dev = NULL;
        struct ceph_options *ceph_opts = NULL;
        struct rbd_options *rbd_opts = NULL;
        struct rbd_spec *spec = NULL;
        struct rbd_client *rbdc;
        struct ceph_osd_client *osdc;
        bool read_only;
        int rc = -ENOMEM;

        if (!try_module_get(THIS_MODULE))
                return -ENODEV;

        /* parse add command */
        rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
        if (rc < 0)
                goto err_out_module;
        read_only = rbd_opts->read_only;
        kfree(rbd_opts);
        rbd_opts = NULL;        /* done with this */

        rbdc = rbd_get_client(ceph_opts);
        if (IS_ERR(rbdc)) {
                rc = PTR_ERR(rbdc);
                goto err_out_args;
        }

        /* pick the pool */
        osdc = &rbdc->client->osdc;
        rc = ceph_pg_poolid_by_name(osdc->osdmap, spec->pool_name);
        if (rc < 0)
                goto err_out_client;
        spec->pool_id = (u64)rc;

        /* The ceph file layout needs to fit pool id in 32 bits */

        if (spec->pool_id > (u64)U32_MAX) {
                rbd_warn(NULL, "pool id too large (%llu > %u)\n",
                        (unsigned long long)spec->pool_id, U32_MAX);
                rc = -EIO;
                goto err_out_client;
        }

        rbd_dev = rbd_dev_create(rbdc, spec);
        if (!rbd_dev)
                goto err_out_client;
        rbdc = NULL;            /* rbd_dev now owns this */
        spec = NULL;            /* rbd_dev now owns this */

        rc = rbd_dev_image_probe(rbd_dev, true);
        if (rc < 0)
                goto err_out_rbd_dev;

        /* If we are mapping a snapshot it must be marked read-only */

        if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
                read_only = true;
        rbd_dev->mapping.read_only = read_only;

        rc = rbd_dev_device_setup(rbd_dev);
        if (rc) {
                rbd_dev_image_release(rbd_dev);
                goto err_out_module;
        }

        return count;

err_out_rbd_dev:
        rbd_dev_destroy(rbd_dev);
err_out_client:
        rbd_put_client(rbdc);
err_out_args:
        rbd_spec_put(spec);
err_out_module:
        module_put(THIS_MODULE);

        dout("Error adding device %s\n", buf);

        return (ssize_t)rc;
}
static void rbd_dev_device_release(struct device *dev)
{
        struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

        rbd_free_disk(rbd_dev);
        clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
        rbd_dev_mapping_clear(rbd_dev);
        unregister_blkdev(rbd_dev->major, rbd_dev->name);
        rbd_dev_id_put(rbd_dev);
        rbd_dev_mapping_clear(rbd_dev);
}
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev)
{
        while (rbd_dev->parent) {
                struct rbd_device *first = rbd_dev;
                struct rbd_device *second = first->parent;
                struct rbd_device *third;

                /*
                 * Follow to the parent with no grandparent and
                 * remove it.
                 */
                while (second && (third = second->parent)) {
                        first = second;
                        second = third;
                }
                rbd_assert(second);
                rbd_dev_image_release(second);
                first->parent = NULL;
                first->parent_overlap = 0;

                rbd_assert(first->parent_spec);
                rbd_spec_put(first->parent_spec);
                first->parent_spec = NULL;
        }
}
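/*
 * For a hypothetical chain a -> b -> c (where "a" is the mapped
 * device), the inner loop above walks to the deepest ancestor
 * first, so images are released in the order c, then b: each pass
 * of the outer loop releases the one remaining parent that has no
 * parent of its own.
 */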
static ssize_t rbd_remove(struct bus_type *bus,
                          const char *buf,
                          size_t count)
{
        struct rbd_device *rbd_dev = NULL;
        struct list_head *tmp;
        int dev_id;
        unsigned long ul;
        bool already = false;
        int ret;

        ret = kstrtoul(buf, 10, &ul);
        if (ret)
                return ret;

        /* convert to int; abort if we lost anything in the conversion */
        if (ul > INT_MAX)
                return -EINVAL;
        dev_id = (int)ul;

        ret = -ENOENT;
        spin_lock(&rbd_dev_list_lock);
        list_for_each(tmp, &rbd_dev_list) {
                rbd_dev = list_entry(tmp, struct rbd_device, node);
                if (rbd_dev->dev_id == dev_id) {
                        ret = 0;
                        break;
                }
        }
        if (!ret) {
                spin_lock_irq(&rbd_dev->lock);
                if (rbd_dev->open_count)
                        ret = -EBUSY;
                else
                        already = test_and_set_bit(RBD_DEV_FLAG_REMOVING,
                                                        &rbd_dev->flags);
                spin_unlock_irq(&rbd_dev->lock);
        }
        spin_unlock(&rbd_dev_list_lock);
        if (ret < 0 || already)
                return ret;

        ret = rbd_dev_header_watch_sync(rbd_dev, false);
        if (ret)
                rbd_warn(rbd_dev, "failed to cancel watch event (%d)\n", ret);

        /*
         * flush remaining watch callbacks - these must be complete
         * before the osd_client is shutdown
         */
        dout("%s: flushing notifies\n", __func__);
        ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);

        /*
         * Don't free anything from rbd_dev->disk until after all
         * notifies are completely processed.  Otherwise
         * rbd_bus_del_dev() will race with rbd_watch_cb(), resulting
         * in a potential use after free of rbd_dev->disk or rbd_dev.
         */
        rbd_bus_del_dev(rbd_dev);
        rbd_dev_image_release(rbd_dev);
        module_put(THIS_MODULE);

        return count;
}
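/*
 * Example: writing a device id to the bus control file unmaps the
 * corresponding device (here id 0, i.e. the device named "rbd0"):
 *
 *   $ echo 0 > /sys/bus/rbd/remove
 *
 * The write fails with -EBUSY while the device is open, and with
 * -ENOENT if no mapped device has the given id.
 */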
/*
 * create control files in sysfs
 * /sys/bus/rbd/...
 */
static int rbd_sysfs_init(void)
{
        int ret;

        ret = device_register(&rbd_root_dev);
        if (ret < 0)
                return ret;

        ret = bus_register(&rbd_bus_type);
        if (ret < 0)
                device_unregister(&rbd_root_dev);

        return ret;
}

static void rbd_sysfs_cleanup(void)
{
        bus_unregister(&rbd_bus_type);
        device_unregister(&rbd_root_dev);
}
static int rbd_slab_init(void)
{
        rbd_assert(!rbd_img_request_cache);
        rbd_img_request_cache = kmem_cache_create("rbd_img_request",
                                        sizeof (struct rbd_img_request),
                                        __alignof__(struct rbd_img_request),
                                        0, NULL);
        if (!rbd_img_request_cache)
                return -ENOMEM;

        rbd_assert(!rbd_obj_request_cache);
        rbd_obj_request_cache = kmem_cache_create("rbd_obj_request",
                                        sizeof (struct rbd_obj_request),
                                        __alignof__(struct rbd_obj_request),
                                        0, NULL);
        if (!rbd_obj_request_cache)
                goto out_err;

        rbd_assert(!rbd_segment_name_cache);
        rbd_segment_name_cache = kmem_cache_create("rbd_segment_name",
                                        MAX_OBJ_NAME_SIZE + 1, 1, 0, NULL);
        if (rbd_segment_name_cache)
                return 0;
out_err:
        if (rbd_obj_request_cache) {
                kmem_cache_destroy(rbd_obj_request_cache);
                rbd_obj_request_cache = NULL;
        }

        kmem_cache_destroy(rbd_img_request_cache);
        rbd_img_request_cache = NULL;

        return -ENOMEM;
}
static void rbd_slab_exit(void)
{
        rbd_assert(rbd_segment_name_cache);
        kmem_cache_destroy(rbd_segment_name_cache);
        rbd_segment_name_cache = NULL;

        rbd_assert(rbd_obj_request_cache);
        kmem_cache_destroy(rbd_obj_request_cache);
        rbd_obj_request_cache = NULL;

        rbd_assert(rbd_img_request_cache);
        kmem_cache_destroy(rbd_img_request_cache);
        rbd_img_request_cache = NULL;
}
static int __init rbd_init(void)
{
        int rc;

        if (!libceph_compatible(NULL)) {
                rbd_warn(NULL, "libceph incompatibility (quitting)");
                return -EINVAL;
        }
        rc = rbd_slab_init();
        if (rc)
                return rc;
        rc = rbd_sysfs_init();
        if (rc)
                rbd_slab_exit();
        else
                pr_info("loaded " RBD_DRV_NAME_LONG "\n");

        return rc;
}

static void __exit rbd_exit(void)
{
        rbd_sysfs_cleanup();
        rbd_slab_exit();
}

module_init(rbd_init);
module_exit(rbd_exit);
MODULE_AUTHOR("Alex Elder <elder@inktank.com>");
MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
MODULE_DESCRIPTION("rados block device");

/* following authorship retained from original osdblk.c */
MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>");

MODULE_LICENSE("GPL");