/*
   rbd.c -- Export ceph rados objects as a Linux block device


   based on drivers/block/osdblk.c:

   Copyright 2009 Red Hat, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.



   For usage instructions, please refer to:

                 Documentation/ABI/testing/sysfs-bus-rbd

 */
#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/mon_client.h>
#include <linux/ceph/decode.h>
#include <linux/parser.h>
#include <linux/bsearch.h>

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/slab.h>

#include "rbd_types.h"
#define RBD_DEBUG	/* Activate rbd_assert() calls */
/*
 * The basic unit of block I/O is a sector.  It is interpreted in a
 * number of contexts in Linux (blk, bio, genhd), but the default is
 * universally 512 bytes.  These symbols are just slightly more
 * meaningful than the bare numbers they represent.
 */
#define SECTOR_SHIFT	9
#define SECTOR_SIZE	(1ULL << SECTOR_SHIFT)
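
/*
 * Illustrative sketch (not part of the original driver): how these
 * symbols are typically used.  The helper name is hypothetical.
 */
static inline u64 example_sectors_to_bytes(u64 sectors)
{
        return sectors << SECTOR_SHIFT;         /* 1 sector -> 512 bytes */
}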
/*
 * Increment the given counter and return its updated value.
 * If the counter is already 0 it will not be incremented.
 * If the counter is already at its maximum value returns
 * -EINVAL without updating it.
 */
static int atomic_inc_return_safe(atomic_t *v)
{
        unsigned int counter;

        counter = (unsigned int)__atomic_add_unless(v, 1, 0);
        if (counter <= (unsigned int)INT_MAX)
                return (int)counter;

        atomic_dec(v);

        return -EINVAL;
}
/* Decrement the counter.  Return the resulting value, or -EINVAL */
static int atomic_dec_return_safe(atomic_t *v)
{
        int counter;

        counter = atomic_dec_return(v);
        if (counter >= 0)
                return counter;

        atomic_inc(v);

        return -EINVAL;
}
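
/*
 * Usage sketch for the saturating counters above (hypothetical
 * helper, mirroring the parent_ref handling later in this file):
 * a non-positive return means the counter was exhausted or had
 * already dropped to zero, so no reference was taken.
 */
static inline bool example_ref_get(atomic_t *ref)
{
        return atomic_inc_return_safe(ref) > 0;
}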
#define RBD_DRV_NAME "rbd"
#define RBD_DRV_NAME_LONG "rbd (rados block device)"

#define RBD_MINORS_PER_MAJOR	256		/* max minors per blkdev */

#define RBD_SNAP_DEV_NAME_PREFIX	"snap_"
#define RBD_MAX_SNAP_NAME_LEN	\
			(NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))

#define RBD_MAX_SNAP_COUNT	510	/* allows max snapc to fit in 4KB */

#define RBD_SNAP_HEAD_NAME	"-"

#define BAD_SNAP_INDEX	U32_MAX		/* invalid index into snap array */

/* This allows a single page to hold an image name sent by OSD */
#define RBD_IMAGE_NAME_LEN_MAX	(PAGE_SIZE - sizeof (__le32) - 1)
#define RBD_IMAGE_ID_LEN_MAX	64

#define RBD_OBJ_PREFIX_LEN_MAX	64

/* Feature bits */

#define RBD_FEATURE_LAYERING	(1<<0)
#define RBD_FEATURE_STRIPINGV2	(1<<1)
#define RBD_FEATURES_ALL \
	    (RBD_FEATURE_LAYERING | RBD_FEATURE_STRIPINGV2)

/* Features supported by this (client software) implementation. */

#define RBD_FEATURES_SUPPORTED	(RBD_FEATURES_ALL)
/*
 * An RBD device name will be "rbd#", where the "rbd" comes from
 * RBD_DRV_NAME above, and # is a unique integer identifier.
 * MAX_INT_FORMAT_WIDTH is used in ensuring DEV_NAME_LEN is big
 * enough to hold all possible device names.
 */
#define DEV_NAME_LEN		32
#define MAX_INT_FORMAT_WIDTH	((5 * sizeof (int)) / 2 + 1)
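
/*
 * Worked example for MAX_INT_FORMAT_WIDTH: a decimal digit carries a
 * little over 3 bits, so the 8 * sizeof (int) bits of an int need at
 * most (5 * sizeof (int)) / 2 digits, plus 1 for a sign.  With 4-byte
 * ints that is 11 characters, exactly enough for "-2147483648".
 */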
/*
 * block device image metadata (in-memory version)
 */
struct rbd_image_header {
        /* These six fields never change for a given rbd image */
        char *object_prefix;
        __u8 obj_order;
        __u8 crypt_type;
        __u8 comp_type;
        u64 stripe_unit;
        u64 stripe_count;
        u64 features;           /* Might be changeable someday? */

        /* The remaining fields need to be updated occasionally */
        u64 image_size;
        struct ceph_snap_context *snapc;
        char *snap_names;       /* format 1 only */
        u64 *snap_sizes;        /* format 1 only */
};
/*
 * An rbd image specification.
 *
 * The tuple (pool_id, image_id, snap_id) is sufficient to uniquely
 * identify an image.  Each rbd_dev structure includes a pointer to
 * an rbd_spec structure that encapsulates this identity.
 *
 * Each of the id's in an rbd_spec has an associated name.  For a
 * user-mapped image, the names are supplied and the id's associated
 * with them are looked up.  For a layered image, a parent image is
 * defined by the tuple, and the names are looked up.
 *
 * An rbd_dev structure contains a parent_spec pointer which is
 * non-null if the image it represents is a child in a layered
 * image.  This pointer will refer to the rbd_spec structure used
 * by the parent rbd_dev for its own identity (i.e., the structure
 * is shared between the parent and child).
 *
 * Since these structures are populated once, during the discovery
 * phase of image construction, they are effectively immutable so
 * we make no effort to synchronize access to them.
 *
 * Note that code herein does not assume the image name is known (it
 * could be a null pointer).
 */
struct rbd_spec {
        u64             pool_id;
        const char      *pool_name;

        const char      *image_id;
        const char      *image_name;

        u64             snap_id;
        const char      *snap_name;

        struct kref     kref;
};
/*
 * an instance of the client.  multiple devices may share an rbd client.
 */
struct rbd_client {
        struct ceph_client      *client;
        struct kref             kref;
        struct list_head        node;
};
struct rbd_img_request;
typedef void (*rbd_img_callback_t)(struct rbd_img_request *);

#define BAD_WHICH	U32_MAX		/* Good which or bad which, which? */

struct rbd_obj_request;
typedef void (*rbd_obj_callback_t)(struct rbd_obj_request *);

enum obj_request_type {
        OBJ_REQUEST_NODATA, OBJ_REQUEST_BIO, OBJ_REQUEST_PAGES
};

enum obj_req_flags {
        OBJ_REQ_DONE,           /* completion flag: not done = 0, done = 1 */
        OBJ_REQ_IMG_DATA,       /* object usage: standalone = 0, image = 1 */
        OBJ_REQ_KNOWN,          /* EXISTS flag valid: no = 0, yes = 1 */
        OBJ_REQ_EXISTS,         /* target exists: no = 0, yes = 1 */
};
struct rbd_obj_request {
        const char              *object_name;
        u64                     offset;         /* object start byte */
        u64                     length;         /* bytes from offset */
        unsigned long           flags;

        /*
         * An object request associated with an image will have its
         * img_data flag set; a standalone object request will not.
         *
         * A standalone object request will have which == BAD_WHICH
         * and a null obj_request pointer.
         *
         * An object request initiated in support of a layered image
         * object (to check for its existence before a write) will
         * have which == BAD_WHICH and a non-null obj_request pointer.
         *
         * Finally, an object request for rbd image data will have
         * which != BAD_WHICH, and will have a non-null img_request
         * pointer.  The value of which will be in the range
         * 0..(img_request->obj_request_count-1).
         */
        union {
                struct rbd_obj_request  *obj_request;   /* STAT op */
                struct {
                        struct rbd_img_request  *img_request;
                        u64                     img_offset;
                        /* links for img_request->obj_requests list */
                        struct list_head        links;
                };
        };
        u32                     which;          /* posn image request list */

        enum obj_request_type   type;
        union {
                struct bio      *bio_list;
                struct {
                        struct page     **pages;
                        u32             page_count;
                };
        };
        struct page             **copyup_pages;
        u32                     copyup_page_count;

        struct ceph_osd_request *osd_req;

        u64                     xferred;        /* bytes transferred */
        int                     result;

        rbd_obj_callback_t      callback;
        struct completion       completion;

        struct kref             kref;
};
enum img_req_flags {
        IMG_REQ_WRITE,          /* I/O direction: read = 0, write = 1 */
        IMG_REQ_CHILD,          /* initiator: block = 0, child image = 1 */
        IMG_REQ_LAYERED,        /* ENOENT handling: normal = 0, layered = 1 */
};
struct rbd_img_request {
        struct rbd_device       *rbd_dev;
        u64                     offset; /* starting image byte offset */
        u64                     length; /* byte count from offset */
        unsigned long           flags;
        union {
                u64                     snap_id;        /* for reads */
                struct ceph_snap_context *snapc;        /* for writes */
        };
        union {
                struct request          *rq;            /* block request */
                struct rbd_obj_request  *obj_request;   /* obj req initiator */
        };
        struct page             **copyup_pages;
        u32                     copyup_page_count;
        spinlock_t              completion_lock;/* protects next_completion */
        u32                     next_completion;
        rbd_img_callback_t      callback;
        u64                     xferred;/* aggregate bytes transferred */
        int                     result; /* first nonzero obj_request result */

        u32                     obj_request_count;
        struct list_head        obj_requests;   /* rbd_obj_request structs */

        struct kref             kref;
};
#define for_each_obj_request(ireq, oreq) \
        list_for_each_entry(oreq, &(ireq)->obj_requests, links)
#define for_each_obj_request_from(ireq, oreq) \
        list_for_each_entry_from(oreq, &(ireq)->obj_requests, links)
#define for_each_obj_request_safe(ireq, oreq, n) \
        list_for_each_entry_safe_reverse(oreq, n, &(ireq)->obj_requests, links)
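
/*
 * Usage sketch for the iterators above (cf. rbd_img_request_complete()
 * later in this file):
 *
 *      struct rbd_obj_request *obj_request;
 *
 *      for_each_obj_request(img_request, obj_request)
 *              xferred += obj_request->xferred;
 *
 * The _safe variant walks the list in reverse and tolerates deletion
 * of the current entry while iterating.
 */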
struct rbd_mapping {
        u64                     size;
        u64                     features;
        bool                    read_only;
};

/*
 * a single device
 */
struct rbd_device {
        int                     dev_id;         /* blkdev unique id */

        int                     major;          /* blkdev assigned major */
        struct gendisk          *disk;          /* blkdev's gendisk and rq */

        u32                     image_format;   /* Either 1 or 2 */
        struct rbd_client       *rbd_client;

        char                    name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */

        spinlock_t              lock;           /* queue, flags, open_count */

        struct rbd_image_header header;
        unsigned long           flags;          /* possibly lock protected */
        struct rbd_spec         *spec;

        char                    *header_name;

        struct ceph_file_layout layout;

        struct ceph_osd_event   *watch_event;
        struct rbd_obj_request  *watch_request;

        struct rbd_spec         *parent_spec;
        u64                     parent_overlap;
        atomic_t                parent_ref;
        struct rbd_device       *parent;

        /* protects updating the header */
        struct rw_semaphore     header_rwsem;

        struct rbd_mapping      mapping;

        struct list_head        node;

        /* sysfs related */
        struct device           dev;
        unsigned long           open_count;     /* protected by lock */
};
/*
 * Flag bits for rbd_dev->flags.  If atomicity is required,
 * rbd_dev->lock is used to protect access.
 *
 * Currently, only the "removing" flag (which is coupled with the
 * "open_count" field) requires atomic access.
 */
enum rbd_dev_flags {
        RBD_DEV_FLAG_EXISTS,    /* mapped snapshot has not been deleted */
        RBD_DEV_FLAG_REMOVING,  /* this mapping is being removed */
};
static DEFINE_MUTEX(ctl_mutex);   /* Serialize open/close/setup/teardown */

static LIST_HEAD(rbd_dev_list);    /* devices */
static DEFINE_SPINLOCK(rbd_dev_list_lock);

static LIST_HEAD(rbd_client_list);              /* clients */
static DEFINE_SPINLOCK(rbd_client_list_lock);

/* Slab caches for frequently-allocated structures */

static struct kmem_cache        *rbd_img_request_cache;
static struct kmem_cache        *rbd_obj_request_cache;
static struct kmem_cache        *rbd_segment_name_cache;

static int rbd_img_request_submit(struct rbd_img_request *img_request);

static void rbd_dev_device_release(struct device *dev);

static ssize_t rbd_add(struct bus_type *bus, const char *buf,
                       size_t count);
static ssize_t rbd_remove(struct bus_type *bus, const char *buf,
                          size_t count);
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping);
static void rbd_spec_put(struct rbd_spec *spec);
static struct bus_attribute rbd_bus_attrs[] = {
        __ATTR(add, S_IWUSR, NULL, rbd_add),
        __ATTR(remove, S_IWUSR, NULL, rbd_remove),
        __ATTR_NULL
};

static struct bus_type rbd_bus_type = {
        .name           = "rbd",
        .bus_attrs      = rbd_bus_attrs,
};

static void rbd_root_dev_release(struct device *dev)
{
}

static struct device rbd_root_dev = {
        .init_name =    "rbd",
        .release =      rbd_root_dev_release,
};
static __printf(2, 3)
void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
{
        struct va_format vaf;
        va_list args;

        va_start(args, fmt);
        vaf.fmt = fmt;
        vaf.va = &args;

        if (!rbd_dev)
                printk(KERN_WARNING "%s: %pV\n", RBD_DRV_NAME, &vaf);
        else if (rbd_dev->disk)
                printk(KERN_WARNING "%s: %s: %pV\n",
                        RBD_DRV_NAME, rbd_dev->disk->disk_name, &vaf);
        else if (rbd_dev->spec && rbd_dev->spec->image_name)
                printk(KERN_WARNING "%s: image %s: %pV\n",
                        RBD_DRV_NAME, rbd_dev->spec->image_name, &vaf);
        else if (rbd_dev->spec && rbd_dev->spec->image_id)
                printk(KERN_WARNING "%s: id %s: %pV\n",
                        RBD_DRV_NAME, rbd_dev->spec->image_id, &vaf);
        else    /* punt */
                printk(KERN_WARNING "%s: rbd_dev %p: %pV\n",
                        RBD_DRV_NAME, rbd_dev, &vaf);
        va_end(args);
}
#ifdef RBD_DEBUG
#define rbd_assert(expr)                                                \
                if (unlikely(!(expr))) {                                \
                        printk(KERN_ERR "\nAssertion failure in %s() "  \
                                                "at line %d:\n\n"       \
                                        "\trbd_assert(%s);\n\n",        \
                                        __func__, __LINE__, #expr);     \
                        BUG();                                          \
                }
#else /* !RBD_DEBUG */
#  define rbd_assert(expr)      ((void) 0)
#endif /* !RBD_DEBUG */
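
/*
 * Example (drawn from callers below): rbd_assert() is reserved for
 * driver invariants, e.g.
 *
 *      rbd_assert(obj_request->which == BAD_WHICH);
 *
 * and compiles away entirely when RBD_DEBUG is not defined.
 */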
static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request);
static void rbd_img_parent_read(struct rbd_obj_request *obj_request);
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);

static int rbd_dev_refresh(struct rbd_device *rbd_dev);
static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev);
static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev);
static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
                                        u64 snap_id);
static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
                                u8 *order, u64 *snap_size);
static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
                u64 *snap_features);
static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name);
static int rbd_open(struct block_device *bdev, fmode_t mode)
{
        struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
        bool removing = false;

        if ((mode & FMODE_WRITE) && rbd_dev->mapping.read_only)
                return -EROFS;

        spin_lock_irq(&rbd_dev->lock);
        if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
                removing = true;
        else
                rbd_dev->open_count++;
        spin_unlock_irq(&rbd_dev->lock);
        if (removing)
                return -ENOENT;

        mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
        (void) get_device(&rbd_dev->dev);
        set_device_ro(bdev, rbd_dev->mapping.read_only);
        mutex_unlock(&ctl_mutex);

        return 0;
}
static void rbd_release(struct gendisk *disk, fmode_t mode)
{
        struct rbd_device *rbd_dev = disk->private_data;
        unsigned long open_count_before;

        spin_lock_irq(&rbd_dev->lock);
        open_count_before = rbd_dev->open_count--;
        spin_unlock_irq(&rbd_dev->lock);
        rbd_assert(open_count_before > 0);

        mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
        put_device(&rbd_dev->dev);
        mutex_unlock(&ctl_mutex);
}
static const struct block_device_operations rbd_bd_ops = {
        .owner                  = THIS_MODULE,
        .open                   = rbd_open,
        .release                = rbd_release,
};
/*
 * Initialize an rbd client instance.  Success or not, this function
 * consumes ceph_opts.
 */
static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
{
        struct rbd_client *rbdc;
        int ret = -ENOMEM;

        dout("%s:\n", __func__);
        rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL);
        if (!rbdc)
                goto out_opt;

        kref_init(&rbdc->kref);
        INIT_LIST_HEAD(&rbdc->node);

        mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);

        rbdc->client = ceph_create_client(ceph_opts, rbdc, 0, 0);
        if (IS_ERR(rbdc->client))
                goto out_mutex;
        ceph_opts = NULL; /* Now rbdc->client is responsible for ceph_opts */

        ret = ceph_open_session(rbdc->client);
        if (ret < 0)
                goto out_err;

        spin_lock(&rbd_client_list_lock);
        list_add_tail(&rbdc->node, &rbd_client_list);
        spin_unlock(&rbd_client_list_lock);

        mutex_unlock(&ctl_mutex);
        dout("%s: rbdc %p\n", __func__, rbdc);

        return rbdc;

out_err:
        ceph_destroy_client(rbdc->client);
out_mutex:
        mutex_unlock(&ctl_mutex);
        kfree(rbdc);
out_opt:
        if (ceph_opts)
                ceph_destroy_options(ceph_opts);
        dout("%s: error %d\n", __func__, ret);

        return ERR_PTR(ret);
}
static struct rbd_client *__rbd_get_client(struct rbd_client *rbdc)
{
        kref_get(&rbdc->kref);

        return rbdc;
}

/*
 * Find a ceph client with specific addr and configuration.  If
 * found, bump its reference count.
 */
static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
{
        struct rbd_client *client_node;
        bool found = false;

        if (ceph_opts->flags & CEPH_OPT_NOSHARE)
                return NULL;

        spin_lock(&rbd_client_list_lock);
        list_for_each_entry(client_node, &rbd_client_list, node) {
                if (!ceph_compare_options(ceph_opts, client_node->client)) {
                        __rbd_get_client(client_node);

                        found = true;
                        break;
                }
        }
        spin_unlock(&rbd_client_list_lock);

        return found ? client_node : NULL;
}
/*
 * mount options
 */
enum {
        Opt_last_int,
        /* int args above */
        Opt_last_string,
        /* string args above */
        Opt_read_only,
        Opt_read_write,
        /* Boolean args above */
        Opt_last_bool,
};

static match_table_t rbd_opts_tokens = {
        /* int args above */
        /* string args above */
        {Opt_read_only, "read_only"},
        {Opt_read_only, "ro"},          /* Alternate spelling */
        {Opt_read_write, "read_write"},
        {Opt_read_write, "rw"},         /* Alternate spelling */
        /* Boolean args above */
        {-1, NULL}
};

struct rbd_options {
        bool    read_only;
};

#define RBD_READ_ONLY_DEFAULT   false
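
/*
 * Sketch of how the token table above is consumed (option string
 * hypothetical): mapping an image with the option "ro" results in
 * parse_rbd_opts_token() below being called with c = "ro", which
 * matches {Opt_read_only, "ro"} and sets rbd_opts->read_only.
 */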
static int parse_rbd_opts_token(char *c, void *private)
{
        struct rbd_options *rbd_opts = private;
        substring_t argstr[MAX_OPT_ARGS];
        int token, intval, ret;

        token = match_token(c, rbd_opts_tokens, argstr);
        if (token < 0)
                return -EINVAL;

        if (token < Opt_last_int) {
                ret = match_int(&argstr[0], &intval);
                if (ret < 0) {
                        pr_err("bad mount option arg (not int) "
                               "at '%s'\n", c);
                        return ret;
                }
                dout("got int token %d val %d\n", token, intval);
        } else if (token > Opt_last_int && token < Opt_last_string) {
                dout("got string token %d val %s\n", token,
                     argstr[0].from);
        } else if (token > Opt_last_string && token < Opt_last_bool) {
                dout("got Boolean token %d\n", token);
        } else {
                dout("got token %d\n", token);
        }

        switch (token) {
        case Opt_read_only:
                rbd_opts->read_only = true;
                break;
        case Opt_read_write:
                rbd_opts->read_only = false;
                break;
        default:
                rbd_assert(false);
                break;
        }
        return 0;
}
/*
 * Get a ceph client with specific addr and configuration, if one does
 * not exist create it.  Either way, ceph_opts is consumed by this
 * function.
 */
static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
{
        struct rbd_client *rbdc;

        rbdc = rbd_client_find(ceph_opts);
        if (rbdc)       /* using an existing client */
                ceph_destroy_options(ceph_opts);
        else
                rbdc = rbd_client_create(ceph_opts);

        return rbdc;
}
/*
 * Destroy ceph client
 *
 * Caller must hold rbd_client_list_lock.
 */
static void rbd_client_release(struct kref *kref)
{
        struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);

        dout("%s: rbdc %p\n", __func__, rbdc);
        spin_lock(&rbd_client_list_lock);
        list_del(&rbdc->node);
        spin_unlock(&rbd_client_list_lock);

        ceph_destroy_client(rbdc->client);
        kfree(rbdc);
}

/*
 * Drop reference to ceph client node. If it's not referenced anymore, release
 * it.
 */
static void rbd_put_client(struct rbd_client *rbdc)
{
        if (rbdc)
                kref_put(&rbdc->kref, rbd_client_release);
}
static bool rbd_image_format_valid(u32 image_format)
{
        return image_format == 1 || image_format == 2;
}

static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
{
        size_t size;
        u32 snap_count;

        /* The header has to start with the magic rbd header text */
        if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)))
                return false;

        /* The bio layer requires at least sector-sized I/O */

        if (ondisk->options.order < SECTOR_SHIFT)
                return false;

        /* If we use u64 in a few spots we may be able to loosen this */

        if (ondisk->options.order > 8 * sizeof (int) - 1)
                return false;

        /*
         * The size of a snapshot header has to fit in a size_t, and
         * that limits the number of snapshots.
         */
        snap_count = le32_to_cpu(ondisk->snap_count);
        size = SIZE_MAX - sizeof (struct ceph_snap_context);
        if (snap_count > size / sizeof (__le64))
                return false;

        /*
         * Not only that, but the size of the entire snapshot
         * header must also be representable in a size_t.
         */
        size -= snap_count * sizeof (__le64);
        if ((u64) size < le64_to_cpu(ondisk->snap_names_len))
                return false;

        return true;
}
/*
 * Fill an rbd image header with information from the given format 1
 * on-disk header.
 */
static int rbd_header_from_disk(struct rbd_device *rbd_dev,
                                 struct rbd_image_header_ondisk *ondisk)
{
        struct rbd_image_header *header = &rbd_dev->header;
        bool first_time = header->object_prefix == NULL;
        struct ceph_snap_context *snapc;
        char *object_prefix = NULL;
        char *snap_names = NULL;
        u64 *snap_sizes = NULL;
        u32 snap_count;
        size_t size;
        int ret = -ENOMEM;
        u32 i;

        /* Allocate this now to avoid having to handle failure below */

        if (first_time) {
                size_t len;

                len = strnlen(ondisk->object_prefix,
                                sizeof (ondisk->object_prefix));
                object_prefix = kmalloc(len + 1, GFP_KERNEL);
                if (!object_prefix)
                        return -ENOMEM;
                memcpy(object_prefix, ondisk->object_prefix, len);
                object_prefix[len] = '\0';
        }

        /* Allocate the snapshot context and fill it in */

        snap_count = le32_to_cpu(ondisk->snap_count);
        snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
        if (!snapc)
                goto out_err;
        snapc->seq = le64_to_cpu(ondisk->snap_seq);
        if (snap_count) {
                struct rbd_image_snap_ondisk *snaps;
                u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);

                /* We'll keep a copy of the snapshot names... */

                if (snap_names_len > (u64)SIZE_MAX)
                        goto out_2big;
                snap_names = kmalloc(snap_names_len, GFP_KERNEL);
                if (!snap_names)
                        goto out_err;

                /* ...as well as the array of their sizes. */

                size = snap_count * sizeof (*header->snap_sizes);
                snap_sizes = kmalloc(size, GFP_KERNEL);
                if (!snap_sizes)
                        goto out_err;

                /*
                 * Copy the names, and fill in each snapshot's id
                 * and size.
                 *
                 * Note that rbd_dev_v1_header_info() guarantees the
                 * ondisk buffer we're working with has
                 * snap_names_len bytes beyond the end of the
                 * snapshot id array, so this memcpy() is safe.
                 */
                memcpy(snap_names, &ondisk->snaps[snap_count], snap_names_len);
                snaps = ondisk->snaps;
                for (i = 0; i < snap_count; i++) {
                        snapc->snaps[i] = le64_to_cpu(snaps[i].id);
                        snap_sizes[i] = le64_to_cpu(snaps[i].image_size);
                }
        }

        /* We won't fail any more, fill in the header */

        down_write(&rbd_dev->header_rwsem);
        if (first_time) {
                header->object_prefix = object_prefix;
                header->obj_order = ondisk->options.order;
                header->crypt_type = ondisk->options.crypt_type;
                header->comp_type = ondisk->options.comp_type;
                /* The rest aren't used for format 1 images */
                header->stripe_unit = 0;
                header->stripe_count = 0;
                header->features = 0;
        } else {
                ceph_put_snap_context(header->snapc);
                kfree(header->snap_names);
                kfree(header->snap_sizes);
        }

        /* The remaining fields always get updated (when we refresh) */

        header->image_size = le64_to_cpu(ondisk->image_size);
        header->snapc = snapc;
        header->snap_names = snap_names;
        header->snap_sizes = snap_sizes;

        /* Make sure mapping size is consistent with header info */

        if (rbd_dev->spec->snap_id == CEPH_NOSNAP || first_time)
                if (rbd_dev->mapping.size != header->image_size)
                        rbd_dev->mapping.size = header->image_size;

        up_write(&rbd_dev->header_rwsem);

        return 0;
out_2big:
        ret = -EIO;
out_err:
        kfree(snap_sizes);
        kfree(snap_names);
        ceph_put_snap_context(snapc);
        kfree(object_prefix);

        return ret;
}
static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which)
{
        const char *snap_name;

        rbd_assert(which < rbd_dev->header.snapc->num_snaps);

        /* Skip over names until we find the one we are looking for */

        snap_name = rbd_dev->header.snap_names;
        while (which--)
                snap_name += strlen(snap_name) + 1;

        return kstrdup(snap_name, GFP_KERNEL);
}
/*
 * Snapshot id comparison function for use with qsort()/bsearch().
 * Note that result is for snapshots in *descending* order.
 */
static int snapid_compare_reverse(const void *s1, const void *s2)
{
        u64 snap_id1 = *(u64 *)s1;
        u64 snap_id2 = *(u64 *)s2;

        if (snap_id1 < snap_id2)
                return 1;
        return snap_id1 == snap_id2 ? 0 : -1;
}
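
/*
 * Example of the reversed ordering (snapshot ids hypothetical): for
 * the descending array { 12, 7, 3 }, comparing key 7 against element
 * 12 returns 1 (search right) and against element 3 returns -1
 * (search left), so bsearch() finds 7 at index 1.
 */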
/*
 * Search a snapshot context to see if the given snapshot id is
 * present.
 *
 * Returns the position of the snapshot id in the array if it's found,
 * or BAD_SNAP_INDEX otherwise.
 *
 * Note: The snapshot array is kept sorted (by the osd) in
 * reverse order, highest snapshot id first.
 */
static u32 rbd_dev_snap_index(struct rbd_device *rbd_dev, u64 snap_id)
{
        struct ceph_snap_context *snapc = rbd_dev->header.snapc;
        u64 *found;

        found = bsearch(&snap_id, &snapc->snaps, snapc->num_snaps,
                                sizeof (snap_id), snapid_compare_reverse);

        return found ? (u32)(found - &snapc->snaps[0]) : BAD_SNAP_INDEX;
}

static const char *rbd_dev_v1_snap_name(struct rbd_device *rbd_dev,
                                        u64 snap_id)
{
        u32 which;

        which = rbd_dev_snap_index(rbd_dev, snap_id);
        if (which == BAD_SNAP_INDEX)
                return NULL;

        return _rbd_dev_v1_snap_name(rbd_dev, which);
}
static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
{
        if (snap_id == CEPH_NOSNAP)
                return RBD_SNAP_HEAD_NAME;

        rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
        if (rbd_dev->image_format == 1)
                return rbd_dev_v1_snap_name(rbd_dev, snap_id);

        return rbd_dev_v2_snap_name(rbd_dev, snap_id);
}
static int rbd_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
                                u64 *snap_size)
{
        rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
        if (snap_id == CEPH_NOSNAP) {
                *snap_size = rbd_dev->header.image_size;
        } else if (rbd_dev->image_format == 1) {
                u32 which;

                which = rbd_dev_snap_index(rbd_dev, snap_id);
                if (which == BAD_SNAP_INDEX)
                        return -ENOENT;

                *snap_size = rbd_dev->header.snap_sizes[which];
        } else {
                u64 size = 0;
                int ret;

                ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, NULL, &size);
                if (ret)
                        return ret;

                *snap_size = size;
        }
        return 0;
}

static int rbd_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
                        u64 *snap_features)
{
        rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
        if (snap_id == CEPH_NOSNAP) {
                *snap_features = rbd_dev->header.features;
        } else if (rbd_dev->image_format == 1) {
                *snap_features = 0;     /* No features for format 1 */
        } else {
                u64 features = 0;
                int ret;

                ret = _rbd_dev_v2_snap_features(rbd_dev, snap_id, &features);
                if (ret)
                        return ret;

                *snap_features = features;
        }
        return 0;
}
static int rbd_dev_mapping_set(struct rbd_device *rbd_dev)
{
        u64 snap_id = rbd_dev->spec->snap_id;
        u64 size = 0;
        u64 features = 0;
        int ret;

        ret = rbd_snap_size(rbd_dev, snap_id, &size);
        if (ret)
                return ret;
        ret = rbd_snap_features(rbd_dev, snap_id, &features);
        if (ret)
                return ret;

        rbd_dev->mapping.size = size;
        rbd_dev->mapping.features = features;

        return 0;
}

static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev)
{
        rbd_dev->mapping.size = 0;
        rbd_dev->mapping.features = 0;
}
static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
{
        char *name;
        u64 segment;
        int ret;
        char *name_format;

        name = kmem_cache_alloc(rbd_segment_name_cache, GFP_NOIO);
        if (!name)
                return NULL;
        segment = offset >> rbd_dev->header.obj_order;
        name_format = "%s.%012llx";
        if (rbd_dev->image_format == 2)
                name_format = "%s.%016llx";
        ret = snprintf(name, MAX_OBJ_NAME_SIZE + 1, name_format,
                        rbd_dev->header.object_prefix, segment);
        if (ret < 0 || ret > MAX_OBJ_NAME_SIZE) {
                pr_err("error formatting segment name for #%llu (%d)\n",
                        segment, ret);
                kmem_cache_free(rbd_segment_name_cache, name);
                name = NULL;
        }

        return name;
}

static void rbd_segment_name_free(const char *name)
{
        /* The explicit cast here is needed to drop the const qualifier */

        kmem_cache_free(rbd_segment_name_cache, (void *)name);
}
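
/*
 * Example of the names produced above (prefix and ids hypothetical):
 * with obj_order 22 (4 MiB objects), image byte 0x1400000 falls in
 * segment 5, yielding
 *
 *      format 1: "rb.0.1234.6b8b4567.000000000005"
 *      format 2: "rbd_data.101a6b8b4567.0000000000000005"
 *
 * (12 hex digits of segment number for format 1, 16 for format 2).
 */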
static u64 rbd_segment_offset(struct rbd_device *rbd_dev, u64 offset)
{
        u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;

        return offset & (segment_size - 1);
}

static u64 rbd_segment_length(struct rbd_device *rbd_dev,
                                u64 offset, u64 length)
{
        u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;

        offset &= segment_size - 1;

        rbd_assert(length <= U64_MAX - offset);
        if (offset + length > segment_size)
                length = segment_size - offset;

        return length;
}

/*
 * returns the size of an object in the image
 */
static u64 rbd_obj_bytes(struct rbd_image_header *header)
{
        return 1 << header->obj_order;
}
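
/*
 * Worked example for the segment helpers (obj_order 22, i.e. 4 MiB
 * objects): an image I/O at offset 0x3ff000 with length 0x2000 has
 * rbd_segment_offset() == 0x3ff000, and rbd_segment_length() clips
 * the length to 0x1000, the distance to the segment boundary; the
 * remaining 0x1000 bytes belong to the next object.
 */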
/*
 * bio helpers: put a chain of bios
 */
static void bio_chain_put(struct bio *chain)
{
        struct bio *tmp;

        while (chain) {
                tmp = chain;
                chain = chain->bi_next;
                bio_put(tmp);
        }
}
/*
 * zeros a bio chain, starting at specific offset
 */
static void zero_bio_chain(struct bio *chain, int start_ofs)
{
        struct bio_vec *bv;
        unsigned long flags;
        void *buf;
        int i;
        int pos = 0;

        while (chain) {
                bio_for_each_segment(bv, chain, i) {
                        if (pos + bv->bv_len > start_ofs) {
                                int remainder = max(start_ofs - pos, 0);
                                buf = bvec_kmap_irq(bv, &flags);
                                memset(buf + remainder, 0,
                                       bv->bv_len - remainder);
                                flush_dcache_page(bv->bv_page);
                                bvec_kunmap_irq(buf, &flags);
                        }
                        pos += bv->bv_len;
                }

                chain = chain->bi_next;
        }
}
/*
 * similar to zero_bio_chain(), zeros data defined by a page array,
 * starting at the given byte offset from the start of the array and
 * continuing up to the given end offset.  The pages array is
 * assumed to be big enough to hold all bytes up to the end.
 */
static void zero_pages(struct page **pages, u64 offset, u64 end)
{
        struct page **page = &pages[offset >> PAGE_SHIFT];

        rbd_assert(end > offset);
        rbd_assert(end - offset <= (u64)SIZE_MAX);
        while (offset < end) {
                size_t page_offset;
                size_t length;
                unsigned long flags;
                void *kaddr;

                page_offset = offset & ~PAGE_MASK;
                length = min_t(size_t, PAGE_SIZE - page_offset, end - offset);
                local_irq_save(flags);
                kaddr = kmap_atomic(*page);
                memset(kaddr + page_offset, 0, length);
                flush_dcache_page(*page);
                kunmap_atomic(kaddr);
                local_irq_restore(flags);

                offset += length;
                page++;
        }
}
/*
 * Clone a portion of a bio, starting at the given byte offset
 * and continuing for the number of bytes indicated.
 */
static struct bio *bio_clone_range(struct bio *bio_src,
                                        unsigned int offset,
                                        unsigned int len,
                                        gfp_t gfpmask)
{
        struct bio_vec *bv;
        unsigned int resid;
        unsigned short idx;
        unsigned int voff;
        unsigned short end_idx;
        unsigned short vcnt;
        struct bio *bio;

        /* Handle the easy case for the caller */

        if (!offset && len == bio_src->bi_size)
                return bio_clone(bio_src, gfpmask);

        if (WARN_ON_ONCE(!len))
                return NULL;
        if (WARN_ON_ONCE(len > bio_src->bi_size))
                return NULL;
        if (WARN_ON_ONCE(offset > bio_src->bi_size - len))
                return NULL;

        /* Find first affected segment... */

        resid = offset;
        bio_for_each_segment(bv, bio_src, idx) {
                if (resid < bv->bv_len)
                        break;
                resid -= bv->bv_len;
        }
        voff = resid;

        /* ...and the last affected segment */

        resid += len;
        __bio_for_each_segment(bv, bio_src, end_idx, idx) {
                if (resid <= bv->bv_len)
                        break;
                resid -= bv->bv_len;
        }
        vcnt = end_idx - idx + 1;

        /* Build the clone */

        bio = bio_alloc(gfpmask, (unsigned int) vcnt);
        if (!bio)
                return NULL;    /* ENOMEM */

        bio->bi_bdev = bio_src->bi_bdev;
        bio->bi_sector = bio_src->bi_sector + (offset >> SECTOR_SHIFT);
        bio->bi_rw = bio_src->bi_rw;
        bio->bi_flags |= 1 << BIO_CLONED;

        /*
         * Copy over our part of the bio_vec, then update the first
         * and last (or only) entries.
         */
        memcpy(&bio->bi_io_vec[0], &bio_src->bi_io_vec[idx],
                        vcnt * sizeof (struct bio_vec));
        bio->bi_io_vec[0].bv_offset += voff;
        if (vcnt > 1) {
                bio->bi_io_vec[0].bv_len -= voff;
                bio->bi_io_vec[vcnt - 1].bv_len = resid;
        } else {
                bio->bi_io_vec[0].bv_len = len;
        }

        bio->bi_vcnt = vcnt;
        bio->bi_size = len;
        bio->bi_idx = 0;

        return bio;
}
/*
 * Clone a portion of a bio chain, starting at the given byte offset
 * into the first bio in the source chain and continuing for the
 * number of bytes indicated.  The result is another bio chain of
 * exactly the given length, or a null pointer on error.
 *
 * The bio_src and offset parameters are both in-out.  On entry they
 * refer to the first source bio and the offset into that bio where
 * the start of data to be cloned is located.
 *
 * On return, bio_src is updated to refer to the bio in the source
 * chain that contains the first un-cloned byte, and *offset will
 * contain the offset of that byte within that bio.
 */
static struct bio *bio_chain_clone_range(struct bio **bio_src,
                                        unsigned int *offset,
                                        unsigned int len,
                                        gfp_t gfpmask)
{
        struct bio *bi = *bio_src;
        unsigned int off = *offset;
        struct bio *chain = NULL;
        struct bio **end;

        /* Build up a chain of clone bios up to the limit */

        if (!bi || off >= bi->bi_size || !len)
                return NULL;            /* Nothing to clone */

        end = &chain;
        while (len) {
                unsigned int bi_size;
                struct bio *bio;

                if (!bi) {
                        rbd_warn(NULL, "bio_chain exhausted with %u left", len);
                        goto out_err;   /* EINVAL; ran out of bio's */
                }
                bi_size = min_t(unsigned int, bi->bi_size - off, len);
                bio = bio_clone_range(bi, off, bi_size, gfpmask);
                if (!bio)
                        goto out_err;   /* ENOMEM */

                *end = bio;
                end = &bio->bi_next;

                off += bi_size;
                if (off == bi->bi_size) {
                        bi = bi->bi_next;
                        off = 0;
                }
                len -= bi_size;
        }
        *bio_src = bi;
        *offset = off;

        return chain;
out_err:
        bio_chain_put(chain);

        return NULL;
}
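
/*
 * Usage sketch for the in-out parameters (hypothetical caller; cf.
 * rbd_img_request_fill() below):
 *
 *      struct bio *bio_list = ...;     // head of the source chain
 *      unsigned int bio_offset = 0;
 *
 *      clone = bio_chain_clone_range(&bio_list, &bio_offset,
 *                                      clone_size, GFP_ATOMIC);
 *
 * On success bio_list and bio_offset now locate the first un-cloned
 * byte, so consecutive calls peel off consecutive ranges.
 */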
/*
 * The default/initial value for all object request flags is 0.  For
 * each flag, once its value is set to 1 it is never reset to 0
 * again.
 */
static void obj_request_img_data_set(struct rbd_obj_request *obj_request)
{
        if (test_and_set_bit(OBJ_REQ_IMG_DATA, &obj_request->flags)) {
                struct rbd_device *rbd_dev;

                rbd_dev = obj_request->img_request->rbd_dev;
                rbd_warn(rbd_dev, "obj_request %p already marked img_data\n",
                        obj_request);
        }
}

static bool obj_request_img_data_test(struct rbd_obj_request *obj_request)
{
        smp_mb();
        return test_bit(OBJ_REQ_IMG_DATA, &obj_request->flags) != 0;
}

static void obj_request_done_set(struct rbd_obj_request *obj_request)
{
        if (test_and_set_bit(OBJ_REQ_DONE, &obj_request->flags)) {
                struct rbd_device *rbd_dev = NULL;

                if (obj_request_img_data_test(obj_request))
                        rbd_dev = obj_request->img_request->rbd_dev;
                rbd_warn(rbd_dev, "obj_request %p already marked done\n",
                        obj_request);
        }
}

static bool obj_request_done_test(struct rbd_obj_request *obj_request)
{
        smp_mb();
        return test_bit(OBJ_REQ_DONE, &obj_request->flags) != 0;
}

/*
 * This sets the KNOWN flag after (possibly) setting the EXISTS
 * flag.  The latter is set based on the "exists" value provided.
 *
 * Note that for our purposes once an object exists it never goes
 * away again.  It's possible that the response from two existence
 * checks are separated by the creation of the target object, and
 * the first ("doesn't exist") response arrives *after* the second
 * ("does exist").  In that case we ignore the second one.
 */
static void obj_request_existence_set(struct rbd_obj_request *obj_request,
                                bool exists)
{
        if (exists)
                set_bit(OBJ_REQ_EXISTS, &obj_request->flags);
        set_bit(OBJ_REQ_KNOWN, &obj_request->flags);
        smp_mb();
}

static bool obj_request_known_test(struct rbd_obj_request *obj_request)
{
        smp_mb();
        return test_bit(OBJ_REQ_KNOWN, &obj_request->flags) != 0;
}

static bool obj_request_exists_test(struct rbd_obj_request *obj_request)
{
        smp_mb();
        return test_bit(OBJ_REQ_EXISTS, &obj_request->flags) != 0;
}
static void rbd_obj_request_get(struct rbd_obj_request *obj_request)
{
        dout("%s: obj %p (was %d)\n", __func__, obj_request,
                atomic_read(&obj_request->kref.refcount));
        kref_get(&obj_request->kref);
}

static void rbd_obj_request_destroy(struct kref *kref);
static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
{
        rbd_assert(obj_request != NULL);
        dout("%s: obj %p (was %d)\n", __func__, obj_request,
                atomic_read(&obj_request->kref.refcount));
        kref_put(&obj_request->kref, rbd_obj_request_destroy);
}

static bool img_request_child_test(struct rbd_img_request *img_request);
static void rbd_parent_request_destroy(struct kref *kref);
static void rbd_img_request_destroy(struct kref *kref);
static void rbd_img_request_put(struct rbd_img_request *img_request)
{
        rbd_assert(img_request != NULL);
        dout("%s: img %p (was %d)\n", __func__, img_request,
                atomic_read(&img_request->kref.refcount));
        if (img_request_child_test(img_request))
                kref_put(&img_request->kref, rbd_parent_request_destroy);
        else
                kref_put(&img_request->kref, rbd_img_request_destroy);
}
static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
                                        struct rbd_obj_request *obj_request)
{
        rbd_assert(obj_request->img_request == NULL);

        /* Image request now owns object's original reference */
        obj_request->img_request = img_request;
        obj_request->which = img_request->obj_request_count;
        rbd_assert(!obj_request_img_data_test(obj_request));
        obj_request_img_data_set(obj_request);
        rbd_assert(obj_request->which != BAD_WHICH);
        img_request->obj_request_count++;
        list_add_tail(&obj_request->links, &img_request->obj_requests);
        dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
                obj_request->which);
}

static inline void rbd_img_obj_request_del(struct rbd_img_request *img_request,
                                        struct rbd_obj_request *obj_request)
{
        rbd_assert(obj_request->which != BAD_WHICH);

        dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
                obj_request->which);
        list_del(&obj_request->links);
        rbd_assert(img_request->obj_request_count > 0);
        img_request->obj_request_count--;
        rbd_assert(obj_request->which == img_request->obj_request_count);
        obj_request->which = BAD_WHICH;
        rbd_assert(obj_request_img_data_test(obj_request));
        rbd_assert(obj_request->img_request == img_request);
        obj_request->img_request = NULL;
        obj_request->callback = NULL;
        rbd_obj_request_put(obj_request);
}
static bool obj_request_type_valid(enum obj_request_type type)
{
        switch (type) {
        case OBJ_REQUEST_NODATA:
        case OBJ_REQUEST_BIO:
        case OBJ_REQUEST_PAGES:
                return true;
        default:
                return false;
        }
}
static int rbd_obj_request_submit(struct ceph_osd_client *osdc,
                                struct rbd_obj_request *obj_request)
{
        dout("%s: osdc %p obj %p\n", __func__, osdc, obj_request);

        return ceph_osdc_start_request(osdc, obj_request->osd_req, false);
}
static void rbd_img_request_complete(struct rbd_img_request *img_request)
{

        dout("%s: img %p\n", __func__, img_request);

        /*
         * If no error occurred, compute the aggregate transfer
         * count for the image request.  We could instead use
         * atomic64_cmpxchg() to update it as each object request
         * completes; not clear which way is better off hand.
         */
        if (!img_request->result) {
                struct rbd_obj_request *obj_request;
                u64 xferred = 0;

                for_each_obj_request(img_request, obj_request)
                        xferred += obj_request->xferred;
                img_request->xferred = xferred;
        }

        if (img_request->callback)
                img_request->callback(img_request);
        else
                rbd_img_request_put(img_request);
}
/* Caller is responsible for rbd_obj_request_destroy(obj_request) */

static int rbd_obj_request_wait(struct rbd_obj_request *obj_request)
{
        dout("%s: obj %p\n", __func__, obj_request);

        return wait_for_completion_interruptible(&obj_request->completion);
}
/*
 * The default/initial value for all image request flags is 0.  Each
 * is conditionally set to 1 at image request initialization time
 * and currently never changes thereafter.
 */
static void img_request_write_set(struct rbd_img_request *img_request)
{
        set_bit(IMG_REQ_WRITE, &img_request->flags);
        smp_mb();
}

static bool img_request_write_test(struct rbd_img_request *img_request)
{
        smp_mb();
        return test_bit(IMG_REQ_WRITE, &img_request->flags) != 0;
}

static void img_request_child_set(struct rbd_img_request *img_request)
{
        set_bit(IMG_REQ_CHILD, &img_request->flags);
        smp_mb();
}

static void img_request_child_clear(struct rbd_img_request *img_request)
{
        clear_bit(IMG_REQ_CHILD, &img_request->flags);
        smp_mb();
}

static bool img_request_child_test(struct rbd_img_request *img_request)
{
        smp_mb();
        return test_bit(IMG_REQ_CHILD, &img_request->flags) != 0;
}

static void img_request_layered_set(struct rbd_img_request *img_request)
{
        set_bit(IMG_REQ_LAYERED, &img_request->flags);
        smp_mb();
}

static void img_request_layered_clear(struct rbd_img_request *img_request)
{
        clear_bit(IMG_REQ_LAYERED, &img_request->flags);
        smp_mb();
}

static bool img_request_layered_test(struct rbd_img_request *img_request)
{
        smp_mb();
        return test_bit(IMG_REQ_LAYERED, &img_request->flags) != 0;
}
static void
rbd_img_obj_request_read_callback(struct rbd_obj_request *obj_request)
{
        u64 xferred = obj_request->xferred;
        u64 length = obj_request->length;

        dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
                obj_request, obj_request->img_request, obj_request->result,
                xferred, length);
        /*
         * ENOENT means a hole in the image.  We zero-fill the
         * entire length of the request.  A short read also implies
         * zero-fill to the end of the request.  Either way we
         * update the xferred count to indicate the whole request
         * was satisfied.
         */
        rbd_assert(obj_request->type != OBJ_REQUEST_NODATA);
        if (obj_request->result == -ENOENT) {
                if (obj_request->type == OBJ_REQUEST_BIO)
                        zero_bio_chain(obj_request->bio_list, 0);
                else
                        zero_pages(obj_request->pages, 0, length);
                obj_request->result = 0;
                obj_request->xferred = length;
        } else if (xferred < length && !obj_request->result) {
                if (obj_request->type == OBJ_REQUEST_BIO)
                        zero_bio_chain(obj_request->bio_list, xferred);
                else
                        zero_pages(obj_request->pages, xferred, length);
                obj_request->xferred = length;
        }
        obj_request_done_set(obj_request);
}
static void rbd_obj_request_complete(struct rbd_obj_request *obj_request)
{
        dout("%s: obj %p cb %p\n", __func__, obj_request,
                obj_request->callback);
        if (obj_request->callback)
                obj_request->callback(obj_request);
        else
                complete_all(&obj_request->completion);
}

static void rbd_osd_trivial_callback(struct rbd_obj_request *obj_request)
{
        dout("%s: obj %p\n", __func__, obj_request);
        obj_request_done_set(obj_request);
}
static void rbd_osd_read_callback(struct rbd_obj_request *obj_request)
{
        struct rbd_img_request *img_request = NULL;
        struct rbd_device *rbd_dev = NULL;
        bool layered = false;

        if (obj_request_img_data_test(obj_request)) {
                img_request = obj_request->img_request;
                layered = img_request && img_request_layered_test(img_request);
                rbd_dev = img_request->rbd_dev;
        }

        dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
                obj_request, img_request, obj_request->result,
                obj_request->xferred, obj_request->length);
        if (layered && obj_request->result == -ENOENT &&
                        obj_request->img_offset < rbd_dev->parent_overlap)
                rbd_img_parent_read(obj_request);
        else if (img_request)
                rbd_img_obj_request_read_callback(obj_request);
        else
                obj_request_done_set(obj_request);
}
static void rbd_osd_write_callback(struct rbd_obj_request *obj_request)
{
        dout("%s: obj %p result %d %llu\n", __func__, obj_request,
                obj_request->result, obj_request->length);
        /*
         * There is no such thing as a successful short write.  Set
         * it to our originally-requested length.
         */
        obj_request->xferred = obj_request->length;
        obj_request_done_set(obj_request);
}

/*
 * For a simple stat call there's nothing to do.  We'll do more if
 * this is part of a write sequence for a layered image.
 */
static void rbd_osd_stat_callback(struct rbd_obj_request *obj_request)
{
        dout("%s: obj %p\n", __func__, obj_request);
        obj_request_done_set(obj_request);
}
static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
                                struct ceph_msg *msg)
{
        struct rbd_obj_request *obj_request = osd_req->r_priv;
        u16 opcode;

        dout("%s: osd_req %p msg %p\n", __func__, osd_req, msg);
        rbd_assert(osd_req == obj_request->osd_req);
        if (obj_request_img_data_test(obj_request)) {
                rbd_assert(obj_request->img_request);
                rbd_assert(obj_request->which != BAD_WHICH);
        } else {
                rbd_assert(obj_request->which == BAD_WHICH);
        }

        if (osd_req->r_result < 0)
                obj_request->result = osd_req->r_result;

        BUG_ON(osd_req->r_num_ops > 2);

        /*
         * We support a 64-bit length, but ultimately it has to be
         * passed to blk_end_request(), which takes an unsigned int.
         */
        obj_request->xferred = osd_req->r_reply_op_len[0];
        rbd_assert(obj_request->xferred < (u64)UINT_MAX);
        opcode = osd_req->r_ops[0].op;
        switch (opcode) {
        case CEPH_OSD_OP_READ:
                rbd_osd_read_callback(obj_request);
                break;
        case CEPH_OSD_OP_WRITE:
                rbd_osd_write_callback(obj_request);
                break;
        case CEPH_OSD_OP_STAT:
                rbd_osd_stat_callback(obj_request);
                break;
        case CEPH_OSD_OP_CALL:
        case CEPH_OSD_OP_NOTIFY_ACK:
        case CEPH_OSD_OP_WATCH:
                rbd_osd_trivial_callback(obj_request);
                break;
        default:
                rbd_warn(NULL, "%s: unsupported op %hu\n",
                        obj_request->object_name, (unsigned short) opcode);
                break;
        }

        if (obj_request_done_test(obj_request))
                rbd_obj_request_complete(obj_request);
}
static void rbd_osd_req_format_read(struct rbd_obj_request *obj_request)
{
        struct rbd_img_request *img_request = obj_request->img_request;
        struct ceph_osd_request *osd_req = obj_request->osd_req;
        u64 snap_id;

        rbd_assert(osd_req != NULL);

        snap_id = img_request ? img_request->snap_id : CEPH_NOSNAP;
        ceph_osdc_build_request(osd_req, obj_request->offset,
                        NULL, snap_id, NULL);
}

static void rbd_osd_req_format_write(struct rbd_obj_request *obj_request)
{
        struct rbd_img_request *img_request = obj_request->img_request;
        struct ceph_osd_request *osd_req = obj_request->osd_req;
        struct ceph_snap_context *snapc;
        struct timespec mtime = CURRENT_TIME;

        rbd_assert(osd_req != NULL);

        snapc = img_request ? img_request->snapc : NULL;
        ceph_osdc_build_request(osd_req, obj_request->offset,
                        snapc, CEPH_NOSNAP, &mtime);
}
static struct ceph_osd_request *rbd_osd_req_create(
                                        struct rbd_device *rbd_dev,
                                        bool write_request,
                                        struct rbd_obj_request *obj_request)
{
        struct ceph_snap_context *snapc = NULL;
        struct ceph_osd_client *osdc;
        struct ceph_osd_request *osd_req;

        if (obj_request_img_data_test(obj_request)) {
                struct rbd_img_request *img_request = obj_request->img_request;

                rbd_assert(write_request ==
                                img_request_write_test(img_request));
                if (write_request)
                        snapc = img_request->snapc;
        }

        /* Allocate and initialize the request, for the single op */

        osdc = &rbd_dev->rbd_client->client->osdc;
        osd_req = ceph_osdc_alloc_request(osdc, snapc, 1, false, GFP_ATOMIC);
        if (!osd_req)
                return NULL;    /* ENOMEM */

        if (write_request)
                osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
        else
                osd_req->r_flags = CEPH_OSD_FLAG_READ;

        osd_req->r_callback = rbd_osd_req_callback;
        osd_req->r_priv = obj_request;

        osd_req->r_oid_len = strlen(obj_request->object_name);
        rbd_assert(osd_req->r_oid_len < sizeof (osd_req->r_oid));
        memcpy(osd_req->r_oid, obj_request->object_name, osd_req->r_oid_len);

        osd_req->r_file_layout = rbd_dev->layout;       /* struct */

        return osd_req;
}
/*
 * Create a copyup osd request based on the information in the
 * object request supplied.  A copyup request has two osd ops,
 * a copyup method call, and a "normal" write request.
 */
static struct ceph_osd_request *
rbd_osd_req_create_copyup(struct rbd_obj_request *obj_request)
{
        struct rbd_img_request *img_request;
        struct ceph_snap_context *snapc;
        struct rbd_device *rbd_dev;
        struct ceph_osd_client *osdc;
        struct ceph_osd_request *osd_req;

        rbd_assert(obj_request_img_data_test(obj_request));
        img_request = obj_request->img_request;
        rbd_assert(img_request);
        rbd_assert(img_request_write_test(img_request));

        /* Allocate and initialize the request, for the two ops */

        snapc = img_request->snapc;
        rbd_dev = img_request->rbd_dev;
        osdc = &rbd_dev->rbd_client->client->osdc;
        osd_req = ceph_osdc_alloc_request(osdc, snapc, 2, false, GFP_ATOMIC);
        if (!osd_req)
                return NULL;    /* ENOMEM */

        osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
        osd_req->r_callback = rbd_osd_req_callback;
        osd_req->r_priv = obj_request;

        osd_req->r_oid_len = strlen(obj_request->object_name);
        rbd_assert(osd_req->r_oid_len < sizeof (osd_req->r_oid));
        memcpy(osd_req->r_oid, obj_request->object_name, osd_req->r_oid_len);

        osd_req->r_file_layout = rbd_dev->layout;       /* struct */

        return osd_req;
}
static void rbd_osd_req_destroy(struct ceph_osd_request *osd_req)
{
        ceph_osdc_put_request(osd_req);
}
/* object_name is assumed to be a non-null pointer and NUL-terminated */

static struct rbd_obj_request *rbd_obj_request_create(const char *object_name,
                                                u64 offset, u64 length,
                                                enum obj_request_type type)
{
        struct rbd_obj_request *obj_request;
        size_t size;
        char *name;

        rbd_assert(obj_request_type_valid(type));

        size = strlen(object_name) + 1;
        name = kmalloc(size, GFP_KERNEL);
        if (!name)
                return NULL;

        obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_KERNEL);
        if (!obj_request) {
                kfree(name);
                return NULL;
        }

        obj_request->object_name = memcpy(name, object_name, size);
        obj_request->offset = offset;
        obj_request->length = length;
        obj_request->flags = 0;
        obj_request->which = BAD_WHICH;
        obj_request->type = type;
        INIT_LIST_HEAD(&obj_request->links);
        init_completion(&obj_request->completion);
        kref_init(&obj_request->kref);

        dout("%s: \"%s\" %llu/%llu %d -> obj %p\n", __func__, object_name,
                offset, length, (int)type, obj_request);

        return obj_request;
}
static void rbd_obj_request_destroy(struct kref *kref)
{
        struct rbd_obj_request *obj_request;

        obj_request = container_of(kref, struct rbd_obj_request, kref);

        dout("%s: obj %p\n", __func__, obj_request);

        rbd_assert(obj_request->img_request == NULL);
        rbd_assert(obj_request->which == BAD_WHICH);

        if (obj_request->osd_req)
                rbd_osd_req_destroy(obj_request->osd_req);

        rbd_assert(obj_request_type_valid(obj_request->type));
        switch (obj_request->type) {
        case OBJ_REQUEST_NODATA:
                break;          /* Nothing to do */
        case OBJ_REQUEST_BIO:
                if (obj_request->bio_list)
                        bio_chain_put(obj_request->bio_list);
                break;
        case OBJ_REQUEST_PAGES:
                if (obj_request->pages)
                        ceph_release_page_vector(obj_request->pages,
                                                obj_request->page_count);
                break;
        }

        kfree(obj_request->object_name);
        obj_request->object_name = NULL;
        kmem_cache_free(rbd_obj_request_cache, obj_request);
}
/* It's OK to call this for a device with no parent */

static void rbd_spec_put(struct rbd_spec *spec);
static void rbd_dev_unparent(struct rbd_device *rbd_dev)
{
        rbd_dev_remove_parent(rbd_dev);
        rbd_spec_put(rbd_dev->parent_spec);
        rbd_dev->parent_spec = NULL;
        rbd_dev->parent_overlap = 0;
}
/*
 * Parent image reference counting is used to determine when an
 * image's parent fields can be safely torn down--after there are no
 * more in-flight requests to the parent image.  When the last
 * reference is dropped, cleaning them up is safe.
 */
static void rbd_dev_parent_put(struct rbd_device *rbd_dev)
{
        int counter;

        if (!rbd_dev->parent_spec)
                return;

        counter = atomic_dec_return_safe(&rbd_dev->parent_ref);
        if (counter > 0)
                return;

        /* Last reference; clean up parent data structures */

        if (!counter)
                rbd_dev_unparent(rbd_dev);
        else
                rbd_warn(rbd_dev, "parent reference underflow\n");
}
/*
 * If an image has a non-zero parent overlap, get a reference to its
 * parent.
 *
 * We must get the reference before checking for the overlap to
 * coordinate properly with zeroing the parent overlap in
 * rbd_dev_v2_parent_info() when an image gets flattened.  We
 * drop it again if there is no overlap.
 *
 * Returns true if the rbd device has a parent with a non-zero
 * overlap and a reference for it was successfully taken, or
 * false otherwise.
 */
static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
{
        int counter;

        if (!rbd_dev->parent_spec)
                return false;

        counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
        if (counter > 0 && rbd_dev->parent_overlap)
                return true;

        /* Image was flattened, but parent is not yet torn down */

        if (counter < 0)
                rbd_warn(rbd_dev, "parent reference overflow\n");

        return false;
}
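
/*
 * Sketch of the reference pairing (mirroring
 * rbd_img_request_create()/rbd_img_request_destroy() below): a
 * successful rbd_dev_parent_get() must be balanced by exactly one
 * rbd_dev_parent_put() once the layered request is done.
 *
 *      if (rbd_dev_parent_get(rbd_dev))
 *              img_request_layered_set(img_request);
 *      ...
 *      if (img_request_layered_test(img_request)) {
 *              img_request_layered_clear(img_request);
 *              rbd_dev_parent_put(img_request->rbd_dev);
 *      }
 */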
/*
 * Caller is responsible for filling in the list of object requests
 * that comprises the image request, and the Linux request pointer
 * (if there is one).
 */
static struct rbd_img_request *rbd_img_request_create(
                                        struct rbd_device *rbd_dev,
                                        u64 offset, u64 length,
                                        bool write_request)
{
        struct rbd_img_request *img_request;

        img_request = kmem_cache_alloc(rbd_img_request_cache, GFP_ATOMIC);
        if (!img_request)
                return NULL;

        if (write_request) {
                down_read(&rbd_dev->header_rwsem);
                ceph_get_snap_context(rbd_dev->header.snapc);
                up_read(&rbd_dev->header_rwsem);
        }

        img_request->rq = NULL;
        img_request->rbd_dev = rbd_dev;
        img_request->offset = offset;
        img_request->length = length;
        img_request->flags = 0;
        if (write_request) {
                img_request_write_set(img_request);
                img_request->snapc = rbd_dev->header.snapc;
        } else {
                img_request->snap_id = rbd_dev->spec->snap_id;
        }
        if (rbd_dev_parent_get(rbd_dev))
                img_request_layered_set(img_request);
        spin_lock_init(&img_request->completion_lock);
        img_request->next_completion = 0;
        img_request->callback = NULL;
        img_request->result = 0;
        img_request->obj_request_count = 0;
        INIT_LIST_HEAD(&img_request->obj_requests);
        kref_init(&img_request->kref);

        dout("%s: rbd_dev %p %s %llu/%llu -> img %p\n", __func__, rbd_dev,
                write_request ? "write" : "read", offset, length,
                img_request);

        return img_request;
}
static void rbd_img_request_destroy(struct kref *kref)
{
        struct rbd_img_request *img_request;
        struct rbd_obj_request *obj_request;
        struct rbd_obj_request *next_obj_request;

        img_request = container_of(kref, struct rbd_img_request, kref);

        dout("%s: img %p\n", __func__, img_request);

        for_each_obj_request_safe(img_request, obj_request, next_obj_request)
                rbd_img_obj_request_del(img_request, obj_request);
        rbd_assert(img_request->obj_request_count == 0);

        if (img_request_layered_test(img_request)) {
                img_request_layered_clear(img_request);
                rbd_dev_parent_put(img_request->rbd_dev);
        }

        if (img_request_write_test(img_request))
                ceph_put_snap_context(img_request->snapc);

        kmem_cache_free(rbd_img_request_cache, img_request);
}
static struct rbd_img_request *rbd_parent_request_create(
                                        struct rbd_obj_request *obj_request,
                                        u64 img_offset, u64 length)
{
        struct rbd_img_request *parent_request;
        struct rbd_device *rbd_dev;

        rbd_assert(obj_request->img_request);
        rbd_dev = obj_request->img_request->rbd_dev;

        parent_request = rbd_img_request_create(rbd_dev->parent,
                                                img_offset, length, false);
        if (!parent_request)
                return NULL;

        img_request_child_set(parent_request);
        rbd_obj_request_get(obj_request);
        parent_request->obj_request = obj_request;

        return parent_request;
}
static void rbd_parent_request_destroy(struct kref *kref)
{
        struct rbd_img_request *parent_request;
        struct rbd_obj_request *orig_request;

        parent_request = container_of(kref, struct rbd_img_request, kref);
        orig_request = parent_request->obj_request;

        parent_request->obj_request = NULL;
        rbd_obj_request_put(orig_request);
        img_request_child_clear(parent_request);

        rbd_img_request_destroy(kref);
}
static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request)
{
        struct rbd_img_request *img_request;
        unsigned int xferred;
        int result;
        bool more;

        rbd_assert(obj_request_img_data_test(obj_request));
        img_request = obj_request->img_request;

        rbd_assert(obj_request->xferred <= (u64)UINT_MAX);
        xferred = (unsigned int)obj_request->xferred;
        result = obj_request->result;
        if (result) {
                struct rbd_device *rbd_dev = img_request->rbd_dev;

                rbd_warn(rbd_dev, "%s %llx at %llx (%llx)\n",
                        img_request_write_test(img_request) ? "write" : "read",
                        obj_request->length, obj_request->img_offset,
                        obj_request->offset);
                rbd_warn(rbd_dev, "  result %d xferred %x\n",
                        result, xferred);
                if (!img_request->result)
                        img_request->result = result;
        }

        /* Image object requests don't own their page array */

        if (obj_request->type == OBJ_REQUEST_PAGES) {
                obj_request->pages = NULL;
                obj_request->page_count = 0;
        }

        if (img_request_child_test(img_request)) {
                rbd_assert(img_request->obj_request != NULL);
                more = obj_request->which < img_request->obj_request_count - 1;
        } else {
                rbd_assert(img_request->rq != NULL);
                more = blk_end_request(img_request->rq, result, xferred);
        }

        return more;
}
static void rbd_img_obj_callback(struct rbd_obj_request *obj_request)
{
        struct rbd_img_request *img_request;
        u32 which = obj_request->which;
        bool more = true;

        rbd_assert(obj_request_img_data_test(obj_request));
        img_request = obj_request->img_request;

        dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
        rbd_assert(img_request != NULL);
        rbd_assert(img_request->obj_request_count > 0);
        rbd_assert(which != BAD_WHICH);
        rbd_assert(which < img_request->obj_request_count);
        rbd_assert(which >= img_request->next_completion);

        spin_lock_irq(&img_request->completion_lock);
        if (which != img_request->next_completion)
                goto out;

        for_each_obj_request_from(img_request, obj_request) {
                rbd_assert(more);
                rbd_assert(which < img_request->obj_request_count);

                if (!obj_request_done_test(obj_request))
                        break;
                more = rbd_img_obj_end_request(obj_request);
                which++;
        }

        rbd_assert(more ^ (which == img_request->obj_request_count));
        img_request->next_completion = which;
out:
        spin_unlock_irq(&img_request->completion_lock);

        if (!more)
                rbd_img_request_complete(img_request);
}
2161 * Split up an image request into one or more object requests, each
2162 * to a different object. The "type" parameter indicates whether
2163 * "data_desc" is the pointer to the head of a list of bio
2164 * structures, or the base of a page array. In either case this
2165 * function assumes data_desc describes memory sufficient to hold
2166 * all data described by the image request.
2168 static int rbd_img_request_fill(struct rbd_img_request *img_request,
2169 enum obj_request_type type,
2172 struct rbd_device *rbd_dev = img_request->rbd_dev;
2173 struct rbd_obj_request *obj_request = NULL;
2174 struct rbd_obj_request *next_obj_request;
2175 bool write_request = img_request_write_test(img_request);
2176 struct bio *bio_list;
2177 unsigned int bio_offset = 0;
2178 struct page **pages;
2183 dout("%s: img %p type %d data_desc %p\n", __func__, img_request,
2184 (int)type, data_desc);
2186 opcode = write_request ? CEPH_OSD_OP_WRITE : CEPH_OSD_OP_READ;
2187 img_offset = img_request->offset;
2188 resid = img_request->length;
2189 rbd_assert(resid > 0);
2191 if (type == OBJ_REQUEST_BIO) {
2192 bio_list = data_desc;
2193 rbd_assert(img_offset == bio_list->bi_sector << SECTOR_SHIFT);
2195 rbd_assert(type == OBJ_REQUEST_PAGES);
2200 struct ceph_osd_request *osd_req;
2201 const char *object_name;
2205 object_name = rbd_segment_name(rbd_dev, img_offset);
2208 offset = rbd_segment_offset(rbd_dev, img_offset);
2209 length = rbd_segment_length(rbd_dev, img_offset, resid);
2210 obj_request = rbd_obj_request_create(object_name,
2211 offset, length, type);
2212 /* object request has its own copy of the object name */
2213 rbd_segment_name_free(object_name);
2217 if (type == OBJ_REQUEST_BIO) {
2218 unsigned int clone_size;
2220 rbd_assert(length <= (u64)UINT_MAX);
2221 clone_size = (unsigned int)length;
2222 obj_request->bio_list =
2223 bio_chain_clone_range(&bio_list,
2227 if (!obj_request->bio_list)
2230 unsigned int page_count;
2232 obj_request->pages = pages;
2233 page_count = (u32)calc_pages_for(offset, length);
2234 obj_request->page_count = page_count;
2235 if ((offset + length) & ~PAGE_MASK)
2236 page_count--; /* more on last page */
2237 pages += page_count;
2240 osd_req = rbd_osd_req_create(rbd_dev, write_request,
2244 obj_request->osd_req = osd_req;
2245 obj_request->callback = rbd_img_obj_callback;
2247 osd_req_op_extent_init(osd_req, 0, opcode, offset, length,
2249 if (type == OBJ_REQUEST_BIO)
2250 osd_req_op_extent_osd_data_bio(osd_req, 0,
2251 obj_request->bio_list, length);
2253 osd_req_op_extent_osd_data_pages(osd_req, 0,
2254 obj_request->pages, length,
2255 offset & ~PAGE_MASK, false, false);
2258 * set obj_request->img_request before formatting
2259 * the osd_request so that it gets the right snapc
2261 rbd_img_obj_request_add(img_request, obj_request);
2263 rbd_osd_req_format_write(obj_request);
2265 rbd_osd_req_format_read(obj_request);
2267 obj_request->img_offset = img_offset;
2269 img_offset += length;
2276 rbd_obj_request_put(obj_request);
2278 for_each_obj_request_safe(img_request, obj_request, next_obj_request)
2279 rbd_obj_request_put(obj_request);
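/*
 * A minimal sketch of how the arithmetic above splits one image
 * request across an object boundary, assuming the default 4 MB object
 * size (obj_order = 22); the rbd_segment_offset()/rbd_segment_length()
 * helpers reduce to essentially this masking.  The values are
 * hypothetical and the block is not compiled:
 */
#if 0
	u64 obj_size = 1ULL << 22;	/* default 4 MB objects */
	u64 img_offset = 0x3ff000;	/* 4 KB before object 0 ends */
	u64 resid = 0x3000;		/* 12 KB left to transfer */
	u64 offset, length;

	/* first object request covers the tail of object 0 */
	offset = img_offset & (obj_size - 1);		/* 0x3ff000 */
	length = min(resid, obj_size - offset);		/* 0x1000 */

	/* second object request covers the first 8 KB of object 1 */
	img_offset += length;		/* 0x400000, i.e. object 1 */
	resid -= length;		/* 0x2000 */
#endif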
2285 rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request)
2287 struct rbd_img_request *img_request;
2288 struct rbd_device *rbd_dev;
2289 struct page **pages;
2292 rbd_assert(obj_request->type == OBJ_REQUEST_BIO);
2293 rbd_assert(obj_request_img_data_test(obj_request));
2294 img_request = obj_request->img_request;
2295 rbd_assert(img_request);
2297 rbd_dev = img_request->rbd_dev;
2298 rbd_assert(rbd_dev);
2300 pages = obj_request->copyup_pages;
2301 rbd_assert(pages != NULL);
2302 obj_request->copyup_pages = NULL;
2303 page_count = obj_request->copyup_page_count;
2304 rbd_assert(page_count);
2305 obj_request->copyup_page_count = 0;
2306 ceph_release_page_vector(pages, page_count);
2309 * We want the transfer count to reflect the size of the
2310 * original write request. There is no such thing as a
2311 * successful short write, so if the request was successful
2312 * we can just set it to the originally-requested length.
2314 if (!obj_request->result)
2315 obj_request->xferred = obj_request->length;
2317 /* Finish up with the normal image object callback */
2319 rbd_img_obj_callback(obj_request);
2323 rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)
2325 struct rbd_obj_request *orig_request;
2326 struct ceph_osd_request *osd_req;
2327 struct ceph_osd_client *osdc;
2328 struct rbd_device *rbd_dev;
2329 struct page **pages;
2336 rbd_assert(img_request_child_test(img_request));
2338 /* First get what we need from the image request */
2340 pages = img_request->copyup_pages;
2341 rbd_assert(pages != NULL);
2342 img_request->copyup_pages = NULL;
2343 page_count = img_request->copyup_page_count;
2344 rbd_assert(page_count);
2345 img_request->copyup_page_count = 0;
2347 orig_request = img_request->obj_request;
2348 rbd_assert(orig_request != NULL);
2349 rbd_assert(obj_request_type_valid(orig_request->type));
2350 img_result = img_request->result;
2351 parent_length = img_request->length;
2352 rbd_assert(parent_length == img_request->xferred);
2353 rbd_img_request_put(img_request);
2355 rbd_assert(orig_request->img_request);
2356 rbd_dev = orig_request->img_request->rbd_dev;
2357 rbd_assert(rbd_dev);
2360 * If the overlap has become 0 (most likely because the
2361 * image has been flattened) we need to free the pages
2362 * and re-submit the original write request.
2364 if (!rbd_dev->parent_overlap) {
2365 struct ceph_osd_client *osdc;
2367 ceph_release_page_vector(pages, page_count);
2368 osdc = &rbd_dev->rbd_client->client->osdc;
2369 img_result = rbd_obj_request_submit(osdc, orig_request);
2378 * The original osd request is of no use to us any more.
2379 * We need a new one that can hold the two ops in a copyup
2380 * request. Allocate the new copyup osd request for the
2381 * original request, and release the old one.
2383 img_result = -ENOMEM;
2384 osd_req = rbd_osd_req_create_copyup(orig_request);
2387 rbd_osd_req_destroy(orig_request->osd_req);
2388 orig_request->osd_req = osd_req;
2389 orig_request->copyup_pages = pages;
2390 orig_request->copyup_page_count = page_count;
2392 /* Initialize the copyup op */
2394 osd_req_op_cls_init(osd_req, 0, CEPH_OSD_OP_CALL, "rbd", "copyup");
2395 osd_req_op_cls_request_data_pages(osd_req, 0, pages, parent_length, 0,
2398 /* Then the original write request op */
2400 offset = orig_request->offset;
2401 length = orig_request->length;
2402 osd_req_op_extent_init(osd_req, 1, CEPH_OSD_OP_WRITE,
2403 offset, length, 0, 0);
2404 if (orig_request->type == OBJ_REQUEST_BIO)
2405 osd_req_op_extent_osd_data_bio(osd_req, 1,
2406 orig_request->bio_list, length);
2408 osd_req_op_extent_osd_data_pages(osd_req, 1,
2409 orig_request->pages, length,
2410 offset & ~PAGE_MASK, false, false);
2412 rbd_osd_req_format_write(orig_request);
2414 /* All set, send it off. */
2416 orig_request->callback = rbd_img_obj_copyup_callback;
2417 osdc = &rbd_dev->rbd_client->client->osdc;
2418 img_result = rbd_obj_request_submit(osdc, orig_request);
2422 /* Record the error code and complete the request */
2424 orig_request->result = img_result;
2425 orig_request->xferred = 0;
2426 obj_request_done_set(orig_request);
2427 rbd_obj_request_complete(orig_request);
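/*
 * The request built above thus carries two ops:
 *
 *   op 0: CEPH_OSD_OP_CALL  rbd.copyup   (parent data as request data)
 *   op 1: CEPH_OSD_OP_WRITE offset/length (the original write payload)
 *
 * so that the osd-side rbd class can populate the object from the
 * parent data if it still does not exist, and the client's write is
 * then applied on top within the same osd request.
 */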
2431 * Read from the parent image the range of data that covers the
2432 * entire target of the given object request. This is used for
2433 * satisfying a layered image write request when the target of an
2434 * object request from the image request does not exist.
2436 * A page array big enough to hold the returned data is allocated
2437 * and supplied to rbd_img_request_fill() as the "data descriptor."
2438 * When the read completes, this page array will be transferred to
2439 * the original object request for the copyup operation.
2441 * If an error occurs, record it as the result of the original
2442 * object request and mark it done so it gets completed.
2444 static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request)
2446 struct rbd_img_request *img_request = NULL;
2447 struct rbd_img_request *parent_request = NULL;
2448 struct rbd_device *rbd_dev;
2451 struct page **pages = NULL;
2455 rbd_assert(obj_request_img_data_test(obj_request));
2456 rbd_assert(obj_request_type_valid(obj_request->type));
2458 img_request = obj_request->img_request;
2459 rbd_assert(img_request != NULL);
2460 rbd_dev = img_request->rbd_dev;
2461 rbd_assert(rbd_dev->parent != NULL);
2464 * Determine the byte range covered by the object in the
2465 * child image to which the original request was to be sent.
2467 img_offset = obj_request->img_offset - obj_request->offset;
2468 length = (u64)1 << rbd_dev->header.obj_order;
2471 * There is no defined parent data beyond the parent
2472 * overlap, so limit what we read at that boundary if necessary.
2475 if (img_offset + length > rbd_dev->parent_overlap) {
2476 rbd_assert(img_offset < rbd_dev->parent_overlap);
2477 length = rbd_dev->parent_overlap - img_offset;
2481 * Allocate a page array big enough to receive the data read
2484 page_count = (u32)calc_pages_for(0, length);
2485 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2486 if (IS_ERR(pages)) {
2487 result = PTR_ERR(pages);
2493 parent_request = rbd_parent_request_create(obj_request,
2494 img_offset, length);
2495 if (!parent_request)
2498 result = rbd_img_request_fill(parent_request, OBJ_REQUEST_PAGES, pages);
2501 parent_request->copyup_pages = pages;
2502 parent_request->copyup_page_count = page_count;
2504 parent_request->callback = rbd_img_obj_parent_read_full_callback;
2505 result = rbd_img_request_submit(parent_request);
2509 parent_request->copyup_pages = NULL;
2510 parent_request->copyup_page_count = 0;
2511 parent_request->obj_request = NULL;
2512 rbd_obj_request_put(obj_request);
2515 ceph_release_page_vector(pages, page_count);
2517 rbd_img_request_put(parent_request);
2518 obj_request->result = result;
2519 obj_request->xferred = 0;
2520 obj_request_done_set(obj_request);
2525 static void rbd_img_obj_exists_callback(struct rbd_obj_request *obj_request)
2527 struct rbd_obj_request *orig_request;
2528 struct rbd_device *rbd_dev;
2531 rbd_assert(!obj_request_img_data_test(obj_request));
2534 * All we need from the object request is the original
2535 * request and the result of the STAT op. Grab those, then
2536 * we're done with the request.
2538 orig_request = obj_request->obj_request;
2539 obj_request->obj_request = NULL;
2540 rbd_obj_request_put(orig_request);
2541 rbd_assert(orig_request);
2542 rbd_assert(orig_request->img_request);
2544 result = obj_request->result;
2545 obj_request->result = 0;
2547 dout("%s: obj %p for obj %p result %d %llu/%llu\n", __func__,
2548 obj_request, orig_request, result,
2549 obj_request->xferred, obj_request->length);
2550 rbd_obj_request_put(obj_request);
2553 * If the overlap has become 0 (most likely because the
2554 * image has been flattened) we need to free the pages
2555 * and re-submit the original write request.
2557 rbd_dev = orig_request->img_request->rbd_dev;
2558 if (!rbd_dev->parent_overlap) {
2559 struct ceph_osd_client *osdc;
2561 osdc = &rbd_dev->rbd_client->client->osdc;
2562 result = rbd_obj_request_submit(osdc, orig_request);
2568 * Our only purpose here is to determine whether the object
2569 * exists, and we don't want to treat the non-existence as
2570 * an error. If something else comes back, transfer the
2571 * error to the original request and complete it now.
2574 obj_request_existence_set(orig_request, true);
2575 } else if (result == -ENOENT) {
2576 obj_request_existence_set(orig_request, false);
2577 } else if (result) {
2578 orig_request->result = result;
2583 * Resubmit the original request now that we have recorded
2584 * whether the target object exists.
2586 orig_request->result = rbd_img_obj_request_submit(orig_request);
2588 if (orig_request->result)
2589 rbd_obj_request_complete(orig_request);
2592 static int rbd_img_obj_exists_submit(struct rbd_obj_request *obj_request)
2594 struct rbd_obj_request *stat_request;
2595 struct rbd_device *rbd_dev;
2596 struct ceph_osd_client *osdc;
2597 struct page **pages = NULL;
2603 * The response data for a STAT call consists of:
2604 *     le64 length;
2605 *     struct {
2606 *         le32 tv_sec;
2607 *         le32 tv_nsec;
2608 *     } mtime;
2610 size = sizeof (__le64) + sizeof (__le32) + sizeof (__le32);
2611 page_count = (u32)calc_pages_for(0, size);
2612 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2614 return PTR_ERR(pages);
2617 stat_request = rbd_obj_request_create(obj_request->object_name, 0, 0,
2622 rbd_obj_request_get(obj_request);
2623 stat_request->obj_request = obj_request;
2624 stat_request->pages = pages;
2625 stat_request->page_count = page_count;
2627 rbd_assert(obj_request->img_request);
2628 rbd_dev = obj_request->img_request->rbd_dev;
2629 stat_request->osd_req = rbd_osd_req_create(rbd_dev, false,
2631 if (!stat_request->osd_req)
2633 stat_request->callback = rbd_img_obj_exists_callback;
2635 osd_req_op_init(stat_request->osd_req, 0, CEPH_OSD_OP_STAT);
2636 osd_req_op_raw_data_in_pages(stat_request->osd_req, 0, pages, size, 0,
2638 rbd_osd_req_format_read(stat_request);
2640 osdc = &rbd_dev->rbd_client->client->osdc;
2641 ret = rbd_obj_request_submit(osdc, stat_request);
2644 rbd_obj_request_put(obj_request);
2649 static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request)
2651 struct rbd_img_request *img_request;
2652 struct rbd_device *rbd_dev;
2655 rbd_assert(obj_request_img_data_test(obj_request));
2657 img_request = obj_request->img_request;
2658 rbd_assert(img_request);
2659 rbd_dev = img_request->rbd_dev;
2662 * Only writes to layered images need special handling.
2663 * Reads and non-layered writes are simple object requests.
2664 * Layered writes that start beyond the end of the overlap
2665 * with the parent have no parent data, so they too are
2666 * simple object requests. Finally, if the target object is
2667 * known to already exist, its parent data has already been
2668 * copied, so a write to the object can also be handled as a
2669 * simple object request.
2671 if (!img_request_write_test(img_request) ||
2672 !img_request_layered_test(img_request) ||
2673 rbd_dev->parent_overlap <= obj_request->img_offset ||
2674 ((known = obj_request_known_test(obj_request)) &&
2675 obj_request_exists_test(obj_request))) {
2677 struct rbd_device *rbd_dev;
2678 struct ceph_osd_client *osdc;
2680 rbd_dev = obj_request->img_request->rbd_dev;
2681 osdc = &rbd_dev->rbd_client->client->osdc;
2683 return rbd_obj_request_submit(osdc, obj_request);
2687 * It's a layered write. The target object might exist but
2688 * we may not know that yet. If we know it doesn't exist,
2689 * start by reading the data for the full target object from
2690 * the parent so we can use it for a copyup to the target.
2693 return rbd_img_obj_parent_read_full(obj_request);
2695 /* We don't know whether the target exists. Go find out. */
2697 return rbd_img_obj_exists_submit(obj_request);
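/*
 * Summarizing the dispatch above for a layered-image write:
 *
 *   target object known to exist       -> plain object write
 *   target known not to exist          -> parent read, then copyup+write
 *   target existence not yet known     -> STAT first, then resubmit here
 *
 * Reads, non-layered writes, and writes wholly beyond the parent
 * overlap were already submitted directly above.
 */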
2700 static int rbd_img_request_submit(struct rbd_img_request *img_request)
2702 struct rbd_obj_request *obj_request;
2703 struct rbd_obj_request *next_obj_request;
2705 dout("%s: img %p\n", __func__, img_request);
2706 for_each_obj_request_safe(img_request, obj_request, next_obj_request) {
2709 ret = rbd_img_obj_request_submit(obj_request);
2717 static void rbd_img_parent_read_callback(struct rbd_img_request *img_request)
2719 struct rbd_obj_request *obj_request;
2720 struct rbd_device *rbd_dev;
2725 rbd_assert(img_request_child_test(img_request));
2727 /* First get what we need from the image request and release it */
2729 obj_request = img_request->obj_request;
2730 img_xferred = img_request->xferred;
2731 img_result = img_request->result;
2732 rbd_img_request_put(img_request);
2735 * If the overlap has become 0 (most likely because the
2736 * image has been flattened) we need to re-submit the original request.
2739 rbd_assert(obj_request);
2740 rbd_assert(obj_request->img_request);
2741 rbd_dev = obj_request->img_request->rbd_dev;
2742 if (!rbd_dev->parent_overlap) {
2743 struct ceph_osd_client *osdc;
2745 osdc = &rbd_dev->rbd_client->client->osdc;
2746 img_result = rbd_obj_request_submit(osdc, obj_request);
2751 obj_request->result = img_result;
2752 if (obj_request->result)
2756 * We need to zero anything beyond the parent overlap
2757 * boundary. Since rbd_img_obj_request_read_callback()
2758 * will zero anything beyond the end of a short read, an
2759 * easy way to do this is to pretend the data from the
2760 * parent came up short--ending at the overlap boundary.
2762 rbd_assert(obj_request->img_offset < U64_MAX - obj_request->length);
2763 obj_end = obj_request->img_offset + obj_request->length;
2764 if (obj_end > rbd_dev->parent_overlap) {
2767 if (obj_request->img_offset < rbd_dev->parent_overlap)
2768 xferred = rbd_dev->parent_overlap -
2769 obj_request->img_offset;
2771 obj_request->xferred = min(img_xferred, xferred);
2773 obj_request->xferred = img_xferred;
2776 rbd_img_obj_request_read_callback(obj_request);
2777 rbd_obj_request_complete(obj_request);
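/*
 * A worked example of the clamping above: with parent_overlap = 6 MB
 * and an object request covering img_offset 4 MB, length 4 MB, obj_end
 * is 8 MB, so xferred becomes 6 MB - 4 MB = 2 MB and everything past
 * the first 2 MB of the request is zero-filled by
 * rbd_img_obj_request_read_callback().
 */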
2780 static void rbd_img_parent_read(struct rbd_obj_request *obj_request)
2782 struct rbd_img_request *img_request;
2785 rbd_assert(obj_request_img_data_test(obj_request));
2786 rbd_assert(obj_request->img_request != NULL);
2787 rbd_assert(obj_request->result == (s32) -ENOENT);
2788 rbd_assert(obj_request_type_valid(obj_request->type));
2791 img_request = rbd_parent_request_create(obj_request,
2792 obj_request->img_offset,
2793 obj_request->length);
2798 if (obj_request->type == OBJ_REQUEST_BIO)
2799 result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
2800 obj_request->bio_list);
2802 result = rbd_img_request_fill(img_request, OBJ_REQUEST_PAGES,
2803 obj_request->pages);
2807 img_request->callback = rbd_img_parent_read_callback;
2808 result = rbd_img_request_submit(img_request);
2815 rbd_img_request_put(img_request);
2816 obj_request->result = result;
2817 obj_request->xferred = 0;
2818 obj_request_done_set(obj_request);
2821 static int rbd_obj_notify_ack(struct rbd_device *rbd_dev, u64 notify_id)
2823 struct rbd_obj_request *obj_request;
2824 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2827 obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
2828 OBJ_REQUEST_NODATA);
2833 obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, obj_request);
2834 if (!obj_request->osd_req)
2836 obj_request->callback = rbd_obj_request_put;
2838 osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_NOTIFY_ACK,
2840 rbd_osd_req_format_read(obj_request);
2842 ret = rbd_obj_request_submit(osdc, obj_request);
2845 rbd_obj_request_put(obj_request);
2850 static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
2852 struct rbd_device *rbd_dev = (struct rbd_device *)data;
2858 dout("%s: \"%s\" notify_id %llu opcode %u\n", __func__,
2859 rbd_dev->header_name, (unsigned long long)notify_id,
2860 (unsigned int)opcode);
2861 ret = rbd_dev_refresh(rbd_dev);
2863 rbd_warn(rbd_dev, "header refresh error (%d)\n", ret);
2865 rbd_obj_notify_ack(rbd_dev, notify_id);
2869 * Request sync osd watch/unwatch. The value of "start" determines
2870 * whether a watch request is being initiated or torn down.
2872 static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev, bool start)
2874 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2875 struct rbd_obj_request *obj_request;
2878 rbd_assert(start ^ !!rbd_dev->watch_event);
2879 rbd_assert(start ^ !!rbd_dev->watch_request);
2882 ret = ceph_osdc_create_event(osdc, rbd_watch_cb, rbd_dev,
2883 &rbd_dev->watch_event);
2886 rbd_assert(rbd_dev->watch_event != NULL);
2890 obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
2891 OBJ_REQUEST_NODATA);
2895 obj_request->osd_req = rbd_osd_req_create(rbd_dev, true, obj_request);
2896 if (!obj_request->osd_req)
2900 ceph_osdc_set_request_linger(osdc, obj_request->osd_req);
2902 ceph_osdc_unregister_linger_request(osdc,
2903 rbd_dev->watch_request->osd_req);
2905 osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_WATCH,
2906 rbd_dev->watch_event->cookie, 0, start ? 1 : 0);
2907 rbd_osd_req_format_write(obj_request);
2909 ret = rbd_obj_request_submit(osdc, obj_request);
2912 ret = rbd_obj_request_wait(obj_request);
2915 ret = obj_request->result;
2920 * A watch request is set to linger, so the underlying osd
2921 * request won't go away until we unregister it. We retain
2922 * a pointer to the object request during that time (in
2923 * rbd_dev->watch_request), so we'll keep a reference to
2924 * it. We'll drop that reference (below) after we've unregistered it.
2928 rbd_dev->watch_request = obj_request;
2933 /* We have successfully torn down the watch request */
2935 rbd_obj_request_put(rbd_dev->watch_request);
2936 rbd_dev->watch_request = NULL;
2938 /* Cancel the event if we're tearing down, or on error */
2939 ceph_osdc_cancel_event(rbd_dev->watch_event);
2940 rbd_dev->watch_event = NULL;
2942 rbd_obj_request_put(obj_request);
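/*
 * Typical pairing of the call above during map and unmap (an
 * illustrative sketch only; error handling omitted and the block is
 * not compiled):
 */
#if 0
	ret = rbd_dev_header_watch_sync(rbd_dev, true);	/* establish watch */
	/* ... rbd_watch_cb() now fires on header object notifications ... */
	ret = rbd_dev_header_watch_sync(rbd_dev, false);	/* tear it down */
#endif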
2948 * Synchronous osd object method call. Returns the number of bytes
2949 * returned in the inbound buffer, or a negative error code.
2951 static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
2952 const char *object_name,
2953 const char *class_name,
2954 const char *method_name,
2955 const void *outbound,
2956 size_t outbound_size,
2958 size_t inbound_size)
2960 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2961 struct rbd_obj_request *obj_request;
2962 struct page **pages;
2967 * Method calls are ultimately read operations. The result
2968 * should be placed into the inbound buffer provided. They
2969 * also supply outbound data--parameters for the object
2970 * method. Currently if this is present it will be a snapshot id.
2973 page_count = (u32)calc_pages_for(0, inbound_size);
2974 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2976 return PTR_ERR(pages);
2979 obj_request = rbd_obj_request_create(object_name, 0, inbound_size,
2984 obj_request->pages = pages;
2985 obj_request->page_count = page_count;
2987 obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, obj_request);
2988 if (!obj_request->osd_req)
2991 osd_req_op_cls_init(obj_request->osd_req, 0, CEPH_OSD_OP_CALL,
2992 class_name, method_name);
2993 if (outbound_size) {
2994 struct ceph_pagelist *pagelist;
2996 pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS);
3000 ceph_pagelist_init(pagelist);
3001 ceph_pagelist_append(pagelist, outbound, outbound_size);
3002 osd_req_op_cls_request_data_pagelist(obj_request->osd_req, 0,
3005 osd_req_op_cls_response_data_pages(obj_request->osd_req, 0,
3006 obj_request->pages, inbound_size,
3008 rbd_osd_req_format_read(obj_request);
3010 ret = rbd_obj_request_submit(osdc, obj_request);
3013 ret = rbd_obj_request_wait(obj_request);
3017 ret = obj_request->result;
3021 rbd_assert(obj_request->xferred < (u64)INT_MAX);
3022 ret = (int)obj_request->xferred;
3023 ceph_copy_from_page_vector(pages, inbound, 0, obj_request->xferred);
3026 rbd_obj_request_put(obj_request);
3028 ceph_release_page_vector(pages, page_count);
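/*
 * A typical caller, sketched after the v2 size probe further below
 * (declarations omitted; size_buf is that probe's packed reply
 * struct, and the block is not compiled):
 */
#if 0
	__le64 snapid = cpu_to_le64(CEPH_NOSNAP);

	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
				  "rbd", "get_size",
				  &snapid, sizeof (snapid),
				  &size_buf, sizeof (size_buf));
#endif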
3033 static void rbd_request_fn(struct request_queue *q)
3034 __releases(q->queue_lock) __acquires(q->queue_lock)
3036 struct rbd_device *rbd_dev = q->queuedata;
3037 bool read_only = rbd_dev->mapping.read_only;
3041 while ((rq = blk_fetch_request(q))) {
3042 bool write_request = rq_data_dir(rq) == WRITE;
3043 struct rbd_img_request *img_request;
3047 /* Ignore any non-FS requests that filter through. */
3049 if (rq->cmd_type != REQ_TYPE_FS) {
3050 dout("%s: non-fs request type %d\n", __func__,
3051 (int) rq->cmd_type);
3052 __blk_end_request_all(rq, 0);
3056 /* Ignore/skip any zero-length requests */
3058 offset = (u64) blk_rq_pos(rq) << SECTOR_SHIFT;
3059 length = (u64) blk_rq_bytes(rq);
3062 dout("%s: zero-length request\n", __func__);
3063 __blk_end_request_all(rq, 0);
3067 spin_unlock_irq(q->queue_lock);
3069 /* Disallow writes to a read-only device */
3071 if (write_request) {
3075 rbd_assert(rbd_dev->spec->snap_id == CEPH_NOSNAP);
3079 * Quit early if the mapped snapshot no longer
3080 * exists. It's still possible the snapshot will
3081 * have disappeared by the time our request arrives
3082 * at the osd, but there's no sense in sending it if we already know.
3085 if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags)) {
3086 dout("request for non-existent snapshot\n");
3087 rbd_assert(rbd_dev->spec->snap_id != CEPH_NOSNAP);
3093 if (offset && length > U64_MAX - offset + 1) {
3094 rbd_warn(rbd_dev, "bad request range (%llu~%llu)\n",
3096 goto end_request; /* Shouldn't happen */
3100 if (offset + length > rbd_dev->mapping.size) {
3101 rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)\n",
3102 offset, length, rbd_dev->mapping.size);
3107 img_request = rbd_img_request_create(rbd_dev, offset, length,
3112 img_request->rq = rq;
3114 result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
3117 result = rbd_img_request_submit(img_request);
3119 rbd_img_request_put(img_request);
3121 spin_lock_irq(q->queue_lock);
3123 rbd_warn(rbd_dev, "%s %llx at %llx result %d\n",
3124 write_request ? "write" : "read",
3125 length, offset, result);
3127 __blk_end_request_all(rq, result);
3133 * Called by the block layer as a queue merge_bvec callback. Makes
3134 * sure that we don't create a bio that spans multiple osd objects.
3135 * One exception is single-page bios, which we handle later in bio_chain_clone_range().
3137 static int rbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bmd,
3138 struct bio_vec *bvec)
3140 struct rbd_device *rbd_dev = q->queuedata;
3141 sector_t sector_offset;
3142 sector_t sectors_per_obj;
3143 sector_t obj_sector_offset;
3147 * Find how far into its rbd object the partition-relative bio
3148 * start sector falls, offsetting it relative to the enclosing device.
3151 sector_offset = get_start_sect(bmd->bi_bdev) + bmd->bi_sector;
3152 sectors_per_obj = 1 << (rbd_dev->header.obj_order - SECTOR_SHIFT);
3153 obj_sector_offset = sector_offset & (sectors_per_obj - 1);
3156 * Compute the number of bytes from that offset to the end
3157 * of the object. Account for what's already used by the bio.
3159 ret = (int) (sectors_per_obj - obj_sector_offset) << SECTOR_SHIFT;
3160 if (ret > bmd->bi_size)
3161 ret -= bmd->bi_size;
3166 * Don't send back more than was asked for. And if the bio
3167 * was empty, let the whole thing through because: "Note
3168 * that a block device *must* allow a single page to be
3169 * added to an empty bio."
3171 rbd_assert(bvec->bv_len <= PAGE_SIZE);
3172 if (ret > (int) bvec->bv_len || !bmd->bi_size)
3173 ret = (int) bvec->bv_len;
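/*
 * Example of the arithmetic above: with 4 MB objects (8192 sectors)
 * and a bio starting at device sector 8190, obj_sector_offset is 8190
 * and only 2 sectors (1024 bytes) remain before the object boundary,
 * so a 4 KB bvec is not accepted in full; unless the bio is still
 * empty, in which case the full page is allowed as the block layer
 * requires.
 */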
3178 static void rbd_free_disk(struct rbd_device *rbd_dev)
3180 struct gendisk *disk = rbd_dev->disk;
3185 rbd_dev->disk = NULL;
3186 if (disk->flags & GENHD_FL_UP) {
3189 blk_cleanup_queue(disk->queue);
3194 static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
3195 const char *object_name,
3196 u64 offset, u64 length, void *buf)
3199 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3200 struct rbd_obj_request *obj_request;
3201 struct page **pages = NULL;
3206 page_count = (u32) calc_pages_for(offset, length);
3207 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
3209 ret = PTR_ERR(pages);
3212 obj_request = rbd_obj_request_create(object_name, offset, length,
3217 obj_request->pages = pages;
3218 obj_request->page_count = page_count;
3220 obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, obj_request);
3221 if (!obj_request->osd_req)
3224 osd_req_op_extent_init(obj_request->osd_req, 0, CEPH_OSD_OP_READ,
3225 offset, length, 0, 0);
3226 osd_req_op_extent_osd_data_pages(obj_request->osd_req, 0,
3228 obj_request->length,
3229 obj_request->offset & ~PAGE_MASK,
3231 rbd_osd_req_format_read(obj_request);
3233 ret = rbd_obj_request_submit(osdc, obj_request);
3236 ret = rbd_obj_request_wait(obj_request);
3240 ret = obj_request->result;
3244 rbd_assert(obj_request->xferred <= (u64) SIZE_MAX);
3245 size = (size_t) obj_request->xferred;
3246 ceph_copy_from_page_vector(pages, buf, 0, size);
3247 rbd_assert(size <= (size_t)INT_MAX);
3251 rbd_obj_request_put(obj_request);
3253 ceph_release_page_vector(pages, page_count);
3259 * Read the complete header for the given rbd device. On successful
3260 * return, the rbd_dev->header field will contain up-to-date
3261 * information about the image.
3263 static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev)
3265 struct rbd_image_header_ondisk *ondisk = NULL;
3272 * The complete header will include an array of its 64-bit
3273 * snapshot ids, followed by the names of those snapshots as
3274 * a contiguous block of NUL-terminated strings. Note that
3275 * the number of snapshots could change by the time we read
3276 * it in, in which case we re-read it.
3283 size = sizeof (*ondisk);
3284 size += snap_count * sizeof (struct rbd_image_snap_ondisk);
3286 ondisk = kmalloc(size, GFP_KERNEL);
3290 ret = rbd_obj_read_sync(rbd_dev, rbd_dev->header_name,
3294 if ((size_t)ret < size) {
3296 rbd_warn(rbd_dev, "short header read (want %zd got %d)",
3300 if (!rbd_dev_ondisk_valid(ondisk)) {
3302 rbd_warn(rbd_dev, "invalid header");
3306 names_size = le64_to_cpu(ondisk->snap_names_len);
3307 want_count = snap_count;
3308 snap_count = le32_to_cpu(ondisk->snap_count);
3309 } while (snap_count != want_count);
3311 ret = rbd_header_from_disk(rbd_dev, ondisk);
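/*
 * The re-read loop above closes a race: if a snapshot is created or
 * deleted between sizing the buffer and reading the header, the
 * snap_count read back will not match want_count, and the header is
 * simply fetched again with a correctly sized buffer.
 */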
3319 * Clear the rbd device's EXISTS flag if the snapshot it's mapped to
3320 * has disappeared from the (just updated) snapshot context.
3322 static void rbd_exists_validate(struct rbd_device *rbd_dev)
3326 if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags))
3329 snap_id = rbd_dev->spec->snap_id;
3330 if (snap_id == CEPH_NOSNAP)
3333 if (rbd_dev_snap_index(rbd_dev, snap_id) == BAD_SNAP_INDEX)
3334 clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
3337 static int rbd_dev_refresh(struct rbd_device *rbd_dev)
3342 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
3343 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
3344 mapping_size = rbd_dev->mapping.size;
3345 if (rbd_dev->image_format == 1)
3346 ret = rbd_dev_v1_header_info(rbd_dev);
3348 ret = rbd_dev_v2_header_info(rbd_dev);
3350 /* If it's a mapped snapshot, validate its EXISTS flag */
3352 rbd_exists_validate(rbd_dev);
3353 mutex_unlock(&ctl_mutex);
3354 if (mapping_size != rbd_dev->mapping.size) {
3357 size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
3358 dout("setting size to %llu sectors", (unsigned long long)size);
3359 set_capacity(rbd_dev->disk, size);
3360 revalidate_disk(rbd_dev->disk);
3366 static int rbd_init_disk(struct rbd_device *rbd_dev)
3368 struct gendisk *disk;
3369 struct request_queue *q;
3372 /* create gendisk info */
3373 disk = alloc_disk(RBD_MINORS_PER_MAJOR);
3377 snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
3379 disk->major = rbd_dev->major;
3380 disk->first_minor = 0;
3381 disk->fops = &rbd_bd_ops;
3382 disk->private_data = rbd_dev;
3384 q = blk_init_queue(rbd_request_fn, &rbd_dev->lock);
3388 /* We use the default size, but let's be explicit about it. */
3389 blk_queue_physical_block_size(q, SECTOR_SIZE);
3391 /* set io sizes to object size */
3392 segment_size = rbd_obj_bytes(&rbd_dev->header);
3393 blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE);
3394 blk_queue_max_segment_size(q, segment_size);
3395 blk_queue_io_min(q, segment_size);
3396 blk_queue_io_opt(q, segment_size);
3398 blk_queue_merge_bvec(q, rbd_merge_bvec);
3401 q->queuedata = rbd_dev;
3403 rbd_dev->disk = disk;
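/*
 * With the default object order of 22 the limits above work out to a
 * 4 MB maximum request: max_hw_sectors = 4 MB / 512 = 8192 sectors,
 * with io_min and io_opt also 4 MB, so well-formed I/O naturally
 * aligns to whole rados objects.
 */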
3416 static struct rbd_device *dev_to_rbd_dev(struct device *dev)
3418 return container_of(dev, struct rbd_device, dev);
3421 static ssize_t rbd_size_show(struct device *dev,
3422 struct device_attribute *attr, char *buf)
3424 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3426 return sprintf(buf, "%llu\n",
3427 (unsigned long long)rbd_dev->mapping.size);
3431 * Note this shows the features for whatever's mapped, which is not
3432 * necessarily the base image.
3434 static ssize_t rbd_features_show(struct device *dev,
3435 struct device_attribute *attr, char *buf)
3437 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3439 return sprintf(buf, "0x%016llx\n",
3440 (unsigned long long)rbd_dev->mapping.features);
3443 static ssize_t rbd_major_show(struct device *dev,
3444 struct device_attribute *attr, char *buf)
3446 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3449 return sprintf(buf, "%d\n", rbd_dev->major);
3451 return sprintf(buf, "(none)\n");
3455 static ssize_t rbd_client_id_show(struct device *dev,
3456 struct device_attribute *attr, char *buf)
3458 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3460 return sprintf(buf, "client%lld\n",
3461 ceph_client_id(rbd_dev->rbd_client->client));
3464 static ssize_t rbd_pool_show(struct device *dev,
3465 struct device_attribute *attr, char *buf)
3467 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3469 return sprintf(buf, "%s\n", rbd_dev->spec->pool_name);
3472 static ssize_t rbd_pool_id_show(struct device *dev,
3473 struct device_attribute *attr, char *buf)
3475 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3477 return sprintf(buf, "%llu\n",
3478 (unsigned long long) rbd_dev->spec->pool_id);
3481 static ssize_t rbd_name_show(struct device *dev,
3482 struct device_attribute *attr, char *buf)
3484 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3486 if (rbd_dev->spec->image_name)
3487 return sprintf(buf, "%s\n", rbd_dev->spec->image_name);
3489 return sprintf(buf, "(unknown)\n");
3492 static ssize_t rbd_image_id_show(struct device *dev,
3493 struct device_attribute *attr, char *buf)
3495 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3497 return sprintf(buf, "%s\n", rbd_dev->spec->image_id);
3501 * Shows the name of the currently-mapped snapshot (or
3502 * RBD_SNAP_HEAD_NAME for the base image).
3504 static ssize_t rbd_snap_show(struct device *dev,
3505 struct device_attribute *attr,
3508 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3510 return sprintf(buf, "%s\n", rbd_dev->spec->snap_name);
3514 * For an rbd v2 image, shows the ids and names of the parent
3515 * image's pool, image, and snapshot, plus the parent overlap. If
3516 * there is no parent, simply shows "(no parent image)".
3518 static ssize_t rbd_parent_show(struct device *dev,
3519 struct device_attribute *attr,
3522 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3523 struct rbd_spec *spec = rbd_dev->parent_spec;
3528 return sprintf(buf, "(no parent image)\n");
3530 count = sprintf(bufp, "pool_id %llu\npool_name %s\n",
3531 (unsigned long long) spec->pool_id, spec->pool_name);
3536 count = sprintf(bufp, "image_id %s\nimage_name %s\n", spec->image_id,
3537 spec->image_name ? spec->image_name : "(unknown)");
3542 count = sprintf(bufp, "snap_id %llu\nsnap_name %s\n",
3543 (unsigned long long) spec->snap_id, spec->snap_name);
3548 count = sprintf(bufp, "overlap %llu\n", rbd_dev->parent_overlap);
3553 return (ssize_t) (bufp - buf);
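/*
 * The resulting sysfs "parent" output looks like this for a mapped
 * clone (the values below are only illustrative):
 *
 *   pool_id 2
 *   pool_name rbd
 *   image_id 1018e56a3b4c
 *   image_name parent-img
 *   snap_id 4
 *   snap_name base
 *   overlap 10737418240
 */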
3556 static ssize_t rbd_image_refresh(struct device *dev,
3557 struct device_attribute *attr,
3561 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3564 ret = rbd_dev_refresh(rbd_dev);
3566 rbd_warn(rbd_dev, ": manual header refresh error (%d)\n", ret);
3568 return ret < 0 ? ret : size;
3571 static DEVICE_ATTR(size, S_IRUGO, rbd_size_show, NULL);
3572 static DEVICE_ATTR(features, S_IRUGO, rbd_features_show, NULL);
3573 static DEVICE_ATTR(major, S_IRUGO, rbd_major_show, NULL);
3574 static DEVICE_ATTR(client_id, S_IRUGO, rbd_client_id_show, NULL);
3575 static DEVICE_ATTR(pool, S_IRUGO, rbd_pool_show, NULL);
3576 static DEVICE_ATTR(pool_id, S_IRUGO, rbd_pool_id_show, NULL);
3577 static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL);
3578 static DEVICE_ATTR(image_id, S_IRUGO, rbd_image_id_show, NULL);
3579 static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh);
3580 static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL);
3581 static DEVICE_ATTR(parent, S_IRUGO, rbd_parent_show, NULL);
3583 static struct attribute *rbd_attrs[] = {
3584 &dev_attr_size.attr,
3585 &dev_attr_features.attr,
3586 &dev_attr_major.attr,
3587 &dev_attr_client_id.attr,
3588 &dev_attr_pool.attr,
3589 &dev_attr_pool_id.attr,
3590 &dev_attr_name.attr,
3591 &dev_attr_image_id.attr,
3592 &dev_attr_current_snap.attr,
3593 &dev_attr_parent.attr,
3594 &dev_attr_refresh.attr,
3598 static struct attribute_group rbd_attr_group = {
3602 static const struct attribute_group *rbd_attr_groups[] = {
3607 static void rbd_sysfs_dev_release(struct device *dev)
3611 static struct device_type rbd_device_type = {
3613 .groups = rbd_attr_groups,
3614 .release = rbd_sysfs_dev_release,
3617 static struct rbd_spec *rbd_spec_get(struct rbd_spec *spec)
3619 kref_get(&spec->kref);
3624 static void rbd_spec_free(struct kref *kref);
3625 static void rbd_spec_put(struct rbd_spec *spec)
3628 kref_put(&spec->kref, rbd_spec_free);
3631 static struct rbd_spec *rbd_spec_alloc(void)
3633 struct rbd_spec *spec;
3635 spec = kzalloc(sizeof (*spec), GFP_KERNEL);
3638 kref_init(&spec->kref);
3643 static void rbd_spec_free(struct kref *kref)
3645 struct rbd_spec *spec = container_of(kref, struct rbd_spec, kref);
3647 kfree(spec->pool_name);
3648 kfree(spec->image_id);
3649 kfree(spec->image_name);
3650 kfree(spec->snap_name);
3654 static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
3655 struct rbd_spec *spec)
3657 struct rbd_device *rbd_dev;
3659 rbd_dev = kzalloc(sizeof (*rbd_dev), GFP_KERNEL);
3663 spin_lock_init(&rbd_dev->lock);
3665 atomic_set(&rbd_dev->parent_ref, 0);
3666 INIT_LIST_HEAD(&rbd_dev->node);
3667 init_rwsem(&rbd_dev->header_rwsem);
3669 rbd_dev->spec = spec;
3670 rbd_dev->rbd_client = rbdc;
3672 /* Initialize the layout used for all rbd requests */
3674 rbd_dev->layout.fl_stripe_unit = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
3675 rbd_dev->layout.fl_stripe_count = cpu_to_le32(1);
3676 rbd_dev->layout.fl_object_size = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
3677 rbd_dev->layout.fl_pg_pool = cpu_to_le32((u32) spec->pool_id);
3682 static void rbd_dev_destroy(struct rbd_device *rbd_dev)
3684 rbd_put_client(rbd_dev->rbd_client);
3685 rbd_spec_put(rbd_dev->spec);
3690 * Get the size and object order for an image snapshot, or, if
3691 * snap_id is CEPH_NOSNAP, for the base image.
3694 static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
3695 u8 *order, u64 *snap_size)
3697 __le64 snapid = cpu_to_le64(snap_id);
3702 } __attribute__ ((packed)) size_buf = { 0 };
3704 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3706 &snapid, sizeof (snapid),
3707 &size_buf, sizeof (size_buf));
3708 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3711 if (ret < sizeof (size_buf))
3715 *order = size_buf.order;
3716 *snap_size = le64_to_cpu(size_buf.size);
3718 dout(" snap_id 0x%016llx order = %u, snap_size = %llu\n",
3719 (unsigned long long)snap_id, (unsigned int)*order,
3720 (unsigned long long)*snap_size);
3725 static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev)
3727 return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
3728 &rbd_dev->header.obj_order,
3729 &rbd_dev->header.image_size);
3732 static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
3738 reply_buf = kzalloc(RBD_OBJ_PREFIX_LEN_MAX, GFP_KERNEL);
3742 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3743 "rbd", "get_object_prefix", NULL, 0,
3744 reply_buf, RBD_OBJ_PREFIX_LEN_MAX);
3745 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3750 rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
3751 p + ret, NULL, GFP_NOIO);
3754 if (IS_ERR(rbd_dev->header.object_prefix)) {
3755 ret = PTR_ERR(rbd_dev->header.object_prefix);
3756 rbd_dev->header.object_prefix = NULL;
3758 dout(" object_prefix = %s\n", rbd_dev->header.object_prefix);
3766 static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
3769 __le64 snapid = cpu_to_le64(snap_id);
3773 } __attribute__ ((packed)) features_buf = { 0 };
3777 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3778 "rbd", "get_features",
3779 &snapid, sizeof (snapid),
3780 &features_buf, sizeof (features_buf));
3781 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3784 if (ret < sizeof (features_buf))
3787 incompat = le64_to_cpu(features_buf.incompat);
3788 if (incompat & ~RBD_FEATURES_SUPPORTED)
3791 *snap_features = le64_to_cpu(features_buf.features);
3793 dout(" snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n",
3794 (unsigned long long)snap_id,
3795 (unsigned long long)*snap_features,
3796 (unsigned long long)le64_to_cpu(features_buf.incompat));
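/*
 * For example, an image using layering and striping v2 would report
 * incompat = 0x3 (RBD_FEATURE_LAYERING | RBD_FEATURE_STRIPINGV2);
 * both bits are within RBD_FEATURES_SUPPORTED, so the probe proceeds.
 * Any incompat bit this client does not know about makes the call
 * fail instead.
 */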
3801 static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
3803 return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
3804 &rbd_dev->header.features);
3807 static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
3809 struct rbd_spec *parent_spec;
3811 void *reply_buf = NULL;
3821 parent_spec = rbd_spec_alloc();
3825 size = sizeof (__le64) + /* pool_id */
3826 sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX + /* image_id */
3827 sizeof (__le64) + /* snap_id */
3828 sizeof (__le64); /* overlap */
3829 reply_buf = kmalloc(size, GFP_KERNEL);
3835 snapid = cpu_to_le64(CEPH_NOSNAP);
3836 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3837 "rbd", "get_parent",
3838 &snapid, sizeof (snapid),
3840 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3845 end = reply_buf + ret;
3847 ceph_decode_64_safe(&p, end, pool_id, out_err);
3848 if (pool_id == CEPH_NOPOOL) {
3850 * Either the parent never existed, or we have
3851 * record of it but the image got flattened so it no
3852 * longer has a parent. When the parent of a
3853 * layered image disappears we immediately set the
3854 * overlap to 0. The effect of this is that all new
3855 * requests will be treated as if the image had no
3858 if (rbd_dev->parent_overlap) {
3859 rbd_dev->parent_overlap = 0;
3861 rbd_dev_parent_put(rbd_dev);
3862 pr_info("%s: clone image has been flattened\n",
3863 rbd_dev->disk->disk_name);
3866 goto out; /* No parent? No problem. */
3869 /* The ceph file layout needs to fit pool id in 32 bits */
3872 if (pool_id > (u64)U32_MAX) {
3873 rbd_warn(NULL, "parent pool id too large (%llu > %u)\n",
3874 (unsigned long long)pool_id, U32_MAX);
3878 image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
3879 if (IS_ERR(image_id)) {
3880 ret = PTR_ERR(image_id);
3883 ceph_decode_64_safe(&p, end, snap_id, out_err);
3884 ceph_decode_64_safe(&p, end, overlap, out_err);
3887 * The parent won't change (except when the clone is
3888 * flattened, a case already handled above). So we only need to
3889 * record the parent spec if we have not already done so.
3891 if (!rbd_dev->parent_spec) {
3892 parent_spec->pool_id = pool_id;
3893 parent_spec->image_id = image_id;
3894 parent_spec->snap_id = snap_id;
3895 rbd_dev->parent_spec = parent_spec;
3896 parent_spec = NULL; /* rbd_dev now owns this */
3900 * We always update the parent overlap. If it's zero we
3901 * treat it specially.
3903 rbd_dev->parent_overlap = overlap;
3907 /* A null parent_spec indicates it's the initial probe */
3911 * The overlap has become zero, so the clone
3912 * must have been resized down to 0 at some
3913 * point. Treat this the same as a flatten.
3915 rbd_dev_parent_put(rbd_dev);
3916 pr_info("%s: clone image now standalone\n",
3917 rbd_dev->disk->disk_name);
3920 * For the initial probe, if we find the
3921 * overlap is zero we just pretend there was no parent image.
3924 rbd_warn(rbd_dev, "ignoring parent of clone with overlap 0\n");
3932 rbd_spec_put(parent_spec);
3937 static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
3941 __le64 stripe_count;
3942 } __attribute__ ((packed)) striping_info_buf = { 0 };
3943 size_t size = sizeof (striping_info_buf);
3950 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3951 "rbd", "get_stripe_unit_count", NULL, 0,
3952 (char *)&striping_info_buf, size);
3953 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3960 * We don't actually support the "fancy striping" feature
3961 * (STRIPINGV2) yet, but if the striping sizes are the
3962 * defaults the behavior is the same as before. So find
3963 * out, and only fail if the image has non-default values.
3966 obj_size = (u64)1 << rbd_dev->header.obj_order;
3967 p = &striping_info_buf;
3968 stripe_unit = ceph_decode_64(&p);
3969 if (stripe_unit != obj_size) {
3970 rbd_warn(rbd_dev, "unsupported stripe unit (got %llu want %llu)",
3971 stripe_unit, obj_size);
3975 stripe_count = ceph_decode_64(&p);
3976 if (stripe_count != 1) {
3977 rbd_warn(rbd_dev, "unsupported stripe count (got %llu want 1)",
3978 stripe_count);
3981 rbd_dev->header.stripe_unit = stripe_unit;
3982 rbd_dev->header.stripe_count = stripe_count;
3987 static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
3989 size_t image_id_size;
3994 void *reply_buf = NULL;
3996 char *image_name = NULL;
3999 rbd_assert(!rbd_dev->spec->image_name);
4001 len = strlen(rbd_dev->spec->image_id);
4002 image_id_size = sizeof (__le32) + len;
4003 image_id = kmalloc(image_id_size, GFP_KERNEL);
4008 end = image_id + image_id_size;
4009 ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32)len);
4011 size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX;
4012 reply_buf = kmalloc(size, GFP_KERNEL);
4016 ret = rbd_obj_method_sync(rbd_dev, RBD_DIRECTORY,
4017 "rbd", "dir_get_name",
4018 image_id, image_id_size,
4023 end = reply_buf + ret;
4025 image_name = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
4026 if (IS_ERR(image_name))
4029 dout("%s: name is %s len is %zd\n", __func__, image_name, len);
4037 static u64 rbd_v1_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
4039 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
4040 const char *snap_name;
4043 /* Skip over names until we find the one we are looking for */
4045 snap_name = rbd_dev->header.snap_names;
4046 while (which < snapc->num_snaps) {
4047 if (!strcmp(name, snap_name))
4048 return snapc->snaps[which];
4049 snap_name += strlen(snap_name) + 1;
4055 static u64 rbd_v2_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
4057 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
4062 for (which = 0; !found && which < snapc->num_snaps; which++) {
4063 const char *snap_name;
4065 snap_id = snapc->snaps[which];
4066 snap_name = rbd_dev_v2_snap_name(rbd_dev, snap_id);
4067 if (IS_ERR(snap_name))
4069 found = !strcmp(name, snap_name);
4072 return found ? snap_id : CEPH_NOSNAP;
4076 * Assumes name is never RBD_SNAP_HEAD_NAME; returns CEPH_NOSNAP if
4077 * no snapshot by that name is found, or if an error occurs.
4079 static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
4081 if (rbd_dev->image_format == 1)
4082 return rbd_v1_snap_id_by_name(rbd_dev, name);
4084 return rbd_v2_snap_id_by_name(rbd_dev, name);
4088 * When an rbd image has a parent image, it is identified by the
4089 * pool, image, and snapshot ids (not names). This function fills
4090 * in the names for those ids. (It's OK if we can't figure out the
4091 * name for an image id, but the pool and snapshot ids should always
4092 * exist and have names.) All names in an rbd spec are dynamically allocated.
4095 * When an image being mapped (not a parent) is probed, we have the
4096 * pool name and pool id, image name and image id, and the snapshot
4097 * name. The only thing we're missing is the snapshot id.
4099 static int rbd_dev_spec_update(struct rbd_device *rbd_dev)
4101 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4102 struct rbd_spec *spec = rbd_dev->spec;
4103 const char *pool_name;
4104 const char *image_name;
4105 const char *snap_name;
4109 * An image being mapped will have the pool name (etc.), but
4110 * we need to look up the snapshot id.
4112 if (spec->pool_name) {
4113 if (strcmp(spec->snap_name, RBD_SNAP_HEAD_NAME)) {
4116 snap_id = rbd_snap_id_by_name(rbd_dev, spec->snap_name);
4117 if (snap_id == CEPH_NOSNAP)
4119 spec->snap_id = snap_id;
4121 spec->snap_id = CEPH_NOSNAP;
4127 /* Get the pool name; we have to make our own copy of this */
4129 pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, spec->pool_id);
4131 rbd_warn(rbd_dev, "no pool with id %llu", spec->pool_id);
4134 pool_name = kstrdup(pool_name, GFP_KERNEL);
4138 /* Fetch the image name; tolerate failure here */
4140 image_name = rbd_dev_image_name(rbd_dev);
4142 rbd_warn(rbd_dev, "unable to get image name");
4144 /* Look up the snapshot name, and make a copy */
4146 snap_name = rbd_snap_name(rbd_dev, spec->snap_id);
4152 spec->pool_name = pool_name;
4153 spec->image_name = image_name;
4154 spec->snap_name = snap_name;
4164 static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
4173 struct ceph_snap_context *snapc;
4177 * We'll need room for the seq value (maximum snapshot id),
4178 * snapshot count, and array of that many snapshot ids.
4179 * For now we have a fixed upper limit on the number we're
4180 * prepared to receive.
4182 size = sizeof (__le64) + sizeof (__le32) +
4183 RBD_MAX_SNAP_COUNT * sizeof (__le64);
4184 reply_buf = kzalloc(size, GFP_KERNEL);
4188 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
4189 "rbd", "get_snapcontext", NULL, 0,
4191 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4196 end = reply_buf + ret;
4198 ceph_decode_64_safe(&p, end, seq, out);
4199 ceph_decode_32_safe(&p, end, snap_count, out);
4202 * Make sure the reported number of snapshot ids wouldn't go
4203 * beyond the end of our buffer. But before checking that,
4204 * make sure the computed size of the snapshot context we
4205 * allocate is representable in a size_t.
4207 if (snap_count > (SIZE_MAX - sizeof (struct ceph_snap_context))
4212 if (!ceph_has_room(&p, end, snap_count * sizeof (__le64)))
4216 snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
4222 for (i = 0; i < snap_count; i++)
4223 snapc->snaps[i] = ceph_decode_64(&p);
4225 ceph_put_snap_context(rbd_dev->header.snapc);
4226 rbd_dev->header.snapc = snapc;
4228 dout(" snap context seq = %llu, snap_count = %u\n",
4229 (unsigned long long)seq, (unsigned int)snap_count);
4236 static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
4247 size = sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN;
4248 reply_buf = kmalloc(size, GFP_KERNEL);
4250 return ERR_PTR(-ENOMEM);
4252 snapid = cpu_to_le64(snap_id);
4253 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
4254 "rbd", "get_snapshot_name",
4255 &snapid, sizeof (snapid),
4257 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4259 snap_name = ERR_PTR(ret);
4264 end = reply_buf + ret;
4265 snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
4266 if (IS_ERR(snap_name))
4269 dout(" snap_id 0x%016llx snap_name = %s\n",
4270 (unsigned long long)snap_id, snap_name);
4277 static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev)
4279 bool first_time = rbd_dev->header.object_prefix == NULL;
4282 down_write(&rbd_dev->header_rwsem);
4284 ret = rbd_dev_v2_image_size(rbd_dev);
4289 ret = rbd_dev_v2_header_onetime(rbd_dev);
4295 * If the image supports layering, get the parent info. We
4296 * need to probe the first time regardless. Thereafter we
4297 * only need to do so if there's a parent, to see if it has
4298 * disappeared due to the mapped image getting flattened.
4300 if (rbd_dev->header.features & RBD_FEATURE_LAYERING &&
4301 (first_time || rbd_dev->parent_spec)) {
4304 ret = rbd_dev_v2_parent_info(rbd_dev);
4309 * Print a warning if this is the initial probe and
4310 * the image has a parent. Don't print it if the
4311 * image now being probed is itself a parent. We
4312 * can tell at this point because we won't know its
4313 * pool name yet (just its pool id).
4315 warn = rbd_dev->parent_spec && rbd_dev->spec->pool_name;
4316 if (first_time && warn)
4317 rbd_warn(rbd_dev, "WARNING: kernel layering is EXPERIMENTAL!");
4321 if (rbd_dev->spec->snap_id == CEPH_NOSNAP)
4322 if (rbd_dev->mapping.size != rbd_dev->header.image_size)
4323 rbd_dev->mapping.size = rbd_dev->header.image_size;
4325 ret = rbd_dev_v2_snap_context(rbd_dev);
4326 dout("rbd_dev_v2_snap_context returned %d\n", ret);
4328 up_write(&rbd_dev->header_rwsem);
4333 static int rbd_bus_add_dev(struct rbd_device *rbd_dev)
4338 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
4340 dev = &rbd_dev->dev;
4341 dev->bus = &rbd_bus_type;
4342 dev->type = &rbd_device_type;
4343 dev->parent = &rbd_root_dev;
4344 dev->release = rbd_dev_device_release;
4345 dev_set_name(dev, "%d", rbd_dev->dev_id);
4346 ret = device_register(dev);
4348 mutex_unlock(&ctl_mutex);
4353 static void rbd_bus_del_dev(struct rbd_device *rbd_dev)
4355 device_unregister(&rbd_dev->dev);
4358 static atomic64_t rbd_dev_id_max = ATOMIC64_INIT(0);
4361 * Get a unique rbd identifier for the given new rbd_dev, and add
4362 * the rbd_dev to the global list. The minimum rbd id is 1.
4364 static void rbd_dev_id_get(struct rbd_device *rbd_dev)
4366 rbd_dev->dev_id = atomic64_inc_return(&rbd_dev_id_max);
4368 spin_lock(&rbd_dev_list_lock);
4369 list_add_tail(&rbd_dev->node, &rbd_dev_list);
4370 spin_unlock(&rbd_dev_list_lock);
4371 dout("rbd_dev %p given dev id %llu\n", rbd_dev,
4372 (unsigned long long) rbd_dev->dev_id);
4376 * Remove an rbd_dev from the global list, and record that its
4377 * identifier is no longer in use.
4379 static void rbd_dev_id_put(struct rbd_device *rbd_dev)
4381 struct list_head *tmp;
4382 int rbd_id = rbd_dev->dev_id;
4385 rbd_assert(rbd_id > 0);
4387 dout("rbd_dev %p released dev id %llu\n", rbd_dev,
4388 (unsigned long long) rbd_dev->dev_id);
4389 spin_lock(&rbd_dev_list_lock);
4390 list_del_init(&rbd_dev->node);
4393 * If the id being "put" is not the current maximum, there
4394 * is nothing special we need to do.
4396 if (rbd_id != atomic64_read(&rbd_dev_id_max)) {
4397 spin_unlock(&rbd_dev_list_lock);
4402 * We need to update the current maximum id. Search the
4403 * list to find out what it is. We're more likely to find
4404 * the maximum at the end, so search the list backward.
4407 list_for_each_prev(tmp, &rbd_dev_list) {
4408 struct rbd_device *rbd_dev;
4410 rbd_dev = list_entry(tmp, struct rbd_device, node);
4411 if (rbd_dev->dev_id > max_id)
4412 max_id = rbd_dev->dev_id;
4414 spin_unlock(&rbd_dev_list_lock);
4417 * The max id could have been updated by rbd_dev_id_get(), in
4418 * which case it now accurately reflects the new maximum.
4419 * Be careful not to overwrite the maximum value in that case.
4422 atomic64_cmpxchg(&rbd_dev_id_max, rbd_id, max_id);
4423 dout(" max dev id has been reset\n");
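/*
 * The cmpxchg above installs max_id only if rbd_dev_id_max still
 * holds the id just released; if another thread allocated, say, id 6
 * while we were releasing id 5, the counter is already correct and is
 * left alone.
 */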
4427 * Skips over white space at *buf, and updates *buf to point to the
4428 * first found non-space character (if any). Returns the length of
4429 * the token (string of non-white space characters) found. Note
4430 * that *buf must be terminated with '\0'.
4432 static inline size_t next_token(const char **buf)
4435 * These are the characters that produce nonzero for
4436 * isspace() in the "C" and "POSIX" locales.
4438 const char *spaces = " \f\n\r\t\v";
4440 *buf += strspn(*buf, spaces); /* Find start of token */
4442 return strcspn(*buf, spaces); /* Return token length */
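/*
 * For example, with *buf pointing at "  rbd foo", next_token()
 * advances *buf past the leading spaces to "rbd foo" and returns 3,
 * the length of "rbd".
 */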
4446 * Finds the next token in *buf, and if the provided token buffer is
4447 * big enough, copies the found token into it. The result, if
4448 * copied, is guaranteed to be terminated with '\0'. Note that *buf
4449 * must be terminated with '\0' on entry.
4451 * Returns the length of the token found (not including the '\0').
4452 * Return value will be 0 if no token is found, and it will be >=
4453 * token_size if the token would not fit.
4455 * The *buf pointer will be updated to point beyond the end of the
4456 * found token. Note that this occurs even if the token buffer is
4457 * too small to hold it.
4459 static inline size_t copy_token(const char **buf,
4465 len = next_token(buf);
4466 if (len < token_size) {
4467 memcpy(token, *buf, len);
4468 *(token + len) = '\0';
4476 * Finds the next token in *buf, dynamically allocates a buffer big
4477 * enough to hold a copy of it, and copies the token into the new
4478 * buffer. The copy is guaranteed to be terminated with '\0'. Note
4479 * that a duplicate buffer is created even for a zero-length token.
4481 * Returns a pointer to the newly-allocated duplicate, or a null
4482 * pointer if memory for the duplicate was not available. If
4483 * the lenp argument is a non-null pointer, the length of the token
4484 * (not including the '\0') is returned in *lenp.
4486 * If successful, the *buf pointer will be updated to point beyond
4487 * the end of the found token.
4489 * Note: uses GFP_KERNEL for allocation.
4491 static inline char *dup_token(const char **buf, size_t *lenp)
4496 len = next_token(buf);
4497 dup = kmemdup(*buf, len + 1, GFP_KERNEL);
4500 *(dup + len) = '\0';
4510 * Parse the options provided for an "rbd add" (i.e., rbd image
4511 * mapping) request. These arrive via a write to /sys/bus/rbd/add,
4512 * and the data written is passed here via a NUL-terminated buffer.
4513 * Returns 0 if successful or an error code otherwise.
4515 * The information extracted from these options is recorded in
4516 * the other parameters, which return dynamically-allocated structures:
4519 * The address of a pointer that will refer to a ceph options
4520 * structure. Caller must release the returned pointer using
4521 * ceph_destroy_options() when it is no longer needed.
4523 * Address of an rbd options pointer. Fully initialized by
4524 * this function; caller must release with kfree().
4526 * Address of an rbd image specification pointer. Fully
4527 * initialized by this function based on parsed options.
4528 * Caller must release with rbd_spec_put().
4530 * The options passed take this form:
4531 * <mon_addrs> <options> <pool_name> <image_name> [<snap_name>]
4534 * A comma-separated list of one or more monitor addresses.
4535 * A monitor address is an ip address, optionally followed
4536 * by a port number (separated by a colon).
4537 * I.e.: ip1[:port1][,ip2[:port2]...]
4539 * A comma-separated list of ceph and/or rbd options.
4541 * The name of the rados pool containing the rbd image.
4543 * The name of the image in that pool to map.
4545 * An optional snapshot name. If provided, the mapping will
4546 * present data from the image at the time that snapshot was
4547 * created. The image head is used if no snapshot name is
4548 * provided. Snapshot mappings are always read-only.
4550 static int rbd_add_parse_args(const char *buf,
4551 struct ceph_options **ceph_opts,
4552 struct rbd_options **opts,
4553 struct rbd_spec **rbd_spec)
4557 const char *mon_addrs;
4559 size_t mon_addrs_size;
4560 struct rbd_spec *spec = NULL;
4561 struct rbd_options *rbd_opts = NULL;
4562 struct ceph_options *copts;
4565 /* The first four tokens are required */
4567 len = next_token(&buf);
4569 rbd_warn(NULL, "no monitor address(es) provided");
4573 mon_addrs_size = len + 1;
4577 options = dup_token(&buf, NULL);
4581 rbd_warn(NULL, "no options provided");
4585 spec = rbd_spec_alloc();
4589 spec->pool_name = dup_token(&buf, NULL);
4590 if (!spec->pool_name)
4592 if (!*spec->pool_name) {
4593 rbd_warn(NULL, "no pool name provided");
4597 spec->image_name = dup_token(&buf, NULL);
4598 if (!spec->image_name)
4600 if (!*spec->image_name) {
4601 rbd_warn(NULL, "no image name provided");
4606 * Snapshot name is optional; default is to use "-"
4607 * (indicating the head/no snapshot).
4609 len = next_token(&buf);
4611 buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */
4612 len = sizeof (RBD_SNAP_HEAD_NAME) - 1;
4613 } else if (len > RBD_MAX_SNAP_NAME_LEN) {
4614 ret = -ENAMETOOLONG;
4617 snap_name = kmemdup(buf, len + 1, GFP_KERNEL);
4620 *(snap_name + len) = '\0';
4621 spec->snap_name = snap_name;
4623 /* Initialize all rbd options to the defaults */
	rbd_opts = kzalloc(sizeof (*rbd_opts), GFP_KERNEL);
	if (!rbd_opts)
		goto out_mem;

	rbd_opts->read_only = RBD_READ_ONLY_DEFAULT;
4631 copts = ceph_parse_options(options, mon_addrs,
4632 mon_addrs + mon_addrs_size - 1,
4633 parse_rbd_opts_token, rbd_opts);
	if (IS_ERR(copts)) {
		ret = PTR_ERR(copts);
		goto out_err;
	}
	kfree(options);

	*ceph_opts = copts;
	*opts = rbd_opts;
	*rbd_spec = spec;

	return 0;
out_mem:
	ret = -ENOMEM;
out_err:
	kfree(rbd_opts);
	rbd_spec_put(spec);
	kfree(options);

	return ret;
}
/*
 * An rbd format 2 image has a unique identifier, distinct from the
 * name given to it by the user.  Internally, that identifier is
 * what's used to specify the names of objects related to the image.
 *
 * A special "rbd id" object is used to map an rbd image name to its
 * id.  If that object doesn't exist, then there is no v2 rbd image
 * with the supplied name.
 *
 * This function will record the given rbd_dev's image_id field if
 * it can be determined, and in that case will return 0.  If any
 * errors occur a negative errno will be returned and the rbd_dev's
 * image_id field will be unchanged (and should be NULL).
 */
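/*
 * Concretely (image name illustrative): a format 2 image "myimage"
 * has an id object named "rbd_id.myimage" (RBD_ID_PREFIX followed by
 * the user-visible image name).  Its "get_id" class method returns
 * the image id, which in turn names the image's other objects, such
 * as the "rbd_header.<id>" header object.
 */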
static int rbd_dev_image_id(struct rbd_device *rbd_dev)
{
	int ret;
	size_t size;
	char *object_name;
	void *response;
	char *image_id;
	/*
	 * When probing a parent image, the image id is already
	 * known (and the image name likely is not).  There's no
	 * need to fetch the image id again in this case.  We
	 * do still need to set the image format though.
	 */
	if (rbd_dev->spec->image_id) {
		rbd_dev->image_format = *rbd_dev->spec->image_id ? 2 : 1;

		return 0;
	}
	/*
	 * First, see if the format 2 image id file exists, and if
	 * so, get the image's persistent id from it.
	 */
4693 size = sizeof (RBD_ID_PREFIX) + strlen(rbd_dev->spec->image_name);
	object_name = kmalloc(size, GFP_NOIO);
	if (!object_name)
		return -ENOMEM;
4697 sprintf(object_name, "%s%s", RBD_ID_PREFIX, rbd_dev->spec->image_name);
4698 dout("rbd id object name is %s\n", object_name);
4700 /* Response will be an encoded string, which includes a length */
4702 size = sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX;
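	/*
	 * (An encoded ceph string on the wire is a little-endian
	 * 32-bit length followed by that many bytes of data, hence
	 * the extra __le32 in the buffer size above.)
	 */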
	response = kzalloc(size, GFP_NOIO);
	if (!response) {
		ret = -ENOMEM;
		goto out;
	}
4709 /* If it doesn't exist we'll assume it's a format 1 image */
4711 ret = rbd_obj_method_sync(rbd_dev, object_name,
4712 "rbd", "get_id", NULL, 0,
4713 response, RBD_IMAGE_ID_LEN_MAX);
4714 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret == -ENOENT) {
		image_id = kstrdup("", GFP_KERNEL);
		ret = image_id ? 0 : -ENOMEM;
		if (!ret)
			rbd_dev->image_format = 1;
	} else if (ret > sizeof (__le32)) {
		void *p = response;

		image_id = ceph_extract_encoded_string(&p, p + ret,
						NULL, GFP_NOIO);
		ret = IS_ERR(image_id) ? PTR_ERR(image_id) : 0;
		if (!ret)
			rbd_dev->image_format = 2;
	} else {
		ret = -EINVAL;
	}
	if (!ret) {
		rbd_dev->spec->image_id = image_id;
		dout("image_id is %s\n", image_id);
	}
out:
	kfree(response);
	kfree(object_name);

	return ret;
}
/*
 * Undo whatever state changes are made by v1 or v2 header info
 * calls.
 */
4747 static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
4749 struct rbd_image_header *header;
4751 /* Drop parent reference unless it's already been done (or none) */
4753 if (rbd_dev->parent_overlap)
4754 rbd_dev_parent_put(rbd_dev);
4756 /* Free dynamic fields from the header, then zero it out */
4758 header = &rbd_dev->header;
4759 ceph_put_snap_context(header->snapc);
4760 kfree(header->snap_sizes);
4761 kfree(header->snap_names);
4762 kfree(header->object_prefix);
4763 memset(header, 0, sizeof (*header));
static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev)
{
	int ret;
	ret = rbd_dev_v2_object_prefix(rbd_dev);
	if (ret)
		goto out_err;
	/*
	 * Get and check the features for the image.  Currently the
	 * features are assumed to never change.
	 */
	ret = rbd_dev_v2_features(rbd_dev);
	if (ret)
		goto out_err;
4782 /* If the image supports fancy striping, get its parameters */
	if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) {
		ret = rbd_dev_v2_striping_info(rbd_dev);
		if (ret < 0)
			goto out_err;
	}
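	/*
	 * (A STRIPINGV2 image carries an explicit stripe unit and
	 * stripe count; images without the feature use the default
	 * layout, one object's worth of data at a time.)
	 */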
	/* Crypto and compression type aren't (yet) supported for v2 images */

	return 0;
out_err:
	rbd_dev->header.features = 0;
	kfree(rbd_dev->header.object_prefix);
	rbd_dev->header.object_prefix = NULL;

	return ret;
}
4800 static int rbd_dev_probe_parent(struct rbd_device *rbd_dev)
{
	struct rbd_device *parent = NULL;
	struct rbd_spec *parent_spec;
	struct rbd_client *rbdc;
	int ret;

	if (!rbd_dev->parent_spec)
		return 0;
	/*
	 * We need to pass a reference to the client and the parent
	 * spec when creating the parent rbd_dev.  Images related by
	 * parent/child relationships always share both.
	 */
4814 parent_spec = rbd_spec_get(rbd_dev->parent_spec);
4815 rbdc = __rbd_get_client(rbd_dev->rbd_client);
	ret = -ENOMEM;
	parent = rbd_dev_create(rbdc, parent_spec);
	if (!parent)
		goto out_err;

	ret = rbd_dev_image_probe(parent, false);
	if (ret < 0)
		goto out_err;
4825 rbd_dev->parent = parent;
	atomic_set(&rbd_dev->parent_ref, 1);

	return 0;
out_err:
	if (parent) {
		rbd_dev_unparent(rbd_dev);
		kfree(rbd_dev->header_name);
		rbd_dev_destroy(parent);
	} else {
		rbd_put_client(rbdc);
		rbd_spec_put(parent_spec);
	}

	return ret;
}
static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
{
	int ret;
4846 /* generate unique id: find highest unique id, add one */
4847 rbd_dev_id_get(rbd_dev);
4849 /* Fill in the device name, now that we have its id. */
4850 BUILD_BUG_ON(DEV_NAME_LEN
4851 < sizeof (RBD_DRV_NAME) + MAX_INT_FORMAT_WIDTH);
4852 sprintf(rbd_dev->name, "%s%d", RBD_DRV_NAME, rbd_dev->dev_id);
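	/*
	 * (Illustrative: a dev_id of 2 produces the name "rbd2",
	 * which then names the gendisk and hence the /dev/rbd2
	 * block device node.)
	 */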
4854 /* Get our block major device number. */
	ret = register_blkdev(0, rbd_dev->name);
	if (ret < 0)
		goto err_out_id;
	rbd_dev->major = ret;
4861 /* Set up the blkdev mapping. */
	ret = rbd_init_disk(rbd_dev);
	if (ret)
		goto err_out_blkdev;
	ret = rbd_dev_mapping_set(rbd_dev);
	if (ret)
		goto err_out_disk;
	set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
	ret = rbd_bus_add_dev(rbd_dev);
	if (ret)
		goto err_out_mapping;
4876 /* Everything's ready. Announce the disk to the world. */
4878 set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
4879 add_disk(rbd_dev->disk);
	pr_info("%s: added with size 0x%llx\n", rbd_dev->disk->disk_name,
		(unsigned long long) rbd_dev->mapping.size);

	return ret;

err_out_mapping:
	rbd_dev_mapping_clear(rbd_dev);
err_out_disk:
	rbd_free_disk(rbd_dev);
err_out_blkdev:
	unregister_blkdev(rbd_dev->major, rbd_dev->name);
err_out_id:
	rbd_dev_id_put(rbd_dev);
	rbd_dev_mapping_clear(rbd_dev);

	return ret;
}
4899 static int rbd_dev_header_name(struct rbd_device *rbd_dev)
{
	struct rbd_spec *spec = rbd_dev->spec;
	size_t size;
4904 /* Record the header object name for this rbd image. */
4906 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
4908 if (rbd_dev->image_format == 1)
4909 size = strlen(spec->image_name) + sizeof (RBD_SUFFIX);
	else
		size = sizeof (RBD_HEADER_PREFIX) + strlen(spec->image_id);
4913 rbd_dev->header_name = kmalloc(size, GFP_KERNEL);
	if (!rbd_dev->header_name)
		return -ENOMEM;
	if (rbd_dev->image_format == 1)
		sprintf(rbd_dev->header_name, "%s%s",
			spec->image_name, RBD_SUFFIX);
	else
		sprintf(rbd_dev->header_name, "%s%s",
			RBD_HEADER_PREFIX, spec->image_id);

	return 0;
}
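/*
 * Concretely (names illustrative): a format 1 image "foo" has header
 * object "foo.rbd" (RBD_SUFFIX is ".rbd"), while a format 2 image
 * whose id is "1234" has header object "rbd_header.1234"
 * (RBD_HEADER_PREFIX is "rbd_header.").
 */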
4926 static void rbd_dev_image_release(struct rbd_device *rbd_dev)
4928 rbd_dev_unprobe(rbd_dev);
4929 kfree(rbd_dev->header_name);
4930 rbd_dev->header_name = NULL;
4931 rbd_dev->image_format = 0;
4932 kfree(rbd_dev->spec->image_id);
4933 rbd_dev->spec->image_id = NULL;
4935 rbd_dev_destroy(rbd_dev);
/*
 * Probe for the existence of the header object for the given rbd
 * device.  If this image is the one being mapped (i.e., not a
 * parent), initiate a watch on its header object before using that
 * object to get detailed information about the rbd image.
 */
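/*
 * Roughly, the sequence below is: determine the image id and format;
 * build the header object name; establish a watch (only for the
 * image being mapped); read the v1 or v2 header info; fill out the
 * spec; and finally probe the parent image, if there is one.
 */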
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping)
{
	int ret;
	int tmp;
	/*
	 * Get the id from the image id object.  Unless there's an
	 * error, rbd_dev->spec->image_id will be filled in with
	 * a dynamically-allocated string, and rbd_dev->image_format
	 * will be set to either 1 or 2.
	 */
	ret = rbd_dev_image_id(rbd_dev);
	if (ret)
		return ret;
4958 rbd_assert(rbd_dev->spec->image_id);
4959 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	ret = rbd_dev_header_name(rbd_dev);
	if (ret)
		goto err_out_format;

	if (mapping) {
		ret = rbd_dev_header_watch_sync(rbd_dev, true);
		if (ret)
			goto out_header_name;
	}
	if (rbd_dev->image_format == 1)
		ret = rbd_dev_v1_header_info(rbd_dev);
	else
		ret = rbd_dev_v2_header_info(rbd_dev);
	if (ret)
		goto err_out_watch;
	ret = rbd_dev_spec_update(rbd_dev);
	if (ret)
		goto err_out_probe;

	ret = rbd_dev_probe_parent(rbd_dev);
	if (ret)
		goto err_out_probe;
4986 dout("discovered format %u image, header name is %s\n",
4987 rbd_dev->image_format, rbd_dev->header_name);
	return 0;

err_out_probe:
	rbd_dev_unprobe(rbd_dev);
err_out_watch:
	if (mapping) {
		tmp = rbd_dev_header_watch_sync(rbd_dev, false);
		if (tmp)
			rbd_warn(rbd_dev, "unable to tear down watch request (%d)\n",
				tmp);
	}
out_header_name:
	kfree(rbd_dev->header_name);
	rbd_dev->header_name = NULL;
err_out_format:
	rbd_dev->image_format = 0;
	kfree(rbd_dev->spec->image_id);
	rbd_dev->spec->image_id = NULL;

	dout("probe failed, returning %d\n", ret);

	return ret;
}
static ssize_t rbd_add(struct bus_type *bus,
		       const char *buf,
		       size_t count)
{
5016 struct rbd_device *rbd_dev = NULL;
5017 struct ceph_options *ceph_opts = NULL;
5018 struct rbd_options *rbd_opts = NULL;
5019 struct rbd_spec *spec = NULL;
5020 struct rbd_client *rbdc;
	struct ceph_osd_client *osdc;
	bool read_only;
	int rc = -ENOMEM;
	if (!try_module_get(THIS_MODULE))
		return -ENODEV;
5028 /* parse add command */
	rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
	if (rc < 0)
		goto err_out_module;
	read_only = rbd_opts->read_only;
	kfree(rbd_opts);
	rbd_opts = NULL;	/* done with this */
	rbdc = rbd_get_client(ceph_opts);
	if (IS_ERR(rbdc)) {
		rc = PTR_ERR(rbdc);
		goto err_out_args;
	}
5043 osdc = &rbdc->client->osdc;
	rc = ceph_pg_poolid_by_name(osdc->osdmap, spec->pool_name);
	if (rc < 0)
		goto err_out_client;
5047 spec->pool_id = (u64)rc;
5049 /* The ceph file layout needs to fit pool id in 32 bits */
	if (spec->pool_id > (u64)U32_MAX) {
		rbd_warn(NULL, "pool id too large (%llu > %u)\n",
			(unsigned long long)spec->pool_id, U32_MAX);
		rc = -EIO;
		goto err_out_client;
	}
	rbd_dev = rbd_dev_create(rbdc, spec);
	if (!rbd_dev)
		goto err_out_client;
5061 rbdc = NULL; /* rbd_dev now owns this */
5062 spec = NULL; /* rbd_dev now owns this */
	rc = rbd_dev_image_probe(rbd_dev, true);
	if (rc < 0)
		goto err_out_rbd_dev;
5068 /* If we are mapping a snapshot it must be marked read-only */
	if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
		read_only = true;
	rbd_dev->mapping.read_only = read_only;
	rc = rbd_dev_device_setup(rbd_dev);
	if (rc) {
		rbd_dev_image_release(rbd_dev);
		goto err_out_module;
	}

	return count;

err_out_rbd_dev:
	rbd_dev_destroy(rbd_dev);
err_out_client:
	rbd_put_client(rbdc);
err_out_args:
	rbd_spec_put(spec);
err_out_module:
	module_put(THIS_MODULE);

	dout("Error adding device %s\n", buf);

	return (ssize_t)rc;
}
5096 static struct rbd_device *__rbd_get_dev(unsigned long dev_id)
5098 struct list_head *tmp;
5099 struct rbd_device *rbd_dev;
5101 spin_lock(&rbd_dev_list_lock);
5102 list_for_each(tmp, &rbd_dev_list) {
5103 rbd_dev = list_entry(tmp, struct rbd_device, node);
5104 if (rbd_dev->dev_id == dev_id) {
			spin_unlock(&rbd_dev_list_lock);

			return rbd_dev;
		}
	}
	spin_unlock(&rbd_dev_list_lock);

	return NULL;
}
5113 static void rbd_dev_device_release(struct device *dev)
5115 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5117 rbd_free_disk(rbd_dev);
5118 clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
5119 rbd_dev_mapping_clear(rbd_dev);
5120 unregister_blkdev(rbd_dev->major, rbd_dev->name);
5122 rbd_dev_id_put(rbd_dev);
5123 rbd_dev_mapping_clear(rbd_dev);
5126 static void rbd_dev_remove_parent(struct rbd_device *rbd_dev)
5128 while (rbd_dev->parent) {
5129 struct rbd_device *first = rbd_dev;
5130 struct rbd_device *second = first->parent;
5131 struct rbd_device *third;
		/*
		 * Follow to the parent with no grandparent and
		 * remove it.
		 */
		while (second && (third = second->parent)) {
			first = second;
			second = third;
		}
5142 rbd_dev_image_release(second);
5143 first->parent = NULL;
5144 first->parent_overlap = 0;
5146 rbd_assert(first->parent_spec);
5147 rbd_spec_put(first->parent_spec);
5148 first->parent_spec = NULL;
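/*
 * That is, for a chain image -> parent -> grandparent, the most
 * distant ancestor is released first, and the loop repeats until
 * the mapped image itself has no parent left.
 */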
static ssize_t rbd_remove(struct bus_type *bus,
			  const char *buf,
			  size_t count)
{
	struct rbd_device *rbd_dev = NULL;
	int target_id;
	unsigned long ul;
	int ret;

	ret = strict_strtoul(buf, 10, &ul);
	if (ret)
		return ret;

	/* convert to int; abort if we lost anything in the conversion */
	target_id = (int)ul;
	if (target_id != ul)
		return -EINVAL;

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);

	ret = -ENOENT;
	rbd_dev = __rbd_get_dev(target_id);
	if (!rbd_dev)
		goto done;

	ret = 0;
	spin_lock_irq(&rbd_dev->lock);
	if (rbd_dev->open_count)
		ret = -EBUSY;
	else
		set_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags);
	spin_unlock_irq(&rbd_dev->lock);
	if (ret < 0)
		goto done;

	rbd_bus_del_dev(rbd_dev);
	ret = rbd_dev_header_watch_sync(rbd_dev, false);
	if (ret)
		rbd_warn(rbd_dev, "failed to cancel watch event (%d)\n", ret);
	rbd_dev_image_release(rbd_dev);
	module_put(THIS_MODULE);
	ret = count;
done:
	mutex_unlock(&ctl_mutex);

	return ret;
}
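/*
 * For example (device id illustrative):
 *
 *	$ echo 1 > /sys/bus/rbd/remove
 *
 * unmaps /dev/rbd1, provided no one still holds it open.
 */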
/*
 * create control files in sysfs
 * /sys/bus/rbd/...
 */
5203 static int rbd_sysfs_init(void)
{
	int ret;

	ret = device_register(&rbd_root_dev);
	if (ret < 0)
		return ret;

	ret = bus_register(&rbd_bus_type);
	if (ret < 0)
		device_unregister(&rbd_root_dev);

	return ret;
}
5218 static void rbd_sysfs_cleanup(void)
5220 bus_unregister(&rbd_bus_type);
5221 device_unregister(&rbd_root_dev);
5224 static int rbd_slab_init(void)
5226 rbd_assert(!rbd_img_request_cache);
5227 rbd_img_request_cache = kmem_cache_create("rbd_img_request",
5228 sizeof (struct rbd_img_request),
5229 __alignof__(struct rbd_img_request),
					0, NULL);
	if (!rbd_img_request_cache)
		return -ENOMEM;
5234 rbd_assert(!rbd_obj_request_cache);
5235 rbd_obj_request_cache = kmem_cache_create("rbd_obj_request",
5236 sizeof (struct rbd_obj_request),
5237 __alignof__(struct rbd_obj_request),
					0, NULL);
	if (!rbd_obj_request_cache)
		goto out_err;
5242 rbd_assert(!rbd_segment_name_cache);
5243 rbd_segment_name_cache = kmem_cache_create("rbd_segment_name",
5244 MAX_OBJ_NAME_SIZE + 1, 1, 0, NULL);
	if (rbd_segment_name_cache)
		return 0;
out_err:
5248 if (rbd_obj_request_cache) {
5249 kmem_cache_destroy(rbd_obj_request_cache);
5250 rbd_obj_request_cache = NULL;
5253 kmem_cache_destroy(rbd_img_request_cache);
	rbd_img_request_cache = NULL;

	return -ENOMEM;
}
5259 static void rbd_slab_exit(void)
5261 rbd_assert(rbd_segment_name_cache);
5262 kmem_cache_destroy(rbd_segment_name_cache);
5263 rbd_segment_name_cache = NULL;
5265 rbd_assert(rbd_obj_request_cache);
5266 kmem_cache_destroy(rbd_obj_request_cache);
5267 rbd_obj_request_cache = NULL;
5269 rbd_assert(rbd_img_request_cache);
5270 kmem_cache_destroy(rbd_img_request_cache);
5271 rbd_img_request_cache = NULL;
5274 static int __init rbd_init(void)
{
	int rc;

	if (!libceph_compatible(NULL)) {
		rbd_warn(NULL, "libceph incompatibility (quitting)");
		return -EINVAL;
	}
	rc = rbd_slab_init();
	if (rc)
		return rc;
	rc = rbd_sysfs_init();
	if (rc)
		rbd_slab_exit();
	else
		pr_info("loaded " RBD_DRV_NAME_LONG "\n");

	return rc;
}
5295 static void __exit rbd_exit(void)
{
	rbd_sysfs_cleanup();
	rbd_slab_exit();
}
5301 module_init(rbd_init);
5302 module_exit(rbd_exit);
5304 MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
5305 MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
5306 MODULE_DESCRIPTION("rados block device");
5308 /* following authorship retained from original osdblk.c */
5309 MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>");
5311 MODULE_LICENSE("GPL");