3 rbd.c -- Export ceph rados objects as a Linux block device
6 based on drivers/block/osdblk.c:
8 Copyright 2009 Red Hat, Inc.
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program; see the file COPYING. If not, write to
21 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 For usage instructions, please refer to:
27 Documentation/ABI/testing/sysfs-bus-rbd
31 #include <linux/ceph/libceph.h>
32 #include <linux/ceph/osd_client.h>
33 #include <linux/ceph/mon_client.h>
34 #include <linux/ceph/decode.h>
35 #include <linux/parser.h>
36 #include <linux/bsearch.h>
38 #include <linux/kernel.h>
39 #include <linux/device.h>
40 #include <linux/module.h>
42 #include <linux/blkdev.h>
43 #include <linux/slab.h>
44 #include <linux/idr.h>
46 #include "rbd_types.h"
48 #define RBD_DEBUG /* Activate rbd_assert() calls */
51 * The basic unit of block I/O is a sector. It is interpreted in a
52 * number of contexts in Linux (blk, bio, genhd), but the default is
53 * universally 512 bytes. These symbols are just slightly more
54 * meaningful than the bare numbers they represent.
56 #define SECTOR_SHIFT 9
57 #define SECTOR_SIZE (1ULL << SECTOR_SHIFT)
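/*
 * For instance (a sketch of the gendisk setup, which is not shown in
 * this excerpt): the block layer publishes capacity in these units,
 * so an image of mapping.size bytes is registered as
 *
 *	set_capacity(disk, mapping.size >> SECTOR_SHIFT);
 *
 * sectors.
 */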
60 * Increment the given counter and return its updated value.
61 * If the counter is already 0 it will not be incremented.
62 * If the counter is already at its maximum value, -EINVAL is
63 * returned without updating it.
65 static int atomic_inc_return_safe(atomic_t *v)
69 counter = (unsigned int)__atomic_add_unless(v, 1, 0);
70 if (counter <= (unsigned int)INT_MAX)
78 /* Decrement the counter. Return the resulting value, or -EINVAL */
79 static int atomic_dec_return_safe(atomic_t *v)
83 counter = atomic_dec_return(v);
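/*
 * Together the two helpers above implement a saturating reference
 * count.  A minimal usage sketch, mirroring how rbd_dev_parent_get()
 * and rbd_dev_parent_put() below use them:
 *
 *	if (atomic_inc_return_safe(&ref) > 0) {
 *		... use the counted object ...
 *		if (atomic_dec_return_safe(&ref) < 0)
 *			... report reference underflow ...
 *	}
 */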
92 #define RBD_DRV_NAME "rbd"
94 #define RBD_MINORS_PER_MAJOR 256
95 #define RBD_SINGLE_MAJOR_PART_SHIFT 4
97 #define RBD_SNAP_DEV_NAME_PREFIX "snap_"
98 #define RBD_MAX_SNAP_NAME_LEN \
99 (NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))
101 #define RBD_MAX_SNAP_COUNT 510 /* allows max snapc to fit in 4KB */
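/*
 * Where those limits come from: NAME_MAX is 255 and the "snap_"
 * prefix is 5 bytes, so a snapshot name may be at most 250 bytes.
 * And 510 snapshot ids of sizeof (__le64) bytes each occupy 4080
 * bytes, which together with the snapshot context's sequence and
 * count fields still fits in a single 4KB buffer.
 */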
103 #define RBD_SNAP_HEAD_NAME "-"
105 #define BAD_SNAP_INDEX U32_MAX /* invalid index into snap array */
107 /* This allows a single page to hold an image name sent by the OSD */
108 #define RBD_IMAGE_NAME_LEN_MAX (PAGE_SIZE - sizeof (__le32) - 1)
109 #define RBD_IMAGE_ID_LEN_MAX 64
111 #define RBD_OBJ_PREFIX_LEN_MAX 64
115 #define RBD_FEATURE_LAYERING (1<<0)
116 #define RBD_FEATURE_STRIPINGV2 (1<<1)
117 #define RBD_FEATURES_ALL \
118 (RBD_FEATURE_LAYERING | RBD_FEATURE_STRIPINGV2)
120 /* Features supported by this (client software) implementation. */
122 #define RBD_FEATURES_SUPPORTED (RBD_FEATURES_ALL)
125 * An RBD device name will be "rbd#", where the "rbd" comes from
126 * RBD_DRV_NAME above, and # is a unique integer identifier.
127 * MAX_INT_FORMAT_WIDTH is used to ensure DEV_NAME_LEN is big
128 * enough to hold all possible device names.
130 #define DEV_NAME_LEN 32
131 #define MAX_INT_FORMAT_WIDTH ((5 * sizeof (int)) / 2 + 1)
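/*
 * The 5/2 factor above over-approximates the ~2.41 decimal digits
 * (log10(256)) carried per byte.  With 4-byte ints that works out to
 * (5 * 4) / 2 + 1 = 11 characters, exactly enough for "-2147483648".
 */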
134 * block device image metadata (in-memory version)
136 struct rbd_image_header {
137 /* These six fields never change for a given rbd image */
144 u64 features; /* Might be changeable someday? */
146 /* The remaining fields need to be updated occasionally */
148 struct ceph_snap_context *snapc;
149 char *snap_names; /* format 1 only */
150 u64 *snap_sizes; /* format 1 only */
154 * An rbd image specification.
156 * The tuple (pool_id, image_id, snap_id) is sufficient to uniquely
157 * identify an image. Each rbd_dev structure includes a pointer to
158 * an rbd_spec structure that encapsulates this identity.
160 * Each of the ids in an rbd_spec has an associated name. For a
161 * user-mapped image, the names are supplied and the ids associated
162 * with them are looked up. For a layered image, a parent image is
163 * defined by the tuple, and the names are looked up.
165 * An rbd_dev structure contains a parent_spec pointer which is
166 * non-null if the image it represents is a child in a layered
167 * image. This pointer will refer to the rbd_spec structure used
168 * by the parent rbd_dev for its own identity (i.e., the structure
169 * is shared between the parent and child).
171 * Since these structures are populated once, during the discovery
172 * phase of image construction, they are effectively immutable so
173 * we make no effort to synchronize access to them.
175 * Note that code herein does not assume the image name is known (it
176 * could be a null pointer).
180 const char *pool_name;
182 const char *image_id;
183 const char *image_name;
186 const char *snap_name;
192 * An instance of the client. Multiple devices may share an rbd client.
195 struct ceph_client *client;
197 struct list_head node;
200 struct rbd_img_request;
201 typedef void (*rbd_img_callback_t)(struct rbd_img_request *);
203 #define BAD_WHICH U32_MAX /* Good which or bad which, which? */
205 struct rbd_obj_request;
206 typedef void (*rbd_obj_callback_t)(struct rbd_obj_request *);
208 enum obj_request_type {
209 OBJ_REQUEST_NODATA, OBJ_REQUEST_BIO, OBJ_REQUEST_PAGES
213 OBJ_REQ_DONE, /* completion flag: not done = 0, done = 1 */
214 OBJ_REQ_IMG_DATA, /* object usage: standalone = 0, image = 1 */
215 OBJ_REQ_KNOWN, /* EXISTS flag valid: no = 0, yes = 1 */
216 OBJ_REQ_EXISTS, /* target exists: no = 0, yes = 1 */
219 struct rbd_obj_request {
220 const char *object_name;
221 u64 offset; /* object start byte */
222 u64 length; /* bytes from offset */
226 * An object request associated with an image will have its
227 * img_data flag set; a standalone object request will not.
229 * A standalone object request will have which == BAD_WHICH
230 * and a null obj_request pointer.
232 * An object request initiated in support of a layered image
233 * object (to check for its existence before a write) will
234 * have which == BAD_WHICH and a non-null obj_request pointer.
236 * Finally, an object request for rbd image data will have
237 * which != BAD_WHICH, and will have a non-null img_request
238 * pointer. The value of which will be in the range
239 * 0..(img_request->obj_request_count-1).
242 struct rbd_obj_request *obj_request; /* STAT op */
244 struct rbd_img_request *img_request;
246 /* links for img_request->obj_requests list */
247 struct list_head links;
250 u32 which; /* position in image request list */
252 enum obj_request_type type;
254 struct bio *bio_list;
260 struct page **copyup_pages;
261 u32 copyup_page_count;
263 struct ceph_osd_request *osd_req;
265 u64 xferred; /* bytes transferred */
268 rbd_obj_callback_t callback;
269 struct completion completion;
275 IMG_REQ_WRITE, /* I/O direction: read = 0, write = 1 */
276 IMG_REQ_CHILD, /* initiator: block = 0, child image = 1 */
277 IMG_REQ_LAYERED, /* ENOENT handling: normal = 0, layered = 1 */
280 struct rbd_img_request {
281 struct rbd_device *rbd_dev;
282 u64 offset; /* starting image byte offset */
283 u64 length; /* byte count from offset */
286 u64 snap_id; /* for reads */
287 struct ceph_snap_context *snapc; /* for writes */
290 struct request *rq; /* block request */
291 struct rbd_obj_request *obj_request; /* obj req initiator */
293 struct page **copyup_pages;
294 u32 copyup_page_count;
295 spinlock_t completion_lock; /* protects next_completion */
297 rbd_img_callback_t callback;
298 u64 xferred; /* aggregate bytes transferred */
299 int result; /* first nonzero obj_request result */
301 u32 obj_request_count;
302 struct list_head obj_requests; /* rbd_obj_request structs */
307 #define for_each_obj_request(ireq, oreq) \
308 list_for_each_entry(oreq, &(ireq)->obj_requests, links)
309 #define for_each_obj_request_from(ireq, oreq) \
310 list_for_each_entry_from(oreq, &(ireq)->obj_requests, links)
311 #define for_each_obj_request_safe(ireq, oreq, n) \
312 list_for_each_entry_safe_reverse(oreq, n, &(ireq)->obj_requests, links)
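/*
 * A minimal usage sketch for the iterators above, mirroring how
 * rbd_img_request_complete() below totals per-object byte counts:
 *
 *	struct rbd_obj_request *obj_request;
 *	u64 xferred = 0;
 *
 *	for_each_obj_request(img_request, obj_request)
 *		xferred += obj_request->xferred;
 */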
324 int dev_id; /* blkdev unique id */
326 int major; /* blkdev assigned major */
328 struct gendisk *disk; /* blkdev's gendisk and rq */
330 u32 image_format; /* Either 1 or 2 */
331 struct rbd_client *rbd_client;
333 char name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */
335 spinlock_t lock; /* queue, flags, open_count */
337 struct rbd_image_header header;
338 unsigned long flags; /* possibly lock protected */
339 struct rbd_spec *spec;
343 struct ceph_file_layout layout;
345 struct ceph_osd_event *watch_event;
346 struct rbd_obj_request *watch_request;
348 struct rbd_spec *parent_spec;
351 struct rbd_device *parent;
353 /* protects updating the header */
354 struct rw_semaphore header_rwsem;
356 struct rbd_mapping mapping;
358 struct list_head node;
362 unsigned long open_count; /* protected by lock */
366 * Flag bits for rbd_dev->flags. If atomicity is required,
367 * rbd_dev->lock is used to protect access.
369 * Currently, only the "removing" flag (which is coupled with the
370 * "open_count" field) requires atomic access.
373 RBD_DEV_FLAG_EXISTS, /* mapped snapshot has not been deleted */
374 RBD_DEV_FLAG_REMOVING, /* this mapping is being removed */
377 static DEFINE_MUTEX(client_mutex); /* Serialize client creation */
379 static LIST_HEAD(rbd_dev_list); /* devices */
380 static DEFINE_SPINLOCK(rbd_dev_list_lock);
382 static LIST_HEAD(rbd_client_list); /* clients */
383 static DEFINE_SPINLOCK(rbd_client_list_lock);
385 /* Slab caches for frequently-allocated structures */
387 static struct kmem_cache *rbd_img_request_cache;
388 static struct kmem_cache *rbd_obj_request_cache;
389 static struct kmem_cache *rbd_segment_name_cache;
391 static int rbd_major;
392 static DEFINE_IDA(rbd_dev_id_ida);
395 * Default to false for now, as single-major requires version 0.75
396 * or later of the userspace rbd utility.
398 static bool single_major = false;
399 module_param(single_major, bool, S_IRUGO);
400 MODULE_PARM_DESC(single_major, "Use a single major number for all rbd devices (default: false)");
402 static int rbd_img_request_submit(struct rbd_img_request *img_request);
404 static void rbd_dev_device_release(struct device *dev);
406 static ssize_t rbd_add(struct bus_type *bus, const char *buf,
408 static ssize_t rbd_remove(struct bus_type *bus, const char *buf,
410 static ssize_t rbd_add_single_major(struct bus_type *bus, const char *buf,
412 static ssize_t rbd_remove_single_major(struct bus_type *bus, const char *buf,
414 static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping);
415 static void rbd_spec_put(struct rbd_spec *spec);
417 static int rbd_dev_id_to_minor(int dev_id)
419 return dev_id << RBD_SINGLE_MAJOR_PART_SHIFT;
422 static int minor_to_rbd_dev_id(int minor)
424 return minor >> RBD_SINGLE_MAJOR_PART_SHIFT;
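/*
 * With RBD_SINGLE_MAJOR_PART_SHIFT == 4, each device owns a block of
 * 16 minors: dev_id 0 maps to minors 0..15, dev_id 1 to 16..31, and
 * so on, leaving room for 15 partitions per mapped image.  The two
 * helpers above are exact inverses for the whole-device minor.
 */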
427 static BUS_ATTR(add, S_IWUSR, NULL, rbd_add);
428 static BUS_ATTR(remove, S_IWUSR, NULL, rbd_remove);
429 static BUS_ATTR(add_single_major, S_IWUSR, NULL, rbd_add_single_major);
430 static BUS_ATTR(remove_single_major, S_IWUSR, NULL, rbd_remove_single_major);
432 static struct attribute *rbd_bus_attrs[] = {
434 &bus_attr_remove.attr,
435 &bus_attr_add_single_major.attr,
436 &bus_attr_remove_single_major.attr,
440 static umode_t rbd_bus_is_visible(struct kobject *kobj,
441 struct attribute *attr, int index)
444 (attr == &bus_attr_add_single_major.attr ||
445 attr == &bus_attr_remove_single_major.attr))
451 static const struct attribute_group rbd_bus_group = {
452 .attrs = rbd_bus_attrs,
453 .is_visible = rbd_bus_is_visible,
455 __ATTRIBUTE_GROUPS(rbd_bus);
457 static struct bus_type rbd_bus_type = {
459 .bus_groups = rbd_bus_groups,
462 static void rbd_root_dev_release(struct device *dev)
466 static struct device rbd_root_dev = {
468 .release = rbd_root_dev_release,
471 static __printf(2, 3)
472 void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
474 struct va_format vaf;
482 printk(KERN_WARNING "%s: %pV\n", RBD_DRV_NAME, &vaf);
483 else if (rbd_dev->disk)
484 printk(KERN_WARNING "%s: %s: %pV\n",
485 RBD_DRV_NAME, rbd_dev->disk->disk_name, &vaf);
486 else if (rbd_dev->spec && rbd_dev->spec->image_name)
487 printk(KERN_WARNING "%s: image %s: %pV\n",
488 RBD_DRV_NAME, rbd_dev->spec->image_name, &vaf);
489 else if (rbd_dev->spec && rbd_dev->spec->image_id)
490 printk(KERN_WARNING "%s: id %s: %pV\n",
491 RBD_DRV_NAME, rbd_dev->spec->image_id, &vaf);
493 printk(KERN_WARNING "%s: rbd_dev %p: %pV\n",
494 RBD_DRV_NAME, rbd_dev, &vaf);
499 #define rbd_assert(expr) \
500 if (unlikely(!(expr))) { \
501 printk(KERN_ERR "\nAssertion failure in %s() " \
502 "at line %d:\n\n" \
503 "\trbd_assert(%s);\n\n", \
504 __func__, __LINE__, #expr); \
507 #else /* !RBD_DEBUG */
508 # define rbd_assert(expr) ((void) 0)
509 #endif /* !RBD_DEBUG */
511 static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request);
512 static void rbd_img_parent_read(struct rbd_obj_request *obj_request);
513 static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);
515 static int rbd_dev_refresh(struct rbd_device *rbd_dev);
516 static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev);
517 static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev);
518 static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
520 static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
521 u8 *order, u64 *snap_size);
522 static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
524 static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name);
526 static int rbd_open(struct block_device *bdev, fmode_t mode)
528 struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
529 bool removing = false;
531 if ((mode & FMODE_WRITE) && rbd_dev->mapping.read_only)
534 spin_lock_irq(&rbd_dev->lock);
535 if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
538 rbd_dev->open_count++;
539 spin_unlock_irq(&rbd_dev->lock);
543 (void) get_device(&rbd_dev->dev);
544 set_device_ro(bdev, rbd_dev->mapping.read_only);
549 static void rbd_release(struct gendisk *disk, fmode_t mode)
551 struct rbd_device *rbd_dev = disk->private_data;
552 unsigned long open_count_before;
554 spin_lock_irq(&rbd_dev->lock);
555 open_count_before = rbd_dev->open_count--;
556 spin_unlock_irq(&rbd_dev->lock);
557 rbd_assert(open_count_before > 0);
559 put_device(&rbd_dev->dev);
562 static const struct block_device_operations rbd_bd_ops = {
563 .owner = THIS_MODULE,
565 .release = rbd_release,
569 * Initialize an rbd client instance. Success or not, this function
570 * consumes ceph_opts. Caller holds client_mutex.
572 static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
574 struct rbd_client *rbdc;
577 dout("%s:\n", __func__);
578 rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL);
582 kref_init(&rbdc->kref);
583 INIT_LIST_HEAD(&rbdc->node);
585 rbdc->client = ceph_create_client(ceph_opts, rbdc, 0, 0);
586 if (IS_ERR(rbdc->client))
588 ceph_opts = NULL; /* Now rbdc->client is responsible for ceph_opts */
590 ret = ceph_open_session(rbdc->client);
594 spin_lock(&rbd_client_list_lock);
595 list_add_tail(&rbdc->node, &rbd_client_list);
596 spin_unlock(&rbd_client_list_lock);
598 dout("%s: rbdc %p\n", __func__, rbdc);
602 ceph_destroy_client(rbdc->client);
607 ceph_destroy_options(ceph_opts);
608 dout("%s: error %d\n", __func__, ret);
613 static struct rbd_client *__rbd_get_client(struct rbd_client *rbdc)
615 kref_get(&rbdc->kref);
621 * Find a ceph client with specific addr and configuration. If
622 * found, bump its reference count.
624 static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
626 struct rbd_client *client_node;
629 if (ceph_opts->flags & CEPH_OPT_NOSHARE)
632 spin_lock(&rbd_client_list_lock);
633 list_for_each_entry(client_node, &rbd_client_list, node) {
634 if (!ceph_compare_options(ceph_opts, client_node->client)) {
635 __rbd_get_client(client_node);
641 spin_unlock(&rbd_client_list_lock);
643 return found ? client_node : NULL;
653 /* string args above */
656 /* Boolean args above */
660 static match_table_t rbd_opts_tokens = {
662 /* string args above */
663 {Opt_read_only, "read_only"},
664 {Opt_read_only, "ro"}, /* Alternate spelling */
665 {Opt_read_write, "read_write"},
666 {Opt_read_write, "rw"}, /* Alternate spelling */
667 /* Boolean args above */
675 #define RBD_READ_ONLY_DEFAULT false
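/*
 * For example, an options string containing the token "ro" reaches
 * parse_rbd_opts_token() below, matches Opt_read_only, and sets
 * rbd_opts->read_only = true; "rw" selects Opt_read_write and clears
 * it again.
 */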
677 static int parse_rbd_opts_token(char *c, void *private)
679 struct rbd_options *rbd_opts = private;
680 substring_t argstr[MAX_OPT_ARGS];
681 int token, intval, ret;
683 token = match_token(c, rbd_opts_tokens, argstr);
687 if (token < Opt_last_int) {
688 ret = match_int(&argstr[0], &intval);
690 pr_err("bad mount option arg (not int) "
694 dout("got int token %d val %d\n", token, intval);
695 } else if (token > Opt_last_int && token < Opt_last_string) {
696 dout("got string token %d val %s\n", token,
698 } else if (token > Opt_last_string && token < Opt_last_bool) {
699 dout("got Boolean token %d\n", token);
701 dout("got token %d\n", token);
706 rbd_opts->read_only = true;
709 rbd_opts->read_only = false;
719 * Get a ceph client with specific addr and configuration; if one does
720 * not exist, create it. Either way, ceph_opts is consumed by this
721 * function.
723 static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
725 struct rbd_client *rbdc;
727 mutex_lock_nested(&client_mutex, SINGLE_DEPTH_NESTING);
728 rbdc = rbd_client_find(ceph_opts);
729 if (rbdc) /* using an existing client */
730 ceph_destroy_options(ceph_opts);
732 rbdc = rbd_client_create(ceph_opts);
733 mutex_unlock(&client_mutex);
739 * Destroy ceph client.
741 * Takes rbd_client_list_lock itself, so the caller must not hold it.
743 static void rbd_client_release(struct kref *kref)
745 struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);
747 dout("%s: rbdc %p\n", __func__, rbdc);
748 spin_lock(&rbd_client_list_lock);
749 list_del(&rbdc->node);
750 spin_unlock(&rbd_client_list_lock);
752 ceph_destroy_client(rbdc->client);
757 * Drop reference to ceph client node. If it's not referenced anymore, release
758 * it.
760 static void rbd_put_client(struct rbd_client *rbdc)
763 kref_put(&rbdc->kref, rbd_client_release);
766 static bool rbd_image_format_valid(u32 image_format)
768 return image_format == 1 || image_format == 2;
771 static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
776 /* The header has to start with the magic rbd header text */
777 if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)))
780 /* The bio layer requires at least sector-sized I/O */
782 if (ondisk->options.order < SECTOR_SHIFT)
785 /* If we use u64 in a few spots we may be able to loosen this */
787 if (ondisk->options.order > 8 * sizeof (int) - 1)
791 * The size of a snapshot header has to fit in a size_t, and
792 * that limits the number of snapshots.
794 snap_count = le32_to_cpu(ondisk->snap_count);
795 size = SIZE_MAX - sizeof (struct ceph_snap_context);
796 if (snap_count > size / sizeof (__le64))
800 * Not only that, but the size of the entire snapshot
801 * header must also be representable in a size_t.
803 size -= snap_count * sizeof (__le64);
804 if ((u64) size < le64_to_cpu(ondisk->snap_names_len))
811 * Fill an rbd image header with information from the given format 1
812 * ondisk header.
814 static int rbd_header_from_disk(struct rbd_device *rbd_dev,
815 struct rbd_image_header_ondisk *ondisk)
817 struct rbd_image_header *header = &rbd_dev->header;
818 bool first_time = header->object_prefix == NULL;
819 struct ceph_snap_context *snapc;
820 char *object_prefix = NULL;
821 char *snap_names = NULL;
822 u64 *snap_sizes = NULL;
828 /* Allocate this now to avoid having to handle failure below */
833 len = strnlen(ondisk->object_prefix,
834 sizeof (ondisk->object_prefix));
835 object_prefix = kmalloc(len + 1, GFP_KERNEL);
838 memcpy(object_prefix, ondisk->object_prefix, len);
839 object_prefix[len] = '\0';
842 /* Allocate the snapshot context and fill it in */
844 snap_count = le32_to_cpu(ondisk->snap_count);
845 snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
848 snapc->seq = le64_to_cpu(ondisk->snap_seq);
850 struct rbd_image_snap_ondisk *snaps;
851 u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);
853 /* We'll keep a copy of the snapshot names... */
855 if (snap_names_len > (u64)SIZE_MAX)
857 snap_names = kmalloc(snap_names_len, GFP_KERNEL);
861 /* ...as well as the array of their sizes. */
863 size = snap_count * sizeof (*header->snap_sizes);
864 snap_sizes = kmalloc(size, GFP_KERNEL);
869 * Copy the names, and fill in each snapshot's id
872 * Note that rbd_dev_v1_header_info() guarantees the
873 * ondisk buffer we're working with has
874 * snap_names_len bytes beyond the end of the
875 * snapshot id array, so this memcpy() is safe.
877 memcpy(snap_names, &ondisk->snaps[snap_count], snap_names_len);
878 snaps = ondisk->snaps;
879 for (i = 0; i < snap_count; i++) {
880 snapc->snaps[i] = le64_to_cpu(snaps[i].id);
881 snap_sizes[i] = le64_to_cpu(snaps[i].image_size);
885 /* We won't fail any more, fill in the header */
888 header->object_prefix = object_prefix;
889 header->obj_order = ondisk->options.order;
890 header->crypt_type = ondisk->options.crypt_type;
891 header->comp_type = ondisk->options.comp_type;
892 /* The rest aren't used for format 1 images */
893 header->stripe_unit = 0;
894 header->stripe_count = 0;
895 header->features = 0;
897 ceph_put_snap_context(header->snapc);
898 kfree(header->snap_names);
899 kfree(header->snap_sizes);
902 /* The remaining fields always get updated (when we refresh) */
904 header->image_size = le64_to_cpu(ondisk->image_size);
905 header->snapc = snapc;
906 header->snap_names = snap_names;
907 header->snap_sizes = snap_sizes;
909 /* Make sure mapping size is consistent with header info */
911 if (rbd_dev->spec->snap_id == CEPH_NOSNAP || first_time)
912 if (rbd_dev->mapping.size != header->image_size)
913 rbd_dev->mapping.size = header->image_size;
921 ceph_put_snap_context(snapc);
922 kfree(object_prefix);
927 static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which)
929 const char *snap_name;
931 rbd_assert(which < rbd_dev->header.snapc->num_snaps);
933 /* Skip over names until we find the one we are looking for */
935 snap_name = rbd_dev->header.snap_names;
937 snap_name += strlen(snap_name) + 1;
939 return kstrdup(snap_name, GFP_KERNEL);
943 * Snapshot id comparison function for use with qsort()/bsearch().
944 * Note that result is for snapshots in *descending* order.
946 static int snapid_compare_reverse(const void *s1, const void *s2)
948 u64 snap_id1 = *(u64 *)s1;
949 u64 snap_id2 = *(u64 *)s2;
951 if (snap_id1 < snap_id2)
952 return 1;
953 return snap_id1 == snap_id2 ? 0 : -1;
957 * Search a snapshot context to see if the given snapshot id is
958 * present.
960 * Returns the position of the snapshot id in the array if it's found,
961 * or BAD_SNAP_INDEX otherwise.
963 * Note: The snapshot array is kept sorted (by the osd) in
964 * reverse order, highest snapshot id first.
966 static u32 rbd_dev_snap_index(struct rbd_device *rbd_dev, u64 snap_id)
968 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
971 found = bsearch(&snap_id, &snapc->snaps, snapc->num_snaps,
972 sizeof (snap_id), snapid_compare_reverse);
974 return found ? (u32)(found - &snapc->snaps[0]) : BAD_SNAP_INDEX;
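/*
 * For example, with snapc->snaps = { 12, 7, 3 } (descending, newest
 * first), looking up snap_id 7 yields index 1, while snap_id 5 yields
 * BAD_SNAP_INDEX because bsearch() finds no exact match.
 */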
977 static const char *rbd_dev_v1_snap_name(struct rbd_device *rbd_dev,
981 const char *snap_name;
983 which = rbd_dev_snap_index(rbd_dev, snap_id);
984 if (which == BAD_SNAP_INDEX)
985 return ERR_PTR(-ENOENT);
987 snap_name = _rbd_dev_v1_snap_name(rbd_dev, which);
988 return snap_name ? snap_name : ERR_PTR(-ENOMEM);
991 static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
993 if (snap_id == CEPH_NOSNAP)
994 return RBD_SNAP_HEAD_NAME;
996 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
997 if (rbd_dev->image_format == 1)
998 return rbd_dev_v1_snap_name(rbd_dev, snap_id);
1000 return rbd_dev_v2_snap_name(rbd_dev, snap_id);
1003 static int rbd_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
1006 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
1007 if (snap_id == CEPH_NOSNAP) {
1008 *snap_size = rbd_dev->header.image_size;
1009 } else if (rbd_dev->image_format == 1) {
1012 which = rbd_dev_snap_index(rbd_dev, snap_id);
1013 if (which == BAD_SNAP_INDEX)
1016 *snap_size = rbd_dev->header.snap_sizes[which];
1021 ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, NULL, &size);
1030 static int rbd_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
1033 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
1034 if (snap_id == CEPH_NOSNAP) {
1035 *snap_features = rbd_dev->header.features;
1036 } else if (rbd_dev->image_format == 1) {
1037 *snap_features = 0; /* No features for format 1 */
1042 ret = _rbd_dev_v2_snap_features(rbd_dev, snap_id, &features);
1046 *snap_features = features;
1051 static int rbd_dev_mapping_set(struct rbd_device *rbd_dev)
1053 u64 snap_id = rbd_dev->spec->snap_id;
1058 ret = rbd_snap_size(rbd_dev, snap_id, &size);
1061 ret = rbd_snap_features(rbd_dev, snap_id, &features);
1065 rbd_dev->mapping.size = size;
1066 rbd_dev->mapping.features = features;
1071 static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev)
1073 rbd_dev->mapping.size = 0;
1074 rbd_dev->mapping.features = 0;
1077 static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
1084 name = kmem_cache_alloc(rbd_segment_name_cache, GFP_NOIO);
1087 segment = offset >> rbd_dev->header.obj_order;
1088 name_format = "%s.%012llx";
1089 if (rbd_dev->image_format == 2)
1090 name_format = "%s.%016llx";
1091 ret = snprintf(name, CEPH_MAX_OID_NAME_LEN + 1, name_format,
1092 rbd_dev->header.object_prefix, segment);
1093 if (ret < 0 || ret > CEPH_MAX_OID_NAME_LEN) {
1094 pr_err("error formatting segment name for #%llu (%d)\n",
1103 static void rbd_segment_name_free(const char *name)
1105 /* The explicit cast here is needed to drop the const qualifier */
1107 kmem_cache_free(rbd_segment_name_cache, (void *)name);
1110 static u64 rbd_segment_offset(struct rbd_device *rbd_dev, u64 offset)
1112 u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;
1114 return offset & (segment_size - 1);
1117 static u64 rbd_segment_length(struct rbd_device *rbd_dev,
1118 u64 offset, u64 length)
1120 u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;
1122 offset &= segment_size - 1;
1124 rbd_assert(length <= U64_MAX - offset);
1125 if (offset + length > segment_size)
1126 length = segment_size - offset;
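/*
 * A worked example, assuming the common object order of 22 (4 MiB
 * objects): an image I/O at offset 6 MiB for 4 MiB starts 2 MiB into
 * its object, so rbd_segment_offset() returns 2 MiB and
 * rbd_segment_length() clamps the first segment to the 2 MiB that
 * remain in that object.
 */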
1132 * returns the size of an object in the image
1134 static u64 rbd_obj_bytes(struct rbd_image_header *header)
1136 return 1 << header->obj_order;
1143 static void bio_chain_put(struct bio *chain)
1149 chain = chain->bi_next;
1155 * zeros a bio chain, starting at a specific offset
1157 static void zero_bio_chain(struct bio *chain, int start_ofs)
1160 unsigned long flags;
1166 bio_for_each_segment(bv, chain, i) {
1167 if (pos + bv->bv_len > start_ofs) {
1168 int remainder = max(start_ofs - pos, 0);
1169 buf = bvec_kmap_irq(bv, &flags);
1170 memset(buf + remainder, 0,
1171 bv->bv_len - remainder);
1172 flush_dcache_page(bv->bv_page);
1173 bvec_kunmap_irq(buf, &flags);
1178 chain = chain->bi_next;
1183 * similar to zero_bio_chain(), zeros data defined by a page array,
1184 * starting at the given byte offset from the start of the array and
1185 * continuing up to the given end offset. The pages array is
1186 * assumed to be big enough to hold all bytes up to the end.
1188 static void zero_pages(struct page **pages, u64 offset, u64 end)
1190 struct page **page = &pages[offset >> PAGE_SHIFT];
1192 rbd_assert(end > offset);
1193 rbd_assert(end - offset <= (u64)SIZE_MAX);
1194 while (offset < end) {
1197 unsigned long flags;
1200 page_offset = offset & ~PAGE_MASK;
1201 length = min_t(size_t, PAGE_SIZE - page_offset, end - offset);
1202 local_irq_save(flags);
1203 kaddr = kmap_atomic(*page);
1204 memset(kaddr + page_offset, 0, length);
1205 flush_dcache_page(*page);
1206 kunmap_atomic(kaddr);
1207 local_irq_restore(flags);
1215 * Clone a portion of a bio, starting at the given byte offset
1216 * and continuing for the number of bytes indicated.
1218 static struct bio *bio_clone_range(struct bio *bio_src,
1219 unsigned int offset,
1227 unsigned short end_idx;
1228 unsigned short vcnt;
1231 /* Handle the easy case for the caller */
1233 if (!offset && len == bio_src->bi_size)
1234 return bio_clone(bio_src, gfpmask);
1236 if (WARN_ON_ONCE(!len))
1238 if (WARN_ON_ONCE(len > bio_src->bi_size))
1240 if (WARN_ON_ONCE(offset > bio_src->bi_size - len))
1243 /* Find first affected segment... */
1246 bio_for_each_segment(bv, bio_src, idx) {
1247 if (resid < bv->bv_len)
1249 resid -= bv->bv_len;
1253 /* ...and the last affected segment */
1256 __bio_for_each_segment(bv, bio_src, end_idx, idx) {
1257 if (resid <= bv->bv_len)
1259 resid -= bv->bv_len;
1261 vcnt = end_idx - idx + 1;
1263 /* Build the clone */
1265 bio = bio_alloc(gfpmask, (unsigned int) vcnt);
1267 return NULL; /* ENOMEM */
1269 bio->bi_bdev = bio_src->bi_bdev;
1270 bio->bi_sector = bio_src->bi_sector + (offset >> SECTOR_SHIFT);
1271 bio->bi_rw = bio_src->bi_rw;
1272 bio->bi_flags |= 1 << BIO_CLONED;
1275 * Copy over our part of the bio_vec, then update the first
1276 * and last (or only) entries.
1278 memcpy(&bio->bi_io_vec[0], &bio_src->bi_io_vec[idx],
1279 vcnt * sizeof (struct bio_vec));
1280 bio->bi_io_vec[0].bv_offset += voff;
1281 if (vcnt > 1) {
1282 bio->bi_io_vec[0].bv_len -= voff;
1283 bio->bi_io_vec[vcnt - 1].bv_len = resid;
1284 } else {
1285 bio->bi_io_vec[0].bv_len = len;
1286 }
1288 bio->bi_vcnt = vcnt;
1296 * Clone a portion of a bio chain, starting at the given byte offset
1297 * into the first bio in the source chain and continuing for the
1298 * number of bytes indicated. The result is another bio chain of
1299 * exactly the given length, or a null pointer on error.
1301 * The bio_src and offset parameters are both in-out. On entry they
1302 * refer to the first source bio and the offset into that bio where
1303 * the start of data to be cloned is located.
1305 * On return, bio_src is updated to refer to the bio in the source
1306 * chain that contains the first un-cloned byte, and *offset will
1307 * contain the offset of that byte within that bio.
1309 static struct bio *bio_chain_clone_range(struct bio **bio_src,
1310 unsigned int *offset,
1314 struct bio *bi = *bio_src;
1315 unsigned int off = *offset;
1316 struct bio *chain = NULL;
1319 /* Build up a chain of clone bios up to the limit */
1321 if (!bi || off >= bi->bi_size || !len)
1322 return NULL; /* Nothing to clone */
1326 unsigned int bi_size;
1330 rbd_warn(NULL, "bio_chain exhausted with %u left", len);
1331 goto out_err; /* EINVAL; ran out of bio's */
1333 bi_size = min_t(unsigned int, bi->bi_size - off, len);
1334 bio = bio_clone_range(bi, off, bi_size, gfpmask);
1336 goto out_err; /* ENOMEM */
1339 end = &bio->bi_next;
1342 if (off == bi->bi_size) {
1353 bio_chain_put(chain);
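/*
 * A sketch of the intended calling pattern (rbd_img_request_fill()
 * below does essentially this): the caller walks an incoming chain,
 * carving off one object-sized clone at a time while *bio_src and
 * *offset advance past the cloned bytes:
 *
 *	while (resid) {
 *		length = rbd_segment_length(rbd_dev, img_offset, resid);
 *		clone = bio_chain_clone_range(&bio_list, &bio_offset,
 *						length, GFP_ATOMIC);
 *		...
 *		img_offset += length;
 *		resid -= length;
 *	}
 */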
1359 * The default/initial value for all object request flags is 0. For
1360 * each flag, once its value is set to 1 it is never reset to 0
1361 * again.
1363 static void obj_request_img_data_set(struct rbd_obj_request *obj_request)
1365 if (test_and_set_bit(OBJ_REQ_IMG_DATA, &obj_request->flags)) {
1366 struct rbd_device *rbd_dev;
1368 rbd_dev = obj_request->img_request->rbd_dev;
1369 rbd_warn(rbd_dev, "obj_request %p already marked img_data\n",
1374 static bool obj_request_img_data_test(struct rbd_obj_request *obj_request)
1377 return test_bit(OBJ_REQ_IMG_DATA, &obj_request->flags) != 0;
1380 static void obj_request_done_set(struct rbd_obj_request *obj_request)
1382 if (test_and_set_bit(OBJ_REQ_DONE, &obj_request->flags)) {
1383 struct rbd_device *rbd_dev = NULL;
1385 if (obj_request_img_data_test(obj_request))
1386 rbd_dev = obj_request->img_request->rbd_dev;
1387 rbd_warn(rbd_dev, "obj_request %p already marked done\n",
1392 static bool obj_request_done_test(struct rbd_obj_request *obj_request)
1395 return test_bit(OBJ_REQ_DONE, &obj_request->flags) != 0;
1399 * This sets the KNOWN flag after (possibly) setting the EXISTS
1400 * flag. The latter is set based on the "exists" value provided.
1402 * Note that for our purposes once an object exists it never goes
1403 * away again. It's possible that the responses from two existence
1404 * checks are separated by the creation of the target object, and
1405 * the first ("doesn't exist") response arrives *after* the second
1406 * ("does exist"). In that case we ignore the second one.
1408 static void obj_request_existence_set(struct rbd_obj_request *obj_request,
1412 set_bit(OBJ_REQ_EXISTS, &obj_request->flags);
1413 set_bit(OBJ_REQ_KNOWN, &obj_request->flags);
1417 static bool obj_request_known_test(struct rbd_obj_request *obj_request)
1420 return test_bit(OBJ_REQ_KNOWN, &obj_request->flags) != 0;
1423 static bool obj_request_exists_test(struct rbd_obj_request *obj_request)
1426 return test_bit(OBJ_REQ_EXISTS, &obj_request->flags) != 0;
1429 static void rbd_obj_request_get(struct rbd_obj_request *obj_request)
1431 dout("%s: obj %p (was %d)\n", __func__, obj_request,
1432 atomic_read(&obj_request->kref.refcount));
1433 kref_get(&obj_request->kref);
1436 static void rbd_obj_request_destroy(struct kref *kref);
1437 static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
1439 rbd_assert(obj_request != NULL);
1440 dout("%s: obj %p (was %d)\n", __func__, obj_request,
1441 atomic_read(&obj_request->kref.refcount));
1442 kref_put(&obj_request->kref, rbd_obj_request_destroy);
1445 static bool img_request_child_test(struct rbd_img_request *img_request);
1446 static void rbd_parent_request_destroy(struct kref *kref);
1447 static void rbd_img_request_destroy(struct kref *kref);
1448 static void rbd_img_request_put(struct rbd_img_request *img_request)
1450 rbd_assert(img_request != NULL);
1451 dout("%s: img %p (was %d)\n", __func__, img_request,
1452 atomic_read(&img_request->kref.refcount));
1453 if (img_request_child_test(img_request))
1454 kref_put(&img_request->kref, rbd_parent_request_destroy);
1456 kref_put(&img_request->kref, rbd_img_request_destroy);
1459 static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
1460 struct rbd_obj_request *obj_request)
1462 rbd_assert(obj_request->img_request == NULL);
1464 /* Image request now owns object's original reference */
1465 obj_request->img_request = img_request;
1466 obj_request->which = img_request->obj_request_count;
1467 rbd_assert(!obj_request_img_data_test(obj_request));
1468 obj_request_img_data_set(obj_request);
1469 rbd_assert(obj_request->which != BAD_WHICH);
1470 img_request->obj_request_count++;
1471 list_add_tail(&obj_request->links, &img_request->obj_requests);
1472 dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
1473 obj_request->which);
1476 static inline void rbd_img_obj_request_del(struct rbd_img_request *img_request,
1477 struct rbd_obj_request *obj_request)
1479 rbd_assert(obj_request->which != BAD_WHICH);
1481 dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
1482 obj_request->which);
1483 list_del(&obj_request->links);
1484 rbd_assert(img_request->obj_request_count > 0);
1485 img_request->obj_request_count--;
1486 rbd_assert(obj_request->which == img_request->obj_request_count);
1487 obj_request->which = BAD_WHICH;
1488 rbd_assert(obj_request_img_data_test(obj_request));
1489 rbd_assert(obj_request->img_request == img_request);
1490 obj_request->img_request = NULL;
1491 obj_request->callback = NULL;
1492 rbd_obj_request_put(obj_request);
1495 static bool obj_request_type_valid(enum obj_request_type type)
1498 case OBJ_REQUEST_NODATA:
1499 case OBJ_REQUEST_BIO:
1500 case OBJ_REQUEST_PAGES:
1507 static int rbd_obj_request_submit(struct ceph_osd_client *osdc,
1508 struct rbd_obj_request *obj_request)
1510 dout("%s: osdc %p obj %p\n", __func__, osdc, obj_request);
1512 return ceph_osdc_start_request(osdc, obj_request->osd_req, false);
1515 static void rbd_img_request_complete(struct rbd_img_request *img_request)
1518 dout("%s: img %p\n", __func__, img_request);
1521 * If no error occurred, compute the aggregate transfer
1522 * count for the image request. We could instead use
1523 * atomic64_cmpxchg() to update it as each object request
1524 * completes; it is not clear offhand which way is better.
1526 if (!img_request->result) {
1527 struct rbd_obj_request *obj_request;
1530 for_each_obj_request(img_request, obj_request)
1531 xferred += obj_request->xferred;
1532 img_request->xferred = xferred;
1535 if (img_request->callback)
1536 img_request->callback(img_request);
1538 rbd_img_request_put(img_request);
1541 /* Caller is responsible for rbd_obj_request_destroy(obj_request) */
1543 static int rbd_obj_request_wait(struct rbd_obj_request *obj_request)
1545 dout("%s: obj %p\n", __func__, obj_request);
1547 return wait_for_completion_interruptible(&obj_request->completion);
1551 * The default/initial value for all image request flags is 0. Each
1552 * is conditionally set to 1 at image request initialization time
1553 * and currently never changes thereafter.
1555 static void img_request_write_set(struct rbd_img_request *img_request)
1557 set_bit(IMG_REQ_WRITE, &img_request->flags);
1561 static bool img_request_write_test(struct rbd_img_request *img_request)
1564 return test_bit(IMG_REQ_WRITE, &img_request->flags) != 0;
1567 static void img_request_child_set(struct rbd_img_request *img_request)
1569 set_bit(IMG_REQ_CHILD, &img_request->flags);
1573 static void img_request_child_clear(struct rbd_img_request *img_request)
1575 clear_bit(IMG_REQ_CHILD, &img_request->flags);
1579 static bool img_request_child_test(struct rbd_img_request *img_request)
1582 return test_bit(IMG_REQ_CHILD, &img_request->flags) != 0;
1585 static void img_request_layered_set(struct rbd_img_request *img_request)
1587 set_bit(IMG_REQ_LAYERED, &img_request->flags);
1591 static void img_request_layered_clear(struct rbd_img_request *img_request)
1593 clear_bit(IMG_REQ_LAYERED, &img_request->flags);
1597 static bool img_request_layered_test(struct rbd_img_request *img_request)
1600 return test_bit(IMG_REQ_LAYERED, &img_request->flags) != 0;
1604 rbd_img_obj_request_read_callback(struct rbd_obj_request *obj_request)
1606 u64 xferred = obj_request->xferred;
1607 u64 length = obj_request->length;
1609 dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
1610 obj_request, obj_request->img_request, obj_request->result,
1613 * ENOENT means a hole in the image. We zero-fill the entire
1614 * length of the request. A short read also implies zero-fill
1615 * to the end of the request. An error requires the whole
1616 * length of the request to be reported finished with an error
1617 * to the block layer. In each case we update the xferred
1618 * count to indicate the whole request was satisfied.
1620 rbd_assert(obj_request->type != OBJ_REQUEST_NODATA);
1621 if (obj_request->result == -ENOENT) {
1622 if (obj_request->type == OBJ_REQUEST_BIO)
1623 zero_bio_chain(obj_request->bio_list, 0);
1625 zero_pages(obj_request->pages, 0, length);
1626 obj_request->result = 0;
1627 } else if (xferred < length && !obj_request->result) {
1628 if (obj_request->type == OBJ_REQUEST_BIO)
1629 zero_bio_chain(obj_request->bio_list, xferred);
1631 zero_pages(obj_request->pages, xferred, length);
1633 obj_request->xferred = length;
1634 obj_request_done_set(obj_request);
1637 static void rbd_obj_request_complete(struct rbd_obj_request *obj_request)
1639 dout("%s: obj %p cb %p\n", __func__, obj_request,
1640 obj_request->callback);
1641 if (obj_request->callback)
1642 obj_request->callback(obj_request);
1644 complete_all(&obj_request->completion);
1647 static void rbd_osd_trivial_callback(struct rbd_obj_request *obj_request)
1649 dout("%s: obj %p\n", __func__, obj_request);
1650 obj_request_done_set(obj_request);
1653 static void rbd_osd_read_callback(struct rbd_obj_request *obj_request)
1655 struct rbd_img_request *img_request = NULL;
1656 struct rbd_device *rbd_dev = NULL;
1657 bool layered = false;
1659 if (obj_request_img_data_test(obj_request)) {
1660 img_request = obj_request->img_request;
1661 layered = img_request && img_request_layered_test(img_request);
1662 rbd_dev = img_request->rbd_dev;
1665 dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
1666 obj_request, img_request, obj_request->result,
1667 obj_request->xferred, obj_request->length);
1668 if (layered && obj_request->result == -ENOENT &&
1669 obj_request->img_offset < rbd_dev->parent_overlap)
1670 rbd_img_parent_read(obj_request);
1671 else if (img_request)
1672 rbd_img_obj_request_read_callback(obj_request);
1674 obj_request_done_set(obj_request);
1677 static void rbd_osd_write_callback(struct rbd_obj_request *obj_request)
1679 dout("%s: obj %p result %d %llu\n", __func__, obj_request,
1680 obj_request->result, obj_request->length);
1682 * There is no such thing as a successful short write. Set
1683 * it to our originally-requested length.
1685 obj_request->xferred = obj_request->length;
1686 obj_request_done_set(obj_request);
1690 * For a simple stat call there's nothing to do. We'll do more if
1691 * this is part of a write sequence for a layered image.
1693 static void rbd_osd_stat_callback(struct rbd_obj_request *obj_request)
1695 dout("%s: obj %p\n", __func__, obj_request);
1696 obj_request_done_set(obj_request);
1699 static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
1700 struct ceph_msg *msg)
1702 struct rbd_obj_request *obj_request = osd_req->r_priv;
1705 dout("%s: osd_req %p msg %p\n", __func__, osd_req, msg);
1706 rbd_assert(osd_req == obj_request->osd_req);
1707 if (obj_request_img_data_test(obj_request)) {
1708 rbd_assert(obj_request->img_request);
1709 rbd_assert(obj_request->which != BAD_WHICH);
1711 rbd_assert(obj_request->which == BAD_WHICH);
1714 if (osd_req->r_result < 0)
1715 obj_request->result = osd_req->r_result;
1717 BUG_ON(osd_req->r_num_ops > 2);
1720 * We support a 64-bit length, but ultimately it has to be
1721 * passed to blk_end_request(), which takes an unsigned int.
1723 obj_request->xferred = osd_req->r_reply_op_len[0];
1724 rbd_assert(obj_request->xferred < (u64)UINT_MAX);
1725 opcode = osd_req->r_ops[0].op;
1727 case CEPH_OSD_OP_READ:
1728 rbd_osd_read_callback(obj_request);
1730 case CEPH_OSD_OP_WRITE:
1731 rbd_osd_write_callback(obj_request);
1733 case CEPH_OSD_OP_STAT:
1734 rbd_osd_stat_callback(obj_request);
1736 case CEPH_OSD_OP_CALL:
1737 case CEPH_OSD_OP_NOTIFY_ACK:
1738 case CEPH_OSD_OP_WATCH:
1739 rbd_osd_trivial_callback(obj_request);
1742 rbd_warn(NULL, "%s: unsupported op %hu\n",
1743 obj_request->object_name, (unsigned short) opcode);
1747 if (obj_request_done_test(obj_request))
1748 rbd_obj_request_complete(obj_request);
1751 static void rbd_osd_req_format_read(struct rbd_obj_request *obj_request)
1753 struct rbd_img_request *img_request = obj_request->img_request;
1754 struct ceph_osd_request *osd_req = obj_request->osd_req;
1757 rbd_assert(osd_req != NULL);
1759 snap_id = img_request ? img_request->snap_id : CEPH_NOSNAP;
1760 ceph_osdc_build_request(osd_req, obj_request->offset,
1761 NULL, snap_id, NULL);
1764 static void rbd_osd_req_format_write(struct rbd_obj_request *obj_request)
1766 struct rbd_img_request *img_request = obj_request->img_request;
1767 struct ceph_osd_request *osd_req = obj_request->osd_req;
1768 struct ceph_snap_context *snapc;
1769 struct timespec mtime = CURRENT_TIME;
1771 rbd_assert(osd_req != NULL);
1773 snapc = img_request ? img_request->snapc : NULL;
1774 ceph_osdc_build_request(osd_req, obj_request->offset,
1775 snapc, CEPH_NOSNAP, &mtime);
1778 static struct ceph_osd_request *rbd_osd_req_create(
1779 struct rbd_device *rbd_dev,
1781 struct rbd_obj_request *obj_request)
1783 struct ceph_snap_context *snapc = NULL;
1784 struct ceph_osd_client *osdc;
1785 struct ceph_osd_request *osd_req;
1787 if (obj_request_img_data_test(obj_request)) {
1788 struct rbd_img_request *img_request = obj_request->img_request;
1790 rbd_assert(write_request ==
1791 img_request_write_test(img_request));
1793 snapc = img_request->snapc;
1796 /* Allocate and initialize the request, for the single op */
1798 osdc = &rbd_dev->rbd_client->client->osdc;
1799 osd_req = ceph_osdc_alloc_request(osdc, snapc, 1, false, GFP_ATOMIC);
1801 return NULL; /* ENOMEM */
1804 osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
1806 osd_req->r_flags = CEPH_OSD_FLAG_READ;
1808 osd_req->r_callback = rbd_osd_req_callback;
1809 osd_req->r_priv = obj_request;
1811 osd_req->r_oloc.pool = ceph_file_layout_pg_pool(rbd_dev->layout);
1813 osd_req->r_oid_len = strlen(obj_request->object_name);
1814 rbd_assert(osd_req->r_oid_len < sizeof (osd_req->r_oid));
1815 memcpy(osd_req->r_oid, obj_request->object_name, osd_req->r_oid_len);
1821 * Create a copyup osd request based on the information in the
1822 * object request supplied. A copyup request has two osd ops:
1823 * a copyup method call and a "normal" write request.
1825 static struct ceph_osd_request *
1826 rbd_osd_req_create_copyup(struct rbd_obj_request *obj_request)
1828 struct rbd_img_request *img_request;
1829 struct ceph_snap_context *snapc;
1830 struct rbd_device *rbd_dev;
1831 struct ceph_osd_client *osdc;
1832 struct ceph_osd_request *osd_req;
1834 rbd_assert(obj_request_img_data_test(obj_request));
1835 img_request = obj_request->img_request;
1836 rbd_assert(img_request);
1837 rbd_assert(img_request_write_test(img_request));
1839 /* Allocate and initialize the request, for the two ops */
1841 snapc = img_request->snapc;
1842 rbd_dev = img_request->rbd_dev;
1843 osdc = &rbd_dev->rbd_client->client->osdc;
1844 osd_req = ceph_osdc_alloc_request(osdc, snapc, 2, false, GFP_ATOMIC);
1846 return NULL; /* ENOMEM */
1848 osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
1849 osd_req->r_callback = rbd_osd_req_callback;
1850 osd_req->r_priv = obj_request;
1852 osd_req->r_oloc.pool = ceph_file_layout_pg_pool(rbd_dev->layout);
1854 osd_req->r_oid_len = strlen(obj_request->object_name);
1855 rbd_assert(osd_req->r_oid_len < sizeof (osd_req->r_oid));
1856 memcpy(osd_req->r_oid, obj_request->object_name, osd_req->r_oid_len);
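/*
 * Only the request shell is built here; the caller is expected to
 * fill in the two ops once the parent data is available (the "copyup"
 * class method call in op slot 0 and the original write in slot 1).
 */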
1862 static void rbd_osd_req_destroy(struct ceph_osd_request *osd_req)
1864 ceph_osdc_put_request(osd_req);
1867 /* object_name is assumed to be a non-null pointer and NUL-terminated */
1869 static struct rbd_obj_request *rbd_obj_request_create(const char *object_name,
1870 u64 offset, u64 length,
1871 enum obj_request_type type)
1873 struct rbd_obj_request *obj_request;
1877 rbd_assert(obj_request_type_valid(type));
1879 size = strlen(object_name) + 1;
1880 name = kmalloc(size, GFP_KERNEL);
1884 obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_KERNEL);
1890 obj_request->object_name = memcpy(name, object_name, size);
1891 obj_request->offset = offset;
1892 obj_request->length = length;
1893 obj_request->flags = 0;
1894 obj_request->which = BAD_WHICH;
1895 obj_request->type = type;
1896 INIT_LIST_HEAD(&obj_request->links);
1897 init_completion(&obj_request->completion);
1898 kref_init(&obj_request->kref);
1900 dout("%s: \"%s\" %llu/%llu %d -> obj %p\n", __func__, object_name,
1901 offset, length, (int)type, obj_request);
1906 static void rbd_obj_request_destroy(struct kref *kref)
1908 struct rbd_obj_request *obj_request;
1910 obj_request = container_of(kref, struct rbd_obj_request, kref);
1912 dout("%s: obj %p\n", __func__, obj_request);
1914 rbd_assert(obj_request->img_request == NULL);
1915 rbd_assert(obj_request->which == BAD_WHICH);
1917 if (obj_request->osd_req)
1918 rbd_osd_req_destroy(obj_request->osd_req);
1920 rbd_assert(obj_request_type_valid(obj_request->type));
1921 switch (obj_request->type) {
1922 case OBJ_REQUEST_NODATA:
1923 break; /* Nothing to do */
1924 case OBJ_REQUEST_BIO:
1925 if (obj_request->bio_list)
1926 bio_chain_put(obj_request->bio_list);
1928 case OBJ_REQUEST_PAGES:
1929 if (obj_request->pages)
1930 ceph_release_page_vector(obj_request->pages,
1931 obj_request->page_count);
1935 kfree(obj_request->object_name);
1936 obj_request->object_name = NULL;
1937 kmem_cache_free(rbd_obj_request_cache, obj_request);
1940 /* It's OK to call this for a device with no parent */
1942 static void rbd_spec_put(struct rbd_spec *spec);
1943 static void rbd_dev_unparent(struct rbd_device *rbd_dev)
1945 rbd_dev_remove_parent(rbd_dev);
1946 rbd_spec_put(rbd_dev->parent_spec);
1947 rbd_dev->parent_spec = NULL;
1948 rbd_dev->parent_overlap = 0;
1952 * Parent image reference counting is used to determine when an
1953 * image's parent fields can be safely torn down--after there are no
1954 * more in-flight requests to the parent image. When the last
1955 * reference is dropped, cleaning them up is safe.
1957 static void rbd_dev_parent_put(struct rbd_device *rbd_dev)
1961 if (!rbd_dev->parent_spec)
1964 counter = atomic_dec_return_safe(&rbd_dev->parent_ref);
1968 /* Last reference; clean up parent data structures */
1971 rbd_dev_unparent(rbd_dev);
1973 rbd_warn(rbd_dev, "parent reference underflow\n");
1977 * If an image has a non-zero parent overlap, get a reference to its
1978 * parent.
1980 * We must get the reference before checking for the overlap to
1981 * coordinate properly with zeroing the parent overlap in
1982 * rbd_dev_v2_parent_info() when an image gets flattened. We
1983 * drop it again if there is no overlap.
1985 * Returns true if the rbd device has a parent with a non-zero
1986 * overlap and a reference for it was successfully taken, or
1987 * false otherwise.
1989 static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
1993 if (!rbd_dev->parent_spec)
1996 counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
1997 if (counter > 0 && rbd_dev->parent_overlap)
2000 /* Image was flattened, but parent is not yet torn down */
2003 rbd_warn(rbd_dev, "parent reference overflow\n");
2009 * Caller is responsible for filling in the list of object requests
2010 * that comprises the image request, and the Linux request pointer
2011 * (if there is one).
2013 static struct rbd_img_request *rbd_img_request_create(
2014 struct rbd_device *rbd_dev,
2015 u64 offset, u64 length,
2018 struct rbd_img_request *img_request;
2020 img_request = kmem_cache_alloc(rbd_img_request_cache, GFP_ATOMIC);
2024 if (write_request) {
2025 down_read(&rbd_dev->header_rwsem);
2026 ceph_get_snap_context(rbd_dev->header.snapc);
2027 up_read(&rbd_dev->header_rwsem);
2030 img_request->rq = NULL;
2031 img_request->rbd_dev = rbd_dev;
2032 img_request->offset = offset;
2033 img_request->length = length;
2034 img_request->flags = 0;
2035 if (write_request) {
2036 img_request_write_set(img_request);
2037 img_request->snapc = rbd_dev->header.snapc;
2039 img_request->snap_id = rbd_dev->spec->snap_id;
2041 if (rbd_dev_parent_get(rbd_dev))
2042 img_request_layered_set(img_request);
2043 spin_lock_init(&img_request->completion_lock);
2044 img_request->next_completion = 0;
2045 img_request->callback = NULL;
2046 img_request->result = 0;
2047 img_request->obj_request_count = 0;
2048 INIT_LIST_HEAD(&img_request->obj_requests);
2049 kref_init(&img_request->kref);
2051 dout("%s: rbd_dev %p %s %llu/%llu -> img %p\n", __func__, rbd_dev,
2052 write_request ? "write" : "read", offset, length,
2058 static void rbd_img_request_destroy(struct kref *kref)
2060 struct rbd_img_request *img_request;
2061 struct rbd_obj_request *obj_request;
2062 struct rbd_obj_request *next_obj_request;
2064 img_request = container_of(kref, struct rbd_img_request, kref);
2066 dout("%s: img %p\n", __func__, img_request);
2068 for_each_obj_request_safe(img_request, obj_request, next_obj_request)
2069 rbd_img_obj_request_del(img_request, obj_request);
2070 rbd_assert(img_request->obj_request_count == 0);
2072 if (img_request_layered_test(img_request)) {
2073 img_request_layered_clear(img_request);
2074 rbd_dev_parent_put(img_request->rbd_dev);
2077 if (img_request_write_test(img_request))
2078 ceph_put_snap_context(img_request->snapc);
2080 kmem_cache_free(rbd_img_request_cache, img_request);
2083 static struct rbd_img_request *rbd_parent_request_create(
2084 struct rbd_obj_request *obj_request,
2085 u64 img_offset, u64 length)
2087 struct rbd_img_request *parent_request;
2088 struct rbd_device *rbd_dev;
2090 rbd_assert(obj_request->img_request);
2091 rbd_dev = obj_request->img_request->rbd_dev;
2093 parent_request = rbd_img_request_create(rbd_dev->parent,
2094 img_offset, length, false);
2095 if (!parent_request)
2098 img_request_child_set(parent_request);
2099 rbd_obj_request_get(obj_request);
2100 parent_request->obj_request = obj_request;
2102 return parent_request;
2105 static void rbd_parent_request_destroy(struct kref *kref)
2107 struct rbd_img_request *parent_request;
2108 struct rbd_obj_request *orig_request;
2110 parent_request = container_of(kref, struct rbd_img_request, kref);
2111 orig_request = parent_request->obj_request;
2113 parent_request->obj_request = NULL;
2114 rbd_obj_request_put(orig_request);
2115 img_request_child_clear(parent_request);
2117 rbd_img_request_destroy(kref);
2120 static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request)
2122 struct rbd_img_request *img_request;
2123 unsigned int xferred;
2127 rbd_assert(obj_request_img_data_test(obj_request));
2128 img_request = obj_request->img_request;
2130 rbd_assert(obj_request->xferred <= (u64)UINT_MAX);
2131 xferred = (unsigned int)obj_request->xferred;
2132 result = obj_request->result;
2134 struct rbd_device *rbd_dev = img_request->rbd_dev;
2136 rbd_warn(rbd_dev, "%s %llx at %llx (%llx)\n",
2137 img_request_write_test(img_request) ? "write" : "read",
2138 obj_request->length, obj_request->img_offset,
2139 obj_request->offset);
2140 rbd_warn(rbd_dev, " result %d xferred %x\n",
2142 if (!img_request->result)
2143 img_request->result = result;
2146 /* Image object requests don't own their page array */
2148 if (obj_request->type == OBJ_REQUEST_PAGES) {
2149 obj_request->pages = NULL;
2150 obj_request->page_count = 0;
2153 if (img_request_child_test(img_request)) {
2154 rbd_assert(img_request->obj_request != NULL);
2155 more = obj_request->which < img_request->obj_request_count - 1;
2157 rbd_assert(img_request->rq != NULL);
2158 more = blk_end_request(img_request->rq, result, xferred);
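/*
 * blk_end_request() returns true while the block layer request still
 * has bytes outstanding, so "more" stays set until the final object
 * request finishes off the original I/O.
 */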
2164 static void rbd_img_obj_callback(struct rbd_obj_request *obj_request)
2166 struct rbd_img_request *img_request;
2167 u32 which = obj_request->which;
2170 rbd_assert(obj_request_img_data_test(obj_request));
2171 img_request = obj_request->img_request;
2173 dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
2174 rbd_assert(img_request != NULL);
2175 rbd_assert(img_request->obj_request_count > 0);
2176 rbd_assert(which != BAD_WHICH);
2177 rbd_assert(which < img_request->obj_request_count);
2178 rbd_assert(which >= img_request->next_completion);
2180 spin_lock_irq(&img_request->completion_lock);
2181 if (which != img_request->next_completion)
2184 for_each_obj_request_from(img_request, obj_request) {
2186 rbd_assert(which < img_request->obj_request_count);
2188 if (!obj_request_done_test(obj_request))
2190 more = rbd_img_obj_end_request(obj_request);
2194 rbd_assert(more ^ (which == img_request->obj_request_count));
2195 img_request->next_completion = which;
2197 spin_unlock_irq(&img_request->completion_lock);
2200 rbd_img_request_complete(img_request);
2204 * Split up an image request into one or more object requests, each
2205 * to a different object. The "type" parameter indicates whether
2206 * "data_desc" is the pointer to the head of a list of bio
2207 * structures, or the base of a page array. In either case this
2208 * function assumes data_desc describes memory sufficient to hold
2209 * all data described by the image request.
2211 static int rbd_img_request_fill(struct rbd_img_request *img_request,
2212 enum obj_request_type type,
2215 struct rbd_device *rbd_dev = img_request->rbd_dev;
2216 struct rbd_obj_request *obj_request = NULL;
2217 struct rbd_obj_request *next_obj_request;
2218 bool write_request = img_request_write_test(img_request);
2219 struct bio *bio_list = NULL;
2220 unsigned int bio_offset = 0;
2221 struct page **pages = NULL;
2226 dout("%s: img %p type %d data_desc %p\n", __func__, img_request,
2227 (int)type, data_desc);
2229 opcode = write_request ? CEPH_OSD_OP_WRITE : CEPH_OSD_OP_READ;
2230 img_offset = img_request->offset;
2231 resid = img_request->length;
2232 rbd_assert(resid > 0);
2234 if (type == OBJ_REQUEST_BIO) {
2235 bio_list = data_desc;
2236 rbd_assert(img_offset == bio_list->bi_sector << SECTOR_SHIFT);
2238 rbd_assert(type == OBJ_REQUEST_PAGES);
	while (resid) {
		struct ceph_osd_request *osd_req;
		const char *object_name;
		u64 offset;
		u64 length;

		object_name = rbd_segment_name(rbd_dev, img_offset);
		if (!object_name)
			goto out_unwind;
		offset = rbd_segment_offset(rbd_dev, img_offset);
		length = rbd_segment_length(rbd_dev, img_offset, resid);
		obj_request = rbd_obj_request_create(object_name,
						offset, length, type);
		/* object request has its own copy of the object name */
		rbd_segment_name_free(object_name);
		if (!obj_request)
			goto out_unwind;
2260 * set obj_request->img_request before creating the
2261 * osd_request so that it gets the right snapc
2263 rbd_img_obj_request_add(img_request, obj_request);
		if (type == OBJ_REQUEST_BIO) {
			unsigned int clone_size;

			rbd_assert(length <= (u64)UINT_MAX);
			clone_size = (unsigned int)length;
			obj_request->bio_list =
					bio_chain_clone_range(&bio_list,
								&bio_offset,
								clone_size,
								GFP_ATOMIC);
			if (!obj_request->bio_list)
				goto out_partial;
		} else {
			unsigned int page_count;

			obj_request->pages = pages;
			page_count = (u32)calc_pages_for(offset, length);
			obj_request->page_count = page_count;
			if ((offset + length) & ~PAGE_MASK)
				page_count--;	/* more on last page */
			pages += page_count;
		}
		osd_req = rbd_osd_req_create(rbd_dev, write_request,
						obj_request);
		if (!osd_req)
			goto out_partial;
		obj_request->osd_req = osd_req;
		obj_request->callback = rbd_img_obj_callback;

		osd_req_op_extent_init(osd_req, 0, opcode, offset, length,
						0, 0);
		if (type == OBJ_REQUEST_BIO)
			osd_req_op_extent_osd_data_bio(osd_req, 0,
					obj_request->bio_list, length);
		else
			osd_req_op_extent_osd_data_pages(osd_req, 0,
					obj_request->pages, length,
					offset & ~PAGE_MASK, false, false);

		if (write_request)
			rbd_osd_req_format_write(obj_request);
		else
			rbd_osd_req_format_read(obj_request);

		obj_request->img_offset = img_offset;

		img_offset += length;
		resid -= length;
	}

	return 0;
out_partial:
	rbd_obj_request_put(obj_request);
out_unwind:
	for_each_obj_request_safe(img_request, obj_request, next_obj_request)
		rbd_obj_request_put(obj_request);

	return -ENOMEM;
}
static void
rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	struct rbd_device *rbd_dev;
	struct page **pages;
	u32 page_count;
2335 rbd_assert(obj_request->type == OBJ_REQUEST_BIO);
2336 rbd_assert(obj_request_img_data_test(obj_request));
2337 img_request = obj_request->img_request;
2338 rbd_assert(img_request);
2340 rbd_dev = img_request->rbd_dev;
2341 rbd_assert(rbd_dev);
2343 pages = obj_request->copyup_pages;
2344 rbd_assert(pages != NULL);
2345 obj_request->copyup_pages = NULL;
2346 page_count = obj_request->copyup_page_count;
2347 rbd_assert(page_count);
2348 obj_request->copyup_page_count = 0;
2349 ceph_release_page_vector(pages, page_count);
2352 * We want the transfer count to reflect the size of the
2353 * original write request. There is no such thing as a
2354 * successful short write, so if the request was successful
2355 * we can just set it to the originally-requested length.
2357 if (!obj_request->result)
2358 obj_request->xferred = obj_request->length;
	/* Finish up with the normal image object callback */

	rbd_img_obj_callback(obj_request);
}
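/*
 * Editorial summary (a sketch pieced together from the surrounding
 * callbacks, not authoritative): the copyup path runs in three steps.
 * First, rbd_img_obj_parent_read_full() reads the object's full
 * backing range from the parent image into a page vector.  Second,
 * rbd_img_obj_parent_read_full_callback() rebuilds the osd request
 * with two ops: a "copyup" class method call carrying the parent data,
 * followed by the original write.  Third, rbd_img_obj_copyup_callback()
 * (above) releases the page vector and completes the request normally.
 */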
static void
rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)
{
	struct rbd_obj_request *orig_request;
	struct ceph_osd_request *osd_req;
	struct ceph_osd_client *osdc;
	struct rbd_device *rbd_dev;
	struct page **pages;
	u32 page_count;
	int img_result;
	u64 parent_length;
	u64 offset;
	u64 length;
2379 rbd_assert(img_request_child_test(img_request));
2381 /* First get what we need from the image request */
2383 pages = img_request->copyup_pages;
2384 rbd_assert(pages != NULL);
2385 img_request->copyup_pages = NULL;
2386 page_count = img_request->copyup_page_count;
2387 rbd_assert(page_count);
2388 img_request->copyup_page_count = 0;
2390 orig_request = img_request->obj_request;
2391 rbd_assert(orig_request != NULL);
2392 rbd_assert(obj_request_type_valid(orig_request->type));
2393 img_result = img_request->result;
2394 parent_length = img_request->length;
2395 rbd_assert(parent_length == img_request->xferred);
2396 rbd_img_request_put(img_request);
2398 rbd_assert(orig_request->img_request);
2399 rbd_dev = orig_request->img_request->rbd_dev;
2400 rbd_assert(rbd_dev);
2403 * If the overlap has become 0 (most likely because the
2404 * image has been flattened) we need to free the pages
2405 * and re-submit the original write request.
2407 if (!rbd_dev->parent_overlap) {
2408 struct ceph_osd_client *osdc;
2410 ceph_release_page_vector(pages, page_count);
2411 osdc = &rbd_dev->rbd_client->client->osdc;
		img_result = rbd_obj_request_submit(osdc, orig_request);
		if (!img_result)
			return;
	}

	if (img_result)
		goto out_err;
	 * The original osd request is of no use to us any more.
2422 * We need a new one that can hold the two ops in a copyup
2423 * request. Allocate the new copyup osd request for the
2424 * original request, and release the old one.
	img_result = -ENOMEM;
	osd_req = rbd_osd_req_create_copyup(orig_request);
	if (!osd_req)
		goto out_err;
	rbd_osd_req_destroy(orig_request->osd_req);
	orig_request->osd_req = osd_req;
2432 orig_request->copyup_pages = pages;
2433 orig_request->copyup_page_count = page_count;
2435 /* Initialize the copyup op */
2437 osd_req_op_cls_init(osd_req, 0, CEPH_OSD_OP_CALL, "rbd", "copyup");
	osd_req_op_cls_request_data_pages(osd_req, 0, pages, parent_length, 0,
						false, false);
2441 /* Then the original write request op */
2443 offset = orig_request->offset;
2444 length = orig_request->length;
2445 osd_req_op_extent_init(osd_req, 1, CEPH_OSD_OP_WRITE,
2446 offset, length, 0, 0);
2447 if (orig_request->type == OBJ_REQUEST_BIO)
2448 osd_req_op_extent_osd_data_bio(osd_req, 1,
2449 orig_request->bio_list, length);
2451 osd_req_op_extent_osd_data_pages(osd_req, 1,
2452 orig_request->pages, length,
2453 offset & ~PAGE_MASK, false, false);
2455 rbd_osd_req_format_write(orig_request);
2457 /* All set, send it off. */
2459 orig_request->callback = rbd_img_obj_copyup_callback;
2460 osdc = &rbd_dev->rbd_client->client->osdc;
	img_result = rbd_obj_request_submit(osdc, orig_request);
	if (!img_result)
		return;

out_err:
	/* Record the error code and complete the request */

	orig_request->result = img_result;
	orig_request->xferred = 0;
	obj_request_done_set(orig_request);
	rbd_obj_request_complete(orig_request);
}
2474 * Read from the parent image the range of data that covers the
2475 * entire target of the given object request. This is used for
2476 * satisfying a layered image write request when the target of an
2477 * object request from the image request does not exist.
2479 * A page array big enough to hold the returned data is allocated
2480 * and supplied to rbd_img_request_fill() as the "data descriptor."
2481 * When the read completes, this page array will be transferred to
2482 * the original object request for the copyup operation.
2484 * If an error occurs, record it as the result of the original
2485 * object request and mark it done so it gets completed.
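/*
 * Editorial example (illustrative numbers): for a write to the object
 * covering image offset 5 MiB on an image with 4 MiB (order 22)
 * objects, the computation below yields img_offset = 4 MiB and
 * length = 4 MiB; if the parent overlap is only 6 MiB, the read is
 * clamped to length = 2 MiB.
 */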
static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request = NULL;
	struct rbd_img_request *parent_request = NULL;
	struct rbd_device *rbd_dev;
	u64 img_offset;
	u64 length;
	struct page **pages = NULL;
	u32 page_count;
	int result;
2498 rbd_assert(obj_request_img_data_test(obj_request));
2499 rbd_assert(obj_request_type_valid(obj_request->type));
2501 img_request = obj_request->img_request;
2502 rbd_assert(img_request != NULL);
2503 rbd_dev = img_request->rbd_dev;
2504 rbd_assert(rbd_dev->parent != NULL);
2507 * Determine the byte range covered by the object in the
2508 * child image to which the original request was to be sent.
2510 img_offset = obj_request->img_offset - obj_request->offset;
2511 length = (u64)1 << rbd_dev->header.obj_order;
	 * There is no defined parent data beyond the parent
	 * overlap, so limit what we read at that boundary if
	 * necessary.
	 */
	if (img_offset + length > rbd_dev->parent_overlap) {
		rbd_assert(img_offset < rbd_dev->parent_overlap);
		length = rbd_dev->parent_overlap - img_offset;
	}
2524 * Allocate a page array big enough to receive the data read
2527 page_count = (u32)calc_pages_for(0, length);
2528 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
	if (IS_ERR(pages)) {
		result = PTR_ERR(pages);
		pages = NULL;
		goto out_err;
	}

	result = -ENOMEM;
	parent_request = rbd_parent_request_create(obj_request,
						img_offset, length);
	if (!parent_request)
		goto out_err;

	result = rbd_img_request_fill(parent_request, OBJ_REQUEST_PAGES, pages);
	if (result)
		goto out_err;
2544 parent_request->copyup_pages = pages;
2545 parent_request->copyup_page_count = page_count;
	parent_request->callback = rbd_img_obj_parent_read_full_callback;
	result = rbd_img_request_submit(parent_request);
	if (!result)
		return 0;

	parent_request->copyup_pages = NULL;
	parent_request->copyup_page_count = 0;
	parent_request->obj_request = NULL;
	rbd_obj_request_put(obj_request);
out_err:
	if (pages)
		ceph_release_page_vector(pages, page_count);
	if (parent_request)
		rbd_img_request_put(parent_request);
	obj_request->result = result;
	obj_request->xferred = 0;
	obj_request_done_set(obj_request);

	return result;
}
static void rbd_img_obj_exists_callback(struct rbd_obj_request *obj_request)
{
	struct rbd_obj_request *orig_request;
	struct rbd_device *rbd_dev;
	int result;
2574 rbd_assert(!obj_request_img_data_test(obj_request));
2577 * All we need from the object request is the original
2578 * request and the result of the STAT op. Grab those, then
2579 * we're done with the request.
2581 orig_request = obj_request->obj_request;
2582 obj_request->obj_request = NULL;
2583 rbd_obj_request_put(orig_request);
2584 rbd_assert(orig_request);
2585 rbd_assert(orig_request->img_request);
2587 result = obj_request->result;
2588 obj_request->result = 0;
2590 dout("%s: obj %p for obj %p result %d %llu/%llu\n", __func__,
2591 obj_request, orig_request, result,
2592 obj_request->xferred, obj_request->length);
2593 rbd_obj_request_put(obj_request);
2596 * If the overlap has become 0 (most likely because the
2597 * image has been flattened) we need to free the pages
2598 * and re-submit the original write request.
2600 rbd_dev = orig_request->img_request->rbd_dev;
2601 if (!rbd_dev->parent_overlap) {
2602 struct ceph_osd_client *osdc;
2604 osdc = &rbd_dev->rbd_client->client->osdc;
		result = rbd_obj_request_submit(osdc, orig_request);
		if (!result)
			return;
	}
2611 * Our only purpose here is to determine whether the object
2612 * exists, and we don't want to treat the non-existence as
2613 * an error. If something else comes back, transfer the
2614 * error to the original request and complete it now.
	if (!result) {
		obj_request_existence_set(orig_request, true);
	} else if (result == -ENOENT) {
		obj_request_existence_set(orig_request, false);
	} else if (result) {
		orig_request->result = result;
		goto out;
	}
2626 * Resubmit the original request now that we have recorded
2627 * whether the target object exists.
	orig_request->result = rbd_img_obj_request_submit(orig_request);
out:
	if (orig_request->result)
		rbd_obj_request_complete(orig_request);
}
static int rbd_img_obj_exists_submit(struct rbd_obj_request *obj_request)
{
	struct rbd_obj_request *stat_request;
	struct rbd_device *rbd_dev;
	struct ceph_osd_client *osdc;
	struct page **pages = NULL;
	u32 page_count;
	size_t size;
	int ret;
	/*
	 * The response data for a STAT call consists of:
	 *     le64 length;
	 *     struct {
	 *         le32 tv_sec;
	 *         le32 tv_nsec;
	 *     } mtime;
	 */
	size = sizeof (__le64) + sizeof (__le32) + sizeof (__le32);
2654 page_count = (u32)calc_pages_for(0, size);
	pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	ret = -ENOMEM;
	stat_request = rbd_obj_request_create(obj_request->object_name, 0, 0,
							OBJ_REQUEST_PAGES);
	if (!stat_request)
		goto out;
2665 rbd_obj_request_get(obj_request);
2666 stat_request->obj_request = obj_request;
2667 stat_request->pages = pages;
2668 stat_request->page_count = page_count;
2670 rbd_assert(obj_request->img_request);
2671 rbd_dev = obj_request->img_request->rbd_dev;
	stat_request->osd_req = rbd_osd_req_create(rbd_dev, false,
						stat_request);
	if (!stat_request->osd_req)
		goto out;
	stat_request->callback = rbd_img_obj_exists_callback;
2678 osd_req_op_init(stat_request->osd_req, 0, CEPH_OSD_OP_STAT);
	osd_req_op_raw_data_in_pages(stat_request->osd_req, 0, pages, size, 0,
					false, false);
	rbd_osd_req_format_read(stat_request);

	osdc = &rbd_dev->rbd_client->client->osdc;
	ret = rbd_obj_request_submit(osdc, stat_request);
out:
	if (ret)
		rbd_obj_request_put(obj_request);

	return ret;
}
static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	struct rbd_device *rbd_dev;
	bool known;

	rbd_assert(obj_request_img_data_test(obj_request));
2700 img_request = obj_request->img_request;
2701 rbd_assert(img_request);
2702 rbd_dev = img_request->rbd_dev;
2705 * Only writes to layered images need special handling.
2706 * Reads and non-layered writes are simple object requests.
2707 * Layered writes that start beyond the end of the overlap
2708 * with the parent have no parent data, so they too are
2709 * simple object requests. Finally, if the target object is
2710 * known to already exist, its parent data has already been
2711 * copied, so a write to the object can also be handled as a
2712 * simple object request.
2714 if (!img_request_write_test(img_request) ||
2715 !img_request_layered_test(img_request) ||
2716 rbd_dev->parent_overlap <= obj_request->img_offset ||
2717 ((known = obj_request_known_test(obj_request)) &&
2718 obj_request_exists_test(obj_request))) {
2720 struct rbd_device *rbd_dev;
2721 struct ceph_osd_client *osdc;
2723 rbd_dev = obj_request->img_request->rbd_dev;
2724 osdc = &rbd_dev->rbd_client->client->osdc;
2726 return rbd_obj_request_submit(osdc, obj_request);
2730 * It's a layered write. The target object might exist but
2731 * we may not know that yet. If we know it doesn't exist,
2732 * start by reading the data for the full target object from
2733 * the parent so we can use it for a copyup to the target.
	if (known)
		return rbd_img_obj_parent_read_full(obj_request);

	/* We don't know whether the target exists.  Go find out. */

	return rbd_img_obj_exists_submit(obj_request);
}
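/*
 * Editorial summary of the dispatch above (a sketch of the cases):
 *
 *	read, or non-layered write	-> submit directly
 *	layered write beyond overlap	-> submit directly
 *	target known to exist		-> submit directly
 *	target known not to exist	-> rbd_img_obj_parent_read_full()
 *	existence unknown		-> rbd_img_obj_exists_submit()
 */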
static int rbd_img_request_submit(struct rbd_img_request *img_request)
{
	struct rbd_obj_request *obj_request;
	struct rbd_obj_request *next_obj_request;

	dout("%s: img %p\n", __func__, img_request);
	for_each_obj_request_safe(img_request, obj_request, next_obj_request) {
		int ret;

		ret = rbd_img_obj_request_submit(obj_request);
		if (ret)
			return ret;
	}

	return 0;
}
static void rbd_img_parent_read_callback(struct rbd_img_request *img_request)
{
	struct rbd_obj_request *obj_request;
	struct rbd_device *rbd_dev;
	u64 obj_end;
	u64 img_xferred;
	int img_result;

	rbd_assert(img_request_child_test(img_request));
2770 /* First get what we need from the image request and release it */
2772 obj_request = img_request->obj_request;
2773 img_xferred = img_request->xferred;
2774 img_result = img_request->result;
2775 rbd_img_request_put(img_request);
2778 * If the overlap has become 0 (most likely because the
2779 * image has been flattened) we need to re-submit the
2782 rbd_assert(obj_request);
2783 rbd_assert(obj_request->img_request);
2784 rbd_dev = obj_request->img_request->rbd_dev;
2785 if (!rbd_dev->parent_overlap) {
2786 struct ceph_osd_client *osdc;
2788 osdc = &rbd_dev->rbd_client->client->osdc;
		img_result = rbd_obj_request_submit(osdc, obj_request);
		if (!img_result)
			return;
	}

	obj_request->result = img_result;
	if (obj_request->result)
		goto out;
2799 * We need to zero anything beyond the parent overlap
2800 * boundary. Since rbd_img_obj_request_read_callback()
2801 * will zero anything beyond the end of a short read, an
2802 * easy way to do this is to pretend the data from the
2803 * parent came up short--ending at the overlap boundary.
2805 rbd_assert(obj_request->img_offset < U64_MAX - obj_request->length);
2806 obj_end = obj_request->img_offset + obj_request->length;
	if (obj_end > rbd_dev->parent_overlap) {
		u64 xferred = 0;

		if (obj_request->img_offset < rbd_dev->parent_overlap)
			xferred = rbd_dev->parent_overlap -
					obj_request->img_offset;

		obj_request->xferred = min(img_xferred, xferred);
	} else {
		obj_request->xferred = img_xferred;
	}
out:
	rbd_img_obj_request_read_callback(obj_request);
	rbd_obj_request_complete(obj_request);
}
static void rbd_img_parent_read(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	int result;

	rbd_assert(obj_request_img_data_test(obj_request));
2829 rbd_assert(obj_request->img_request != NULL);
2830 rbd_assert(obj_request->result == (s32) -ENOENT);
2831 rbd_assert(obj_request_type_valid(obj_request->type));
2833 /* rbd_read_finish(obj_request, obj_request->length); */
2834 img_request = rbd_parent_request_create(obj_request,
2835 obj_request->img_offset,
						obj_request->length);
	result = -ENOMEM;
	if (!img_request)
		goto out_err;

	if (obj_request->type == OBJ_REQUEST_BIO)
2842 result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
						obj_request->bio_list);
	else
		result = rbd_img_request_fill(img_request, OBJ_REQUEST_PAGES,
						obj_request->pages);
	if (result)
		goto out_err;

	img_request->callback = rbd_img_parent_read_callback;
	result = rbd_img_request_submit(img_request);
	if (result)
		goto out_err;

	return;

out_err:
	if (img_request)
		rbd_img_request_put(img_request);
2859 obj_request->result = result;
2860 obj_request->xferred = 0;
	obj_request_done_set(obj_request);
}
static int rbd_obj_notify_ack_sync(struct rbd_device *rbd_dev, u64 notify_id)
{
	struct rbd_obj_request *obj_request;
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	int ret;

	obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
							OBJ_REQUEST_NODATA);
	if (!obj_request)
		return -ENOMEM;
	ret = -ENOMEM;
	obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, obj_request);
	if (!obj_request->osd_req)
		goto out;

	osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_NOTIFY_ACK,
					notify_id, 0, 0);
	rbd_osd_req_format_read(obj_request);
	ret = rbd_obj_request_submit(osdc, obj_request);
	if (ret)
		goto out;
	ret = rbd_obj_request_wait(obj_request);
out:
	rbd_obj_request_put(obj_request);

	return ret;
}
static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
{
	struct rbd_device *rbd_dev = (struct rbd_device *)data;
	int ret;

	if (!rbd_dev)
		return;

2902 dout("%s: \"%s\" notify_id %llu opcode %u\n", __func__,
2903 rbd_dev->header_name, (unsigned long long)notify_id,
2904 (unsigned int)opcode);
	ret = rbd_dev_refresh(rbd_dev);
	if (ret)
		rbd_warn(rbd_dev, "header refresh error (%d)\n", ret);

	rbd_obj_notify_ack_sync(rbd_dev, notify_id);
}
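/*
 * Editorial note (a sketch of the flow, not authoritative): the driver
 * registers a watch on the image's header object below.  When another
 * client updates the image, an osd notification arrives and lands in
 * rbd_watch_cb() above, which refreshes the cached header and then
 * acknowledges the notification so the notifier can proceed.
 */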
2913 * Request sync osd watch/unwatch. The value of "start" determines
2914 * whether a watch request is being initiated or torn down.
static int __rbd_dev_header_watch_sync(struct rbd_device *rbd_dev, bool start)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct rbd_obj_request *obj_request;
	int ret;

	rbd_assert(start ^ !!rbd_dev->watch_event);
	rbd_assert(start ^ !!rbd_dev->watch_request);

	if (start) {
		ret = ceph_osdc_create_event(osdc, rbd_watch_cb, rbd_dev,
						&rbd_dev->watch_event);
		if (ret < 0)
			return ret;
		rbd_assert(rbd_dev->watch_event != NULL);
	}
	ret = -ENOMEM;
	obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
						OBJ_REQUEST_NODATA);
	if (!obj_request)
		goto out_cancel;

	obj_request->osd_req = rbd_osd_req_create(rbd_dev, true, obj_request);
	if (!obj_request->osd_req)
		goto out_cancel;

	if (start)
		ceph_osdc_set_request_linger(osdc, obj_request->osd_req);
	else
		ceph_osdc_unregister_linger_request(osdc,
					rbd_dev->watch_request->osd_req);
2949 osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_WATCH,
2950 rbd_dev->watch_event->cookie, 0, start ? 1 : 0);
2951 rbd_osd_req_format_write(obj_request);
	ret = rbd_obj_request_submit(osdc, obj_request);
	if (ret)
		goto out_cancel;
	ret = rbd_obj_request_wait(obj_request);
	if (ret)
		goto out_cancel;
	ret = obj_request->result;
	if (ret)
		goto out_cancel;
2964 * A watch request is set to linger, so the underlying osd
2965 * request won't go away until we unregister it. We retain
2966 * a pointer to the object request during that time (in
2967 * rbd_dev->watch_request), so we'll keep a reference to
	 * it.  We'll drop that reference (below) after we've
	 * unregistered it.
	 */
	if (start) {
		rbd_dev->watch_request = obj_request;

		return 0;
	}

	/* We have successfully torn down the watch request */

	rbd_obj_request_put(rbd_dev->watch_request);
	rbd_dev->watch_request = NULL;
out_cancel:
	/* Cancel the event if we're tearing down, or on error */
	ceph_osdc_cancel_event(rbd_dev->watch_event);
	rbd_dev->watch_event = NULL;
	if (obj_request)
		rbd_obj_request_put(obj_request);

	return ret;
}
static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev)
{
	return __rbd_dev_header_watch_sync(rbd_dev, true);
}

static void rbd_dev_header_unwatch_sync(struct rbd_device *rbd_dev)
{
	int ret;

	ret = __rbd_dev_header_watch_sync(rbd_dev, false);
	if (ret)
		rbd_warn(rbd_dev, "unable to tear down watch request: %d\n",
			 ret);
}
3008 * Synchronous osd object method call. Returns the number of bytes
3009 * returned in the outbound buffer, or a negative error code.
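/*
 * Editorial example: a typical call, following the pattern used
 * elsewhere in this file (see _rbd_dev_v2_snap_size()), invokes an
 * "rbd" class method on the header object with a snapshot id as the
 * outbound parameter:
 *
 *	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
 *				"rbd", "get_size",
 *				&snapid, sizeof (snapid),
 *				&size_buf, sizeof (size_buf));
 */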
3011 static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
3012 const char *object_name,
3013 const char *class_name,
3014 const char *method_name,
3015 const void *outbound,
3016 size_t outbound_size,
			       void *inbound,
			       size_t inbound_size)
{
3020 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3021 struct rbd_obj_request *obj_request;
	struct page **pages;
	u32 page_count;
	int ret;
	/*
	 * Method calls are ultimately read operations.  The result
	 * should be placed into the inbound buffer provided.  They
	 * also supply outbound data--parameters for the object
	 * method.  Currently if this is present it will be a
	 * snapshot id.
	 */
3034 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	ret = -ENOMEM;
	obj_request = rbd_obj_request_create(object_name, 0, inbound_size,
							OBJ_REQUEST_PAGES);
	if (!obj_request)
		goto out;
3044 obj_request->pages = pages;
3045 obj_request->page_count = page_count;
	obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, obj_request);
	if (!obj_request->osd_req)
		goto out;
3051 osd_req_op_cls_init(obj_request->osd_req, 0, CEPH_OSD_OP_CALL,
3052 class_name, method_name);
3053 if (outbound_size) {
3054 struct ceph_pagelist *pagelist;
		pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS);
		if (!pagelist)
			goto out;

		ceph_pagelist_init(pagelist);
		ceph_pagelist_append(pagelist, outbound, outbound_size);
		osd_req_op_cls_request_data_pagelist(obj_request->osd_req, 0,
						pagelist);
	}
	osd_req_op_cls_response_data_pages(obj_request->osd_req, 0,
					obj_request->pages, inbound_size,
					0, false, false);
	rbd_osd_req_format_read(obj_request);
	ret = rbd_obj_request_submit(osdc, obj_request);
	if (ret)
		goto out;
	ret = rbd_obj_request_wait(obj_request);
	if (ret)
		goto out;

	ret = obj_request->result;
	if (ret < 0)
		goto out;

	rbd_assert(obj_request->xferred < (u64)INT_MAX);
	ret = (int)obj_request->xferred;
	ceph_copy_from_page_vector(pages, inbound, 0, obj_request->xferred);
out:
	if (obj_request)
		rbd_obj_request_put(obj_request);
	else
		ceph_release_page_vector(pages, page_count);

	return ret;
}
3093 static void rbd_request_fn(struct request_queue *q)
		__releases(q->queue_lock) __acquires(q->queue_lock)
{
	struct rbd_device *rbd_dev = q->queuedata;
	bool read_only = rbd_dev->mapping.read_only;
	struct request *rq;
	int result;
3101 while ((rq = blk_fetch_request(q))) {
3102 bool write_request = rq_data_dir(rq) == WRITE;
		struct rbd_img_request *img_request;
		u64 offset;
		u64 length;
3107 /* Ignore any non-FS requests that filter through. */
		if (rq->cmd_type != REQ_TYPE_FS) {
			dout("%s: non-fs request type %d\n", __func__,
				(int) rq->cmd_type);
			__blk_end_request_all(rq, 0);
			continue;
		}
3116 /* Ignore/skip any zero-length requests */
3118 offset = (u64) blk_rq_pos(rq) << SECTOR_SHIFT;
		length = (u64) blk_rq_bytes(rq);
		if (!length) {
			dout("%s: zero-length request\n", __func__);
			__blk_end_request_all(rq, 0);
			continue;
		}
3127 spin_unlock_irq(q->queue_lock);
3129 /* Disallow writes to a read-only device */
		if (write_request) {
			result = -EROFS;
			if (read_only)
				goto end_request;
			rbd_assert(rbd_dev->spec->snap_id == CEPH_NOSNAP);
		} else {
3139 * Quit early if the mapped snapshot no longer
3140 * exists. It's still possible the snapshot will
3141 * have disappeared by the time our request arrives
			 * at the osd, but there's no sense in sending it if
			 * we already know.
			 */
			if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags)) {
				dout("request for non-existent snapshot");
				rbd_assert(rbd_dev->spec->snap_id != CEPH_NOSNAP);
				result = -ENXIO;
				goto end_request;
			}
		}
		if (offset && length > U64_MAX - offset + 1) {
			rbd_warn(rbd_dev, "bad request range (%llu~%llu)\n",
				offset, length);
			result = -EINVAL;
			goto end_request;	/* Shouldn't happen */
		}
		if (offset + length > rbd_dev->mapping.size) {
			rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)\n",
				offset, length, rbd_dev->mapping.size);
			result = -EIO;
			goto end_request;
		}
		result = -ENOMEM;
		img_request = rbd_img_request_create(rbd_dev, offset, length,
							write_request);
		if (!img_request)
			goto end_request;

		img_request->rq = rq;

		result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
						rq->bio);
		if (!result)
			result = rbd_img_request_submit(img_request);
		if (result)
			rbd_img_request_put(img_request);
end_request:
		spin_lock_irq(q->queue_lock);
		if (result < 0) {
			rbd_warn(rbd_dev, "%s %llx at %llx result %d\n",
				write_request ? "write" : "read",
				length, offset, result);

			__blk_end_request_all(rq, result);
		}
	}
}
3193 * a queue callback. Makes sure that we don't create a bio that spans across
3194 * multiple osd objects. One exception would be with a single page bios,
3195 * which we handle later at bio_chain_clone_range()
3197 static int rbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bmd,
			  struct bio_vec *bvec)
{
	struct rbd_device *rbd_dev = q->queuedata;
3201 sector_t sector_offset;
3202 sector_t sectors_per_obj;
	sector_t obj_sector_offset;
	int ret;
3207 * Find how far into its rbd object the partition-relative
3208 * bio start sector is to offset relative to the enclosing
3211 sector_offset = get_start_sect(bmd->bi_bdev) + bmd->bi_sector;
3212 sectors_per_obj = 1 << (rbd_dev->header.obj_order - SECTOR_SHIFT);
3213 obj_sector_offset = sector_offset & (sectors_per_obj - 1);
3216 * Compute the number of bytes from that offset to the end
3217 * of the object. Account for what's already used by the bio.
3219 ret = (int) (sectors_per_obj - obj_sector_offset) << SECTOR_SHIFT;
	if (ret > bmd->bi_size)
		ret -= bmd->bi_size;
	else
		ret = 0;
3226 * Don't send back more than was asked for. And if the bio
3227 * was empty, let the whole thing through because: "Note
3228 * that a block device *must* allow a single page to be
3229 * added to an empty bio."
3231 rbd_assert(bvec->bv_len <= PAGE_SIZE);
	if (ret > (int) bvec->bv_len || !bmd->bi_size)
		ret = (int) bvec->bv_len;

	return ret;
}
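/*
 * Editorial example (illustrative numbers): with 4 MiB objects there
 * are 8192 sectors per object.  A bio beginning at absolute sector
 * 8000 sits 8000 sectors into its object, so only 192 sectors
 * (98304 bytes) remain before the boundary; a bvec that would cross
 * it is trimmed accordingly by the logic above.
 */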
3238 static void rbd_free_disk(struct rbd_device *rbd_dev)
	struct gendisk *disk = rbd_dev->disk;

	if (!disk)
		return;

	rbd_dev->disk = NULL;
	if (disk->flags & GENHD_FL_UP) {
		del_gendisk(disk);
		if (disk->queue)
			blk_cleanup_queue(disk->queue);
	}
	put_disk(disk);
}
3254 static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
3255 const char *object_name,
				u64 offset, u64 length, void *buf)
{
3259 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3260 struct rbd_obj_request *obj_request;
3261 struct page **pages = NULL;
3266 page_count = (u32) calc_pages_for(offset, length);
	pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		return ret;
	}

	ret = -ENOMEM;
	obj_request = rbd_obj_request_create(object_name, offset, length,
							OBJ_REQUEST_PAGES);
	if (!obj_request)
		goto out;
3277 obj_request->pages = pages;
3278 obj_request->page_count = page_count;
3280 obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, obj_request);
	if (!obj_request->osd_req)
		goto out;
3284 osd_req_op_extent_init(obj_request->osd_req, 0, CEPH_OSD_OP_READ,
3285 offset, length, 0, 0);
3286 osd_req_op_extent_osd_data_pages(obj_request->osd_req, 0,
3288 obj_request->length,
3289 obj_request->offset & ~PAGE_MASK,
3291 rbd_osd_req_format_read(obj_request);
	ret = rbd_obj_request_submit(osdc, obj_request);
	if (ret)
		goto out;
	ret = rbd_obj_request_wait(obj_request);
	if (ret)
		goto out;

	ret = obj_request->result;
	if (ret < 0)
		goto out;
3304 rbd_assert(obj_request->xferred <= (u64) SIZE_MAX);
3305 size = (size_t) obj_request->xferred;
3306 ceph_copy_from_page_vector(pages, buf, 0, size);
	rbd_assert(size <= (size_t)INT_MAX);
	ret = (int)size;
out:
	if (obj_request)
		rbd_obj_request_put(obj_request);
	else
		ceph_release_page_vector(pages, page_count);

	return ret;
}
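/*
 * Editorial note: rbd_obj_read_sync() is the blocking read helper used
 * by rbd_dev_v1_header_info() below to pull the format 1 header object
 * into a kernel buffer before parsing it; unlike the callback-driven
 * request paths above, it waits for the osd reply before returning.
 */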
3319 * Read the complete header for the given rbd device. On successful
3320 * return, the rbd_dev->header field will contain up-to-date
3321 * information about the image.
static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev)
{
	struct rbd_image_header_ondisk *ondisk = NULL;
	u32 snap_count = 0;
	u64 names_size = 0;
	u32 want_count;
	size_t size;
	int ret;
3332 * The complete header will include an array of its 64-bit
3333 * snapshot ids, followed by the names of those snapshots as
3334 * a contiguous block of NUL-terminated strings. Note that
3335 * the number of snapshots could change by the time we read
3336 * it in, in which case we re-read it.
	do {
		size = sizeof (*ondisk);
3344 size += snap_count * sizeof (struct rbd_image_snap_ondisk);
		ondisk = kmalloc(size, GFP_KERNEL);
		if (!ondisk)
			return -ENOMEM;

		ret = rbd_obj_read_sync(rbd_dev, rbd_dev->header_name,
					0, size, ondisk);
		if (ret < 0)
			goto out;
		if ((size_t)ret < size) {
			ret = -ENXIO;
			rbd_warn(rbd_dev, "short header read (want %zd got %d)",
				size, ret);
			goto out;
		}
		if (!rbd_dev_ondisk_valid(ondisk)) {
			ret = -ENXIO;
			rbd_warn(rbd_dev, "invalid header");
			goto out;
		}
3366 names_size = le64_to_cpu(ondisk->snap_names_len);
3367 want_count = snap_count;
3368 snap_count = le32_to_cpu(ondisk->snap_count);
3369 } while (snap_count != want_count);
	ret = rbd_header_from_disk(rbd_dev, ondisk);
out:
	kfree(ondisk);

	return ret;
}
3379 * Clear the rbd device's EXISTS flag if the snapshot it's mapped to
3380 * has disappeared from the (just updated) snapshot context.
static void rbd_exists_validate(struct rbd_device *rbd_dev)
{
	u64 snap_id;

	if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags))
		return;

	snap_id = rbd_dev->spec->snap_id;
	if (snap_id == CEPH_NOSNAP)
		return;

	if (rbd_dev_snap_index(rbd_dev, snap_id) == BAD_SNAP_INDEX)
		clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
}
static void rbd_dev_update_size(struct rbd_device *rbd_dev)
{
	sector_t size;
	bool removing;

	/*
	 * Don't hold the lock while doing disk operations,
	 * or lock ordering will conflict with the bdev mutex via:
	 * rbd_add() -> blkdev_get() -> rbd_open()
	 */
	spin_lock_irq(&rbd_dev->lock);
	removing = test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags);
	spin_unlock_irq(&rbd_dev->lock);
	/*
	 * If the device is being removed, rbd_dev->disk has
	 * been destroyed, so don't try to update its size.
	 */
	if (!removing) {
		size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
		dout("setting size to %llu sectors", (unsigned long long)size);
		set_capacity(rbd_dev->disk, size);
		revalidate_disk(rbd_dev->disk);
	}
}
static int rbd_dev_refresh(struct rbd_device *rbd_dev)
{
	u64 mapping_size;
	int ret;

	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	down_write(&rbd_dev->header_rwsem);
	mapping_size = rbd_dev->mapping.size;
	if (rbd_dev->image_format == 1)
		ret = rbd_dev_v1_header_info(rbd_dev);
	else
		ret = rbd_dev_v2_header_info(rbd_dev);

	/* If it's a mapped snapshot, validate its EXISTS flag */

	rbd_exists_validate(rbd_dev);
	up_write(&rbd_dev->header_rwsem);

	if (mapping_size != rbd_dev->mapping.size)
		rbd_dev_update_size(rbd_dev);

	return ret;
}
static int rbd_init_disk(struct rbd_device *rbd_dev)
{
	struct gendisk *disk;
	struct request_queue *q;
	u64 segment_size;

	/* create gendisk info */
	disk = alloc_disk(single_major ?
			  (1 << RBD_SINGLE_MAJOR_PART_SHIFT) :
			  RBD_MINORS_PER_MAJOR);
	if (!disk)
		return -ENOMEM;

	snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
		 rbd_dev->dev_id);
3462 disk->major = rbd_dev->major;
3463 disk->first_minor = rbd_dev->minor;
	if (single_major)
		disk->flags |= GENHD_FL_EXT_DEVT;
3466 disk->fops = &rbd_bd_ops;
3467 disk->private_data = rbd_dev;
	q = blk_init_queue(rbd_request_fn, &rbd_dev->lock);
	if (!q)
		goto out_disk;
3473 /* We use the default size, but let's be explicit about it. */
3474 blk_queue_physical_block_size(q, SECTOR_SIZE);
3476 /* set io sizes to object size */
3477 segment_size = rbd_obj_bytes(&rbd_dev->header);
3478 blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE);
3479 blk_queue_max_segment_size(q, segment_size);
3480 blk_queue_io_min(q, segment_size);
3481 blk_queue_io_opt(q, segment_size);
3483 blk_queue_merge_bvec(q, rbd_merge_bvec);
	q->queuedata = rbd_dev;

	rbd_dev->disk = disk;

	return 0;
out_disk:
	put_disk(disk);

	return -ENOMEM;
}
static struct rbd_device *dev_to_rbd_dev(struct device *dev)
{
	return container_of(dev, struct rbd_device, dev);
}
3506 static ssize_t rbd_size_show(struct device *dev,
3507 struct device_attribute *attr, char *buf)
3509 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3511 return sprintf(buf, "%llu\n",
3512 (unsigned long long)rbd_dev->mapping.size);
3516 * Note this shows the features for whatever's mapped, which is not
3517 * necessarily the base image.
3519 static ssize_t rbd_features_show(struct device *dev,
3520 struct device_attribute *attr, char *buf)
3522 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3524 return sprintf(buf, "0x%016llx\n",
3525 (unsigned long long)rbd_dev->mapping.features);
3528 static ssize_t rbd_major_show(struct device *dev,
3529 struct device_attribute *attr, char *buf)
3531 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
	if (rbd_dev->major)
		return sprintf(buf, "%d\n", rbd_dev->major);

	return sprintf(buf, "(none)\n");
}
3539 static ssize_t rbd_minor_show(struct device *dev,
3540 struct device_attribute *attr, char *buf)
3542 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3544 return sprintf(buf, "%d\n", rbd_dev->minor);
3547 static ssize_t rbd_client_id_show(struct device *dev,
3548 struct device_attribute *attr, char *buf)
3550 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3552 return sprintf(buf, "client%lld\n",
3553 ceph_client_id(rbd_dev->rbd_client->client));
3556 static ssize_t rbd_pool_show(struct device *dev,
3557 struct device_attribute *attr, char *buf)
3559 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3561 return sprintf(buf, "%s\n", rbd_dev->spec->pool_name);
3564 static ssize_t rbd_pool_id_show(struct device *dev,
3565 struct device_attribute *attr, char *buf)
3567 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3569 return sprintf(buf, "%llu\n",
3570 (unsigned long long) rbd_dev->spec->pool_id);
3573 static ssize_t rbd_name_show(struct device *dev,
3574 struct device_attribute *attr, char *buf)
3576 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3578 if (rbd_dev->spec->image_name)
3579 return sprintf(buf, "%s\n", rbd_dev->spec->image_name);
3581 return sprintf(buf, "(unknown)\n");
3584 static ssize_t rbd_image_id_show(struct device *dev,
3585 struct device_attribute *attr, char *buf)
3587 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3589 return sprintf(buf, "%s\n", rbd_dev->spec->image_id);
3593 * Shows the name of the currently-mapped snapshot (or
3594 * RBD_SNAP_HEAD_NAME for the base image).
3596 static ssize_t rbd_snap_show(struct device *dev,
3597 struct device_attribute *attr,
3600 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3602 return sprintf(buf, "%s\n", rbd_dev->spec->snap_name);
3606 * For an rbd v2 image, shows the pool id, image id, and snapshot id
3607 * for the parent image. If there is no parent, simply shows
3608 * "(no parent image)".
3610 static ssize_t rbd_parent_show(struct device *dev,
3611 struct device_attribute *attr,
3614 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
	struct rbd_spec *spec = rbd_dev->parent_spec;
	int count;
	char *bufp = buf;

	if (!spec)
		return sprintf(buf, "(no parent image)\n");
	count = sprintf(bufp, "pool_id %llu\npool_name %s\n",
			(unsigned long long) spec->pool_id, spec->pool_name);
	if (count < 0)
		return count;
	bufp += count;

	count = sprintf(bufp, "image_id %s\nimage_name %s\n", spec->image_id,
			spec->image_name ? spec->image_name : "(unknown)");
	if (count < 0)
		return count;
	bufp += count;

	count = sprintf(bufp, "snap_id %llu\nsnap_name %s\n",
			(unsigned long long) spec->snap_id, spec->snap_name);
	if (count < 0)
		return count;
	bufp += count;

	count = sprintf(bufp, "overlap %llu\n", rbd_dev->parent_overlap);
	if (count < 0)
		return count;
	bufp += count;

	return (ssize_t) (bufp - buf);
}
3648 static ssize_t rbd_image_refresh(struct device *dev,
3649 struct device_attribute *attr,
3653 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3656 ret = rbd_dev_refresh(rbd_dev);
3658 rbd_warn(rbd_dev, ": manual header refresh error (%d)\n", ret);
3660 return ret < 0 ? ret : size;
3663 static DEVICE_ATTR(size, S_IRUGO, rbd_size_show, NULL);
3664 static DEVICE_ATTR(features, S_IRUGO, rbd_features_show, NULL);
3665 static DEVICE_ATTR(major, S_IRUGO, rbd_major_show, NULL);
3666 static DEVICE_ATTR(minor, S_IRUGO, rbd_minor_show, NULL);
3667 static DEVICE_ATTR(client_id, S_IRUGO, rbd_client_id_show, NULL);
3668 static DEVICE_ATTR(pool, S_IRUGO, rbd_pool_show, NULL);
3669 static DEVICE_ATTR(pool_id, S_IRUGO, rbd_pool_id_show, NULL);
3670 static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL);
3671 static DEVICE_ATTR(image_id, S_IRUGO, rbd_image_id_show, NULL);
3672 static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh);
3673 static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL);
3674 static DEVICE_ATTR(parent, S_IRUGO, rbd_parent_show, NULL);
3676 static struct attribute *rbd_attrs[] = {
3677 &dev_attr_size.attr,
3678 &dev_attr_features.attr,
3679 &dev_attr_major.attr,
3680 &dev_attr_minor.attr,
3681 &dev_attr_client_id.attr,
3682 &dev_attr_pool.attr,
3683 &dev_attr_pool_id.attr,
3684 &dev_attr_name.attr,
3685 &dev_attr_image_id.attr,
3686 &dev_attr_current_snap.attr,
3687 &dev_attr_parent.attr,
	&dev_attr_refresh.attr,
	NULL
};
static struct attribute_group rbd_attr_group = {
	.attrs = rbd_attrs,
};

static const struct attribute_group *rbd_attr_groups[] = {
	&rbd_attr_group,
	NULL
};
static void rbd_sysfs_dev_release(struct device *dev)
{
}

static struct device_type rbd_device_type = {
	.name		= "rbd",
	.groups		= rbd_attr_groups,
	.release	= rbd_sysfs_dev_release,
};
static struct rbd_spec *rbd_spec_get(struct rbd_spec *spec)
{
	kref_get(&spec->kref);

	return spec;
}

static void rbd_spec_free(struct kref *kref);
static void rbd_spec_put(struct rbd_spec *spec)
{
	if (spec)
		kref_put(&spec->kref, rbd_spec_free);
}
static struct rbd_spec *rbd_spec_alloc(void)
{
	struct rbd_spec *spec;

	spec = kzalloc(sizeof (*spec), GFP_KERNEL);
	if (!spec)
		return NULL;
	kref_init(&spec->kref);

	return spec;
}
static void rbd_spec_free(struct kref *kref)
{
	struct rbd_spec *spec = container_of(kref, struct rbd_spec, kref);

	kfree(spec->pool_name);
	kfree(spec->image_id);
	kfree(spec->image_name);
	kfree(spec->snap_name);
	kfree(spec);
}
3748 static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
3749 struct rbd_spec *spec)
	struct rbd_device *rbd_dev;

	rbd_dev = kzalloc(sizeof (*rbd_dev), GFP_KERNEL);
	if (!rbd_dev)
		return NULL;

	spin_lock_init(&rbd_dev->lock);
3759 atomic_set(&rbd_dev->parent_ref, 0);
3760 INIT_LIST_HEAD(&rbd_dev->node);
3761 init_rwsem(&rbd_dev->header_rwsem);
3763 rbd_dev->spec = spec;
3764 rbd_dev->rbd_client = rbdc;
3766 /* Initialize the layout used for all rbd requests */
3768 rbd_dev->layout.fl_stripe_unit = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
3769 rbd_dev->layout.fl_stripe_count = cpu_to_le32(1);
3770 rbd_dev->layout.fl_object_size = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
	rbd_dev->layout.fl_pg_pool = cpu_to_le32((u32) spec->pool_id);

	return rbd_dev;
}
static void rbd_dev_destroy(struct rbd_device *rbd_dev)
{
	rbd_put_client(rbd_dev->rbd_client);
	rbd_spec_put(rbd_dev->spec);
	kfree(rbd_dev);
}
3784 * Get the size and object order for an image snapshot, or if
3785 * snap_id is CEPH_NOSNAP, gets this information for the base
3788 static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
3789 u8 *order, u64 *snap_size)
	__le64 snapid = cpu_to_le64(snap_id);
	int ret;
	struct {
		u8 order;
		__le64 size;
	} __attribute__ ((packed)) size_buf = { 0 };
3798 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3800 &snapid, sizeof (snapid),
3801 &size_buf, sizeof (size_buf));
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		return ret;
	if (ret < sizeof (size_buf))
		return -ERANGE;

	if (order) {
		*order = size_buf.order;
		dout("  order %u", (unsigned int)*order);
	}
	*snap_size = le64_to_cpu(size_buf.size);

	dout("  snap_id 0x%016llx snap_size = %llu\n",
		(unsigned long long)snap_id,
		(unsigned long long)*snap_size);

	return 0;
}
static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev)
{
	return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
					&rbd_dev->header.obj_order,
					&rbd_dev->header.image_size);
}
static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
{
	void *reply_buf;
	int ret;
	void *p;

	reply_buf = kzalloc(RBD_OBJ_PREFIX_LEN_MAX, GFP_KERNEL);
	if (!reply_buf)
		return -ENOMEM;
3838 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3839 "rbd", "get_object_prefix", NULL, 0,
3840 reply_buf, RBD_OBJ_PREFIX_LEN_MAX);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		goto out;

	p = reply_buf;
	rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
						p + ret, NULL, GFP_NOIO);
	ret = 0;

	if (IS_ERR(rbd_dev->header.object_prefix)) {
		ret = PTR_ERR(rbd_dev->header.object_prefix);
		rbd_dev->header.object_prefix = NULL;
	} else {
		dout("  object_prefix = %s\n", rbd_dev->header.object_prefix);
	}
out:
	kfree(reply_buf);

	return ret;
}
static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
		u64 *snap_features)
{
	__le64 snapid = cpu_to_le64(snap_id);
	struct {
		__le64 features;
		__le64 incompat;
	} __attribute__ ((packed)) features_buf = { 0 };
	u64 incompat;
	int ret;
3873 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3874 "rbd", "get_features",
3875 &snapid, sizeof (snapid),
3876 &features_buf, sizeof (features_buf));
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		return ret;
	if (ret < sizeof (features_buf))
		return -ERANGE;

	incompat = le64_to_cpu(features_buf.incompat);
	if (incompat & ~RBD_FEATURES_SUPPORTED)
		return -ENXIO;
	*snap_features = le64_to_cpu(features_buf.features);

	dout("  snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n",
		(unsigned long long)snap_id,
		(unsigned long long)*snap_features,
		(unsigned long long)le64_to_cpu(features_buf.incompat));

	return 0;
}
static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
{
	return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
						&rbd_dev->header.features);
}
static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
{
	struct rbd_spec *parent_spec;
	size_t size;
	void *reply_buf = NULL;
	__le64 snapid;
	void *p;
	void *end;
	u64 pool_id;
	char *image_id;
	u64 snap_id;
	u64 overlap;
	int ret;
	parent_spec = rbd_spec_alloc();
	if (!parent_spec)
		return -ENOMEM;
3921 size = sizeof (__le64) + /* pool_id */
3922 sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX + /* image_id */
3923 sizeof (__le64) + /* snap_id */
3924 sizeof (__le64); /* overlap */
	reply_buf = kmalloc(size, GFP_KERNEL);
	if (!reply_buf) {
		ret = -ENOMEM;
		goto out_err;
	}
3931 snapid = cpu_to_le64(CEPH_NOSNAP);
3932 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3933 "rbd", "get_parent",
				&snapid, sizeof (snapid),
				reply_buf, size);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		goto out_err;

	p = reply_buf;
	end = reply_buf + ret;
	ret = -ERANGE;
3943 ceph_decode_64_safe(&p, end, pool_id, out_err);
3944 if (pool_id == CEPH_NOPOOL) {
3946 * Either the parent never existed, or we have
3947 * record of it but the image got flattened so it no
3948 * longer has a parent. When the parent of a
3949 * layered image disappears we immediately set the
3950 * overlap to 0. The effect of this is that all new
3951 * requests will be treated as if the image had no
		if (rbd_dev->parent_overlap) {
			rbd_dev->parent_overlap = 0;
			smp_mb();
			rbd_dev_parent_put(rbd_dev);
			pr_info("%s: clone image has been flattened\n",
				rbd_dev->disk->disk_name);
		}

		goto out;	/* No parent?  No problem. */
3965 /* The ceph file layout needs to fit pool id in 32 bits */
3968 if (pool_id > (u64)U32_MAX) {
3969 rbd_warn(NULL, "parent pool id too large (%llu > %u)\n",
3970 (unsigned long long)pool_id, U32_MAX);
3974 image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
3975 if (IS_ERR(image_id)) {
3976 ret = PTR_ERR(image_id);
3979 ceph_decode_64_safe(&p, end, snap_id, out_err);
3980 ceph_decode_64_safe(&p, end, overlap, out_err);
3983 * The parent won't change (except when the clone is
	 * flattened, already handled that).  So we only need to
	 * record the parent spec if we have not already done so.
	 */
3987 if (!rbd_dev->parent_spec) {
3988 parent_spec->pool_id = pool_id;
3989 parent_spec->image_id = image_id;
3990 parent_spec->snap_id = snap_id;
3991 rbd_dev->parent_spec = parent_spec;
3992 parent_spec = NULL; /* rbd_dev now owns this */
3996 * We always update the parent overlap. If it's zero we
3997 * treat it specially.
	rbd_dev->parent_overlap = overlap;
	smp_mb();
	if (!overlap) {
4003 /* A null parent_spec indicates it's the initial probe */
		if (parent_spec) {
			/*
			 * The overlap has become zero, so the clone
			 * must have been resized down to 0 at some
			 * point.  Treat this the same as a flatten.
			 */
			rbd_dev_parent_put(rbd_dev);
			pr_info("%s: clone image now standalone\n",
				rbd_dev->disk->disk_name);
		} else {
			/*
			 * For the initial probe, if we find the
			 * overlap is zero we just pretend there was
			 * no parent image.
			 */
			rbd_warn(rbd_dev, "ignoring parent of "
						"clone with overlap 0\n");
		}
	}
out:
	ret = 0;
out_err:
	kfree(reply_buf);
	rbd_spec_put(parent_spec);

	return ret;
}
static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
{
	struct {
		__le64 stripe_unit;
		__le64 stripe_count;
	} __attribute__ ((packed)) striping_info_buf = { 0 };
	size_t size = sizeof (striping_info_buf);
	void *p;
	u64 obj_size;
	u64 stripe_unit;
	u64 stripe_count;
	int ret;
4046 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
4047 "rbd", "get_stripe_unit_count", NULL, 0,
4048 (char *)&striping_info_buf, size);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		return ret;
	if (ret < size)
		return -ERANGE;
4056 * We don't actually support the "fancy striping" feature
4057 * (STRIPINGV2) yet, but if the striping sizes are the
4058 * defaults the behavior is the same as before. So find
4059 * out, and only fail if the image has non-default values.
4062 obj_size = (u64)1 << rbd_dev->header.obj_order;
4063 p = &striping_info_buf;
4064 stripe_unit = ceph_decode_64(&p);
	if (stripe_unit != obj_size) {
		rbd_warn(rbd_dev, "unsupported stripe unit "
				"(got %llu want %llu)",
				stripe_unit, obj_size);
		return -EINVAL;
	}
	stripe_count = ceph_decode_64(&p);
	if (stripe_count != 1) {
		rbd_warn(rbd_dev, "unsupported stripe count "
				"(got %llu want 1)", stripe_count);
		return -EINVAL;
	}
	rbd_dev->header.stripe_unit = stripe_unit;
	rbd_dev->header.stripe_count = stripe_count;

	return 0;
}
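/*
 * Editorial example: the only striping layout accepted above is the
 * default one, i.e. for an image with 4 MiB objects:
 *
 *	stripe_unit  = 4194304	(equal to the object size)
 *	stripe_count = 1
 *
 * which is byte-for-byte equivalent to an image without STRIPINGV2.
 */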
4083 static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
	size_t image_id_size;
	char *image_id;
	void *p;
	void *end;
	size_t size;
	void *reply_buf = NULL;
	size_t len = 0;
	char *image_name = NULL;
	ssize_t ret;
4095 rbd_assert(!rbd_dev->spec->image_name);
4097 len = strlen(rbd_dev->spec->image_id);
4098 image_id_size = sizeof (__le32) + len;
	image_id = kmalloc(image_id_size, GFP_KERNEL);
	if (!image_id)
		return NULL;

	p = image_id;
	end = image_id + image_id_size;
	ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32)len);
4105 ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32)len);
4107 size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX;
	reply_buf = kmalloc(size, GFP_KERNEL);
	if (!reply_buf)
		goto out;

	ret = rbd_obj_method_sync(rbd_dev, RBD_DIRECTORY,
				"rbd", "dir_get_name",
				image_id, image_id_size,
				reply_buf, size);
	if (ret < 0)
		goto out;
	p = reply_buf;
	end = reply_buf + ret;
	image_name = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
	if (IS_ERR(image_name))
		image_name = NULL;
	else
		dout("%s: name is %s len is %zd\n", __func__, image_name, len);
out:
	kfree(reply_buf);
	kfree(image_id);

	return image_name;
}
static u64 rbd_v1_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
{
	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
	const char *snap_name;
	u32 which = 0;

	/* Skip over names until we find the one we are looking for */

	snap_name = rbd_dev->header.snap_names;
	while (which < snapc->num_snaps) {
		if (!strcmp(name, snap_name))
			return snapc->snaps[which];
		snap_name += strlen(snap_name) + 1;
		which++;
	}

	return CEPH_NOSNAP;
}
static u64 rbd_v2_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
{
	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
	u64 snap_id;
	bool found = false;
	u32 which;

	for (which = 0; !found && which < snapc->num_snaps; which++) {
		const char *snap_name;

		snap_id = snapc->snaps[which];
		snap_name = rbd_dev_v2_snap_name(rbd_dev, snap_id);
		if (IS_ERR(snap_name)) {
			/* ignore no-longer existing snapshots */
			if (PTR_ERR(snap_name) == -ENOENT)
				continue;
			else
				break;
		}
		found = !strcmp(name, snap_name);
		kfree(snap_name);
	}

	return found ? snap_id : CEPH_NOSNAP;
}
4177 * Assumes name is never RBD_SNAP_HEAD_NAME; returns CEPH_NOSNAP if
4178 * no snapshot by that name is found, or if an error occurs.
4180 static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
	if (rbd_dev->image_format == 1)
		return rbd_v1_snap_id_by_name(rbd_dev, name);

	return rbd_v2_snap_id_by_name(rbd_dev, name);
}
4189 * When an rbd image has a parent image, it is identified by the
4190 * pool, image, and snapshot ids (not names). This function fills
4191 * in the names for those ids. (It's OK if we can't figure out the
4192 * name for an image id, but the pool and snapshot ids should always
4193 * exist and have names.) All names in an rbd spec are dynamically
4196 * When an image being mapped (not a parent) is probed, we have the
4197 * pool name and pool id, image name and image id, and the snapshot
4198 * name. The only thing we're missing is the snapshot id.
4200 static int rbd_dev_spec_update(struct rbd_device *rbd_dev)
4202 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4203 struct rbd_spec *spec = rbd_dev->spec;
	const char *pool_name;
	const char *image_name;
	const char *snap_name;
	int ret;
4210 * An image being mapped will have the pool name (etc.), but
4211 * we need to look up the snapshot id.
4213 if (spec->pool_name) {
		if (strcmp(spec->snap_name, RBD_SNAP_HEAD_NAME)) {
			u64 snap_id;

			snap_id = rbd_snap_id_by_name(rbd_dev, spec->snap_name);
			if (snap_id == CEPH_NOSNAP)
				return -ENOENT;
			spec->snap_id = snap_id;
		} else {
			spec->snap_id = CEPH_NOSNAP;
		}

		return 0;
	}
4228 /* Get the pool name; we have to make our own copy of this */
	pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, spec->pool_id);
	if (!pool_name) {
		rbd_warn(rbd_dev, "no pool with id %llu", spec->pool_id);
		return -EIO;
	}
	pool_name = kstrdup(pool_name, GFP_KERNEL);
	if (!pool_name)
		return -ENOMEM;
4239 /* Fetch the image name; tolerate failure here */
4241 image_name = rbd_dev_image_name(rbd_dev);
4243 rbd_warn(rbd_dev, "unable to get image name");
4245 /* Look up the snapshot name, and make a copy */
4247 snap_name = rbd_snap_name(rbd_dev, spec->snap_id);
	if (IS_ERR(snap_name)) {
		ret = PTR_ERR(snap_name);
		goto out_err;
	}

	spec->pool_name = pool_name;
	spec->image_name = image_name;
	spec->snap_name = snap_name;

	return 0;
out_err:
	kfree(image_name);
	kfree(pool_name);

	return ret;
}
static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
{
	size_t size;
	int ret;
	void *reply_buf;
	void *p;
	void *end;
	u64 seq;
	u32 snap_count;
	struct ceph_snap_context *snapc;
	u32 i;
4278 * We'll need room for the seq value (maximum snapshot id),
4279 * snapshot count, and array of that many snapshot ids.
4280 * For now we have a fixed upper limit on the number we're
4281 * prepared to receive.
4283 size = sizeof (__le64) + sizeof (__le32) +
4284 RBD_MAX_SNAP_COUNT * sizeof (__le64);
	reply_buf = kzalloc(size, GFP_KERNEL);
	if (!reply_buf)
		return -ENOMEM;

	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
				"rbd", "get_snapcontext", NULL, 0,
				reply_buf, size);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		goto out;

	p = reply_buf;
	end = reply_buf + ret;
	ret = -ERANGE;
4299 ceph_decode_64_safe(&p, end, seq, out);
4300 ceph_decode_32_safe(&p, end, snap_count, out);
4303 * Make sure the reported number of snapshot ids wouldn't go
4304 * beyond the end of our buffer. But before checking that,
4305 * make sure the computed size of the snapshot context we
4306 * allocate is representable in a size_t.
	if (snap_count > (SIZE_MAX - sizeof (struct ceph_snap_context))
				 / sizeof (u64)) {
		ret = -EINVAL;
		goto out;
	}
	if (!ceph_has_room(&p, end, snap_count * sizeof (__le64)))
		goto out;
	ret = 0;
	snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
	if (!snapc) {
		ret = -ENOMEM;
		goto out;
	}
	snapc->seq = seq;
4323 for (i = 0; i < snap_count; i++)
4324 snapc->snaps[i] = ceph_decode_64(&p);
4326 ceph_put_snap_context(rbd_dev->header.snapc);
4327 rbd_dev->header.snapc = snapc;
	dout("  snap context seq = %llu, snap_count = %u\n",
		(unsigned long long)seq, (unsigned int)snap_count);
out:
	kfree(reply_buf);

	return ret;
}
static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id)
{
	size_t size;
	void *reply_buf;
	__le64 snapid;
	int ret;
	void *p;
	void *end;
	char *snap_name;
4348 size = sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN;
4349 reply_buf = kmalloc(size, GFP_KERNEL);
4351 return ERR_PTR(-ENOMEM);
4353 snapid = cpu_to_le64(snap_id);
4354 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
4355 "rbd", "get_snapshot_name",
4356 &snapid, sizeof (snapid),
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0) {
		snap_name = ERR_PTR(ret);
		goto out;
	}

	p = reply_buf;
	end = reply_buf + ret;
4366 snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
	if (IS_ERR(snap_name))
		goto out;

	dout("  snap_id 0x%016llx snap_name = %s\n",
		(unsigned long long)snap_id, snap_name);
out:
	kfree(reply_buf);

	return snap_name;
}
static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev)
{
	bool first_time = rbd_dev->header.object_prefix == NULL;
	int ret;

	ret = rbd_dev_v2_image_size(rbd_dev);
	if (ret)
		return ret;

	if (first_time) {
		ret = rbd_dev_v2_header_onetime(rbd_dev);
		if (ret)
			return ret;
	}
4394 * If the image supports layering, get the parent info. We
4395 * need to probe the first time regardless. Thereafter we
4396 * only need to if there's a parent, to see if it has
4397 * disappeared due to the mapped image getting flattened.
4399 if (rbd_dev->header.features & RBD_FEATURE_LAYERING &&
	    (first_time || rbd_dev->parent_spec)) {
		bool warn;

		ret = rbd_dev_v2_parent_info(rbd_dev);
		if (ret)
			return ret;
4408 * Print a warning if this is the initial probe and
4409 * the image has a parent. Don't print it if the
4410 * image now being probed is itself a parent. We
4411 * can tell at this point because we won't know its
4412 * pool name yet (just its pool id).
4414 warn = rbd_dev->parent_spec && rbd_dev->spec->pool_name;
		if (first_time && warn)
			rbd_warn(rbd_dev, "WARNING: kernel layering "
					"is EXPERIMENTAL!");
	}
4420 if (rbd_dev->spec->snap_id == CEPH_NOSNAP)
4421 if (rbd_dev->mapping.size != rbd_dev->header.image_size)
4422 rbd_dev->mapping.size = rbd_dev->header.image_size;
	ret = rbd_dev_v2_snap_context(rbd_dev);
	dout("rbd_dev_v2_snap_context returned %d\n", ret);

	return ret;
}
static int rbd_bus_add_dev(struct rbd_device *rbd_dev)
{
	struct device *dev;
	int ret;

	dev = &rbd_dev->dev;
	dev->bus = &rbd_bus_type;
	dev->type = &rbd_device_type;
	dev->parent = &rbd_root_dev;
	dev->release = rbd_dev_device_release;
	dev_set_name(dev, "%d", rbd_dev->dev_id);
	ret = device_register(dev);

	return ret;
}
static void rbd_bus_del_dev(struct rbd_device *rbd_dev)
{
	device_unregister(&rbd_dev->dev);
}
4452 * Get a unique rbd identifier for the given new rbd_dev, and add
4453 * the rbd_dev to the global list.
static int rbd_dev_id_get(struct rbd_device *rbd_dev)
{
	int new_dev_id;

	new_dev_id = ida_simple_get(&rbd_dev_id_ida,
				    0, minor_to_rbd_dev_id(1 << MINORBITS),
				    GFP_KERNEL);
	if (new_dev_id < 0)
		return new_dev_id;

	rbd_dev->dev_id = new_dev_id;
4467 spin_lock(&rbd_dev_list_lock);
4468 list_add_tail(&rbd_dev->node, &rbd_dev_list);
4469 spin_unlock(&rbd_dev_list_lock);
	dout("rbd_dev %p given dev id %d\n", rbd_dev, rbd_dev->dev_id);

	return 0;
}
4477 * Remove an rbd_dev from the global list, and record that its
4478 * identifier is no longer in use.
static void rbd_dev_id_put(struct rbd_device *rbd_dev)
{
	spin_lock(&rbd_dev_list_lock);
	list_del_init(&rbd_dev->node);
	spin_unlock(&rbd_dev_list_lock);

	ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);

	dout("rbd_dev %p released dev id %d\n", rbd_dev, rbd_dev->dev_id);
}
4492 * Skips over white space at *buf, and updates *buf to point to the
4493 * first found non-space character (if any). Returns the length of
4494 * the token (string of non-white space characters) found. Note
4495 * that *buf must be terminated with '\0'.
static inline size_t next_token(const char **buf)
{
4500 * These are the characters that produce nonzero for
4501 * isspace() in the "C" and "POSIX" locales.
4503 const char *spaces = " \f\n\r\t\v";
4505 *buf += strspn(*buf, spaces); /* Find start of token */
	return strcspn(*buf, spaces);	/* Return token length */
}
4511 * Finds the next token in *buf, and if the provided token buffer is
4512 * big enough, copies the found token into it. The result, if
4513 * copied, is guaranteed to be terminated with '\0'. Note that *buf
4514 * must be terminated with '\0' on entry.
4516 * Returns the length of the token found (not including the '\0').
4517 * Return value will be 0 if no token is found, and it will be >=
4518 * token_size if the token would not fit.
4520 * The *buf pointer will be updated to point beyond the end of the
4521 * found token. Note that this occurs even if the token buffer is
4522 * too small to hold it.
static inline size_t copy_token(const char **buf,
				char *token,
				size_t token_size)
{
	size_t len;

	len = next_token(buf);
	if (len < token_size) {
		memcpy(token, *buf, len);
		*(token + len) = '\0';
	}
	*buf += len;

	return len;
}
4541 * Finds the next token in *buf, dynamically allocates a buffer big
4542 * enough to hold a copy of it, and copies the token into the new
4543 * buffer. The copy is guaranteed to be terminated with '\0'. Note
4544 * that a duplicate buffer is created even for a zero-length token.
4546 * Returns a pointer to the newly-allocated duplicate, or a null
4547 * pointer if memory for the duplicate was not available. If
4548 * the lenp argument is a non-null pointer, the length of the token
4549 * (not including the '\0') is returned in *lenp.
4551 * If successful, the *buf pointer will be updated to point beyond
4552 * the end of the found token.
4554 * Note: uses GFP_KERNEL for allocation.
4556 static inline char *dup_token(const char **buf, size_t *lenp)
	len = next_token(buf);
	dup = kmemdup(*buf, len + 1, GFP_KERNEL);
	if (!dup)
		return NULL;
	*(dup + len) = '\0';
	*buf += len;

	if (lenp)
		*lenp = len;

	return dup;
}
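/*
 * Editorial example: given buf = "  rbd foo", next_token() skips the
 * leading spaces and returns 3 with *buf left at "rbd foo";
 * dup_token() would then return a freshly allocated "rbd" and advance
 * *buf to " foo".
 */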
/*
 * Parse the options provided for an "rbd add" (i.e., rbd image
 * mapping) request.  These arrive via a write to /sys/bus/rbd/add,
 * and the data written is passed here via a NUL-terminated buffer.
 * Returns 0 if successful or an error code otherwise.
 *
 * The information extracted from these options is recorded in
 * the other parameters which return dynamically-allocated
 * structures:
 *  ceph_opts
 *	The address of a pointer that will refer to a ceph options
 *	structure.  Caller must release the returned pointer using
 *	ceph_destroy_options() when it is no longer needed.
 *  opts
 *	Address of an rbd options pointer.  Fully initialized by
 *	this function; caller must release with kfree().
 *  rbd_spec
 *	Address of an rbd image specification pointer.  Fully
 *	initialized by this function based on parsed options.
 *	Caller must release with rbd_spec_put().
 *
 * The options passed take this form:
 *  <mon_addrs> <options> <pool_name> <image_name> [<snap_name>]
 * where:
 *  <mon_addrs>
 *	A comma-separated list of one or more monitor addresses.
 *	A monitor address is an ip address, optionally followed
 *	by a port number (separated by a colon).
 *	  I.e.:  ip1[:port1][,ip2[:port2]...]
 *  <options>
 *	A comma-separated list of ceph and/or rbd options.
 *  <pool_name>
 *	The name of the rados pool containing the rbd image.
 *  <image_name>
 *	The name of the image in that pool to map.
 *  <snap_name>
 *	An optional snapshot name.  If provided, the mapping will
 *	present data from the image at the time that snapshot was
 *	created.  The image head is used if no snapshot name is
 *	provided.  Snapshot mappings are always read-only.
 */
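/*
 * A hypothetical example of the above, written from user space:
 *
 *   $ echo "1.2.3.4:6789 name=admin,secret=<key> rbd foo snap1" \
 *         > /sys/bus/rbd/add
 *
 * would map snapshot "snap1" of image "foo" in pool "rbd", using
 * the monitor at 1.2.3.4:6789 (all names here are made up).
 */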
static int rbd_add_parse_args(const char *buf,
				struct ceph_options **ceph_opts,
				struct rbd_options **opts,
				struct rbd_spec **rbd_spec)
{
	size_t len;
	char *options;
	const char *mon_addrs;
	char *snap_name;
	size_t mon_addrs_size;
	struct rbd_spec *spec = NULL;
	struct rbd_options *rbd_opts = NULL;
	struct ceph_options *copts;
	int ret;

	/* The first four tokens are required */

	len = next_token(&buf);
	if (!len) {
		rbd_warn(NULL, "no monitor address(es) provided");
		return -EINVAL;
	}
	mon_addrs = buf;
	mon_addrs_size = len + 1;
	buf += len;

	ret = -EINVAL;
	options = dup_token(&buf, NULL);
	if (!options)
		return -ENOMEM;
	if (!*options) {
		rbd_warn(NULL, "no options provided");
		goto out_err;
	}

	spec = rbd_spec_alloc();
	if (!spec)
		goto out_mem;

	spec->pool_name = dup_token(&buf, NULL);
	if (!spec->pool_name)
		goto out_mem;
	if (!*spec->pool_name) {
		rbd_warn(NULL, "no pool name provided");
		goto out_err;
	}

	spec->image_name = dup_token(&buf, NULL);
	if (!spec->image_name)
		goto out_mem;
	if (!*spec->image_name) {
		rbd_warn(NULL, "no image name provided");
		goto out_err;
	}

	/*
	 * Snapshot name is optional; default is to use "-"
	 * (indicating the head/no snapshot).
	 */
	len = next_token(&buf);
	if (!len) {
		buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */
		len = sizeof (RBD_SNAP_HEAD_NAME) - 1;
	} else if (len > RBD_MAX_SNAP_NAME_LEN) {
		ret = -ENAMETOOLONG;
		goto out_err;
	}
	snap_name = kmemdup(buf, len + 1, GFP_KERNEL);
	if (!snap_name)
		goto out_mem;
	*(snap_name + len) = '\0';
	spec->snap_name = snap_name;

	/* Initialize all rbd options to the defaults */

	rbd_opts = kzalloc(sizeof (*rbd_opts), GFP_KERNEL);
	if (!rbd_opts)
		goto out_mem;

	rbd_opts->read_only = RBD_READ_ONLY_DEFAULT;

	copts = ceph_parse_options(options, mon_addrs,
					mon_addrs + mon_addrs_size - 1,
					parse_rbd_opts_token, rbd_opts);
	if (IS_ERR(copts)) {
		ret = PTR_ERR(copts);
		goto out_err;
	}
	kfree(options);

	*ceph_opts = copts;
	*opts = rbd_opts;
	*rbd_spec = spec;

	return 0;
out_mem:
	ret = -ENOMEM;
out_err:
	kfree(rbd_opts);
	rbd_spec_put(spec);
	kfree(options);

	return ret;
}
/*
 * An rbd format 2 image has a unique identifier, distinct from the
 * name given to it by the user.  Internally, that identifier is
 * what's used to specify the names of objects related to the image.
 *
 * A special "rbd id" object is used to map an rbd image name to its
 * id.  If that object doesn't exist, then there is no v2 rbd image
 * with the supplied name.
 *
 * This function will record the given rbd_dev's image_id field if
 * it can be determined, and in that case will return 0.  If any
 * errors occur a negative errno will be returned and the rbd_dev's
 * image_id field will be unchanged (and should be NULL).
 */
static int rbd_dev_image_id(struct rbd_device *rbd_dev)
{
	int ret;
	size_t size;
	char *object_name;
	void *response;
	char *image_id;

	/*
	 * When probing a parent image, the image id is already
	 * known (and the image name likely is not).  There's no
	 * need to fetch the image id again in this case.  We
	 * do still need to set the image format though.
	 */
	if (rbd_dev->spec->image_id) {
		rbd_dev->image_format = *rbd_dev->spec->image_id ? 2 : 1;

		return 0;
	}

	/*
	 * First, see if the format 2 image id file exists, and if
	 * so, get the image's persistent id from it.
	 */
	size = sizeof (RBD_ID_PREFIX) + strlen(rbd_dev->spec->image_name);
	object_name = kmalloc(size, GFP_NOIO);
	if (!object_name)
		return -ENOMEM;
	sprintf(object_name, "%s%s", RBD_ID_PREFIX, rbd_dev->spec->image_name);
	dout("rbd id object name is %s\n", object_name);

	/* Response will be an encoded string, which includes a length */

	size = sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX;
	response = kzalloc(size, GFP_NOIO);
	if (!response) {
		ret = -ENOMEM;
		goto out;
	}

	/* If it doesn't exist we'll assume it's a format 1 image */

	ret = rbd_obj_method_sync(rbd_dev, object_name,
				"rbd", "get_id", NULL, 0,
				response, RBD_IMAGE_ID_LEN_MAX);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret == -ENOENT) {
		image_id = kstrdup("", GFP_KERNEL);
		ret = image_id ? 0 : -ENOMEM;
		if (!ret)
			rbd_dev->image_format = 1;
	} else if (ret > sizeof (__le32)) {
		void *p = response;

		image_id = ceph_extract_encoded_string(&p, p + ret,
						NULL, GFP_NOIO);
		ret = IS_ERR(image_id) ? PTR_ERR(image_id) : 0;
		if (!ret)
			rbd_dev->image_format = 2;
	} else {
		ret = -EINVAL;
	}

	if (!ret) {
		rbd_dev->spec->image_id = image_id;
		dout("image_id is %s\n", image_id);
	}
out:
	kfree(response);
	kfree(object_name);

	return ret;
}
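
/*
 * As a concrete sketch (hypothetical names): probing image "foo"
 * reads the object "rbd_id.foo" (RBD_ID_PREFIX plus the image name).
 * If that object holds an encoded id such as "10052ae8944a", "foo"
 * is a format 2 image; if the object does not exist (-ENOENT), "foo"
 * is assumed to be a format 1 image with an empty image id.
 */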
/*
 * Undo whatever state changes are made by v1 or v2 header info calls.
 */
static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
{
	struct rbd_image_header *header;

	/* Drop parent reference unless it's already been done (or none) */

	if (rbd_dev->parent_overlap)
		rbd_dev_parent_put(rbd_dev);

	/* Free dynamic fields from the header, then zero it out */

	header = &rbd_dev->header;
	ceph_put_snap_context(header->snapc);
	kfree(header->snap_sizes);
	kfree(header->snap_names);
	kfree(header->object_prefix);
	memset(header, 0, sizeof (*header));
}
static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev)
{
	int ret;

	ret = rbd_dev_v2_object_prefix(rbd_dev);
	if (ret)
		goto out_err;

	/*
	 * Get and check the features for the image.  Currently the
	 * features are assumed to never change.
	 */
	ret = rbd_dev_v2_features(rbd_dev);
	if (ret)
		goto out_err;

	/* If the image supports fancy striping, get its parameters */

	if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) {
		ret = rbd_dev_v2_striping_info(rbd_dev);
		if (ret < 0)
			goto out_err;
	}
	/* No support for crypto and compression type format 2 images */

	return 0;
out_err:
	rbd_dev->header.features = 0;
	kfree(rbd_dev->header.object_prefix);
	rbd_dev->header.object_prefix = NULL;

	return ret;
}
static int rbd_dev_probe_parent(struct rbd_device *rbd_dev)
{
	struct rbd_device *parent = NULL;
	struct rbd_spec *parent_spec;
	struct rbd_client *rbdc;
	int ret;

	if (!rbd_dev->parent_spec)
		return 0;
	/*
	 * We need to pass a reference to the client and the parent
	 * spec when creating the parent rbd_dev.  Images related by
	 * parent/child relationships always share both.
	 */
	parent_spec = rbd_spec_get(rbd_dev->parent_spec);
	rbdc = __rbd_get_client(rbd_dev->rbd_client);

	ret = -ENOMEM;
	parent = rbd_dev_create(rbdc, parent_spec);
	if (!parent)
		goto out_err;

	ret = rbd_dev_image_probe(parent, false);
	if (ret < 0)
		goto out_err;
	rbd_dev->parent = parent;
	atomic_set(&rbd_dev->parent_ref, 1);

	return 0;
out_err:
	if (parent) {
		rbd_dev_unparent(rbd_dev);
		kfree(rbd_dev->header_name);
		rbd_dev_destroy(parent);
	} else {
		rbd_put_client(rbdc);
		rbd_spec_put(parent_spec);
	}

	return ret;
}
static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
{
	int ret;

	/* Get an id and fill in device name. */

	ret = rbd_dev_id_get(rbd_dev);
	if (ret)
		return ret;

	BUILD_BUG_ON(DEV_NAME_LEN
			< sizeof (RBD_DRV_NAME) + MAX_INT_FORMAT_WIDTH);
	sprintf(rbd_dev->name, "%s%d", RBD_DRV_NAME, rbd_dev->dev_id);

	/* Record our major and minor device numbers. */

	if (!single_major) {
		ret = register_blkdev(0, rbd_dev->name);
		if (ret < 0)
			goto err_out_id;
		rbd_dev->major = ret;
		rbd_dev->minor = 0;
	} else {
		rbd_dev->major = rbd_major;
		rbd_dev->minor = rbd_dev_id_to_minor(rbd_dev->dev_id);
	}

	/* Set up the blkdev mapping. */

	ret = rbd_init_disk(rbd_dev);
	if (ret)
		goto err_out_blkdev;

	ret = rbd_dev_mapping_set(rbd_dev);
	if (ret)
		goto err_out_disk;
	set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);

	ret = rbd_bus_add_dev(rbd_dev);
	if (ret)
		goto err_out_mapping;

	/* Everything's ready.  Announce the disk to the world. */

	set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
	add_disk(rbd_dev->disk);

	pr_info("%s: added with size 0x%llx\n", rbd_dev->disk->disk_name,
		(unsigned long long)rbd_dev->mapping.size);

	return ret;

err_out_mapping:
	rbd_dev_mapping_clear(rbd_dev);
err_out_disk:
	rbd_free_disk(rbd_dev);
err_out_blkdev:
	if (!single_major)
		unregister_blkdev(rbd_dev->major, rbd_dev->name);
err_out_id:
	rbd_dev_id_put(rbd_dev);

	return ret;
}
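
/*
 * A numbering sketch, assuming rbd_dev_id_to_minor() shifts the id
 * by RBD_SINGLE_MAJOR_PART_SHIFT: with single_major set, dev_id 2
 * maps to minor 2 << 4 = 32, so /dev/rbd2 owns minors 32..47 under
 * the shared major and can expose up to 15 partitions.
 */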
static int rbd_dev_header_name(struct rbd_device *rbd_dev)
{
	struct rbd_spec *spec = rbd_dev->spec;
	size_t size;

	/* Record the header object name for this rbd image. */

	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));

	if (rbd_dev->image_format == 1)
		size = strlen(spec->image_name) + sizeof (RBD_SUFFIX);
	else
		size = sizeof (RBD_HEADER_PREFIX) + strlen(spec->image_id);

	rbd_dev->header_name = kmalloc(size, GFP_KERNEL);
	if (!rbd_dev->header_name)
		return -ENOMEM;

	if (rbd_dev->image_format == 1)
		sprintf(rbd_dev->header_name, "%s%s",
			spec->image_name, RBD_SUFFIX);
	else
		sprintf(rbd_dev->header_name, "%s%s",
			RBD_HEADER_PREFIX, spec->image_id);

	return 0;
}
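
/*
 * For example (hypothetical names): a format 1 image "foo" uses
 * header object "foo.rbd" (RBD_SUFFIX appended), while a format 2
 * image with id "10052ae8944a" uses "rbd_header.10052ae8944a"
 * (RBD_HEADER_PREFIX prepended).
 */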
static void rbd_dev_image_release(struct rbd_device *rbd_dev)
{
	rbd_dev_unprobe(rbd_dev);
	kfree(rbd_dev->header_name);
	rbd_dev->header_name = NULL;
	rbd_dev->image_format = 0;
	kfree(rbd_dev->spec->image_id);
	rbd_dev->spec->image_id = NULL;

	rbd_dev_destroy(rbd_dev);
}
/*
 * Probe for the existence of the header object for the given rbd
 * device.  If this image is the one being mapped (i.e., not a
 * parent), initiate a watch on its header object before using that
 * object to get detailed information about the rbd image.
 */
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping)
{
	int ret;

	/*
	 * Get the id from the image id object.  Unless there's an
	 * error, rbd_dev->spec->image_id will be filled in with
	 * a dynamically-allocated string, and rbd_dev->image_format
	 * will be set to either 1 or 2.
	 */
	ret = rbd_dev_image_id(rbd_dev);
	if (ret)
		return ret;
	rbd_assert(rbd_dev->spec->image_id);
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));

	ret = rbd_dev_header_name(rbd_dev);
	if (ret)
		goto err_out_format;

	if (mapping) {
		ret = rbd_dev_header_watch_sync(rbd_dev);
		if (ret)
			goto out_header_name;
	}

	if (rbd_dev->image_format == 1)
		ret = rbd_dev_v1_header_info(rbd_dev);
	else
		ret = rbd_dev_v2_header_info(rbd_dev);
	if (ret)
		goto err_out_watch;

	ret = rbd_dev_spec_update(rbd_dev);
	if (ret)
		goto err_out_probe;

	ret = rbd_dev_probe_parent(rbd_dev);
	if (ret)
		goto err_out_probe;

	dout("discovered format %u image, header name is %s\n",
		rbd_dev->image_format, rbd_dev->header_name);

	return 0;

err_out_probe:
	rbd_dev_unprobe(rbd_dev);
err_out_watch:
	if (mapping)
		rbd_dev_header_unwatch_sync(rbd_dev);
out_header_name:
	kfree(rbd_dev->header_name);
	rbd_dev->header_name = NULL;
err_out_format:
	rbd_dev->image_format = 0;
	kfree(rbd_dev->spec->image_id);
	rbd_dev->spec->image_id = NULL;

	dout("probe failed, returning %d\n", ret);

	return ret;
}
static ssize_t do_rbd_add(struct bus_type *bus,
			  const char *buf,
			  size_t count)
{
	struct rbd_device *rbd_dev = NULL;
	struct ceph_options *ceph_opts = NULL;
	struct rbd_options *rbd_opts = NULL;
	struct rbd_spec *spec = NULL;
	struct rbd_client *rbdc;
	struct ceph_osd_client *osdc;
	bool read_only;
	int rc = -ENOMEM;

	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	/* parse add command */
	rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
	if (rc < 0)
		goto err_out_module;
	read_only = rbd_opts->read_only;
	kfree(rbd_opts);
	rbd_opts = NULL;	/* done with this */

	rbdc = rbd_get_client(ceph_opts);
	if (IS_ERR(rbdc)) {
		rc = PTR_ERR(rbdc);
		goto err_out_args;
	}

	/* pick the pool */
	osdc = &rbdc->client->osdc;
	rc = ceph_pg_poolid_by_name(osdc->osdmap, spec->pool_name);
	if (rc < 0)
		goto err_out_client;
	spec->pool_id = (u64)rc;

	/* The ceph file layout needs to fit pool id in 32 bits */

	if (spec->pool_id > (u64)U32_MAX) {
		rbd_warn(NULL, "pool id too large (%llu > %u)",
			(unsigned long long)spec->pool_id, U32_MAX);
		rc = -EIO;
		goto err_out_client;
	}

	rbd_dev = rbd_dev_create(rbdc, spec);
	if (!rbd_dev)
		goto err_out_client;
	rbdc = NULL;		/* rbd_dev now owns this */
	spec = NULL;		/* rbd_dev now owns this */

	rc = rbd_dev_image_probe(rbd_dev, true);
	if (rc < 0)
		goto err_out_rbd_dev;

	/* If we are mapping a snapshot it must be marked read-only */

	if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
		read_only = true;
	rbd_dev->mapping.read_only = read_only;

	rc = rbd_dev_device_setup(rbd_dev);
	if (rc) {
		/*
		 * rbd_dev_header_unwatch_sync() can't be moved into
		 * rbd_dev_image_release() without refactoring, see
		 * commit 1f3ef78861ac.
		 */
		rbd_dev_header_unwatch_sync(rbd_dev);
		rbd_dev_image_release(rbd_dev);
		goto err_out_module;
	}

	return count;

err_out_rbd_dev:
	rbd_dev_destroy(rbd_dev);
err_out_client:
	rbd_put_client(rbdc);
err_out_args:
	rbd_spec_put(spec);
err_out_module:
	module_put(THIS_MODULE);

	dout("Error adding device %s\n", buf);

	return (ssize_t)rc;
}
static ssize_t rbd_add(struct bus_type *bus,
		       const char *buf,
		       size_t count)
{
	if (single_major)
		return -EINVAL;

	return do_rbd_add(bus, buf, count);
}

static ssize_t rbd_add_single_major(struct bus_type *bus,
				    const char *buf,
				    size_t count)
{
	return do_rbd_add(bus, buf, count);
}
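
/*
 * Note: with the single_major module parameter set (e.g. a
 * hypothetical "modprobe rbd single_major=Y"), mappings must be
 * added via /sys/bus/rbd/add_single_major; writes to the plain
 * "add" file are rejected with -EINVAL, as above.
 */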
static void rbd_dev_device_release(struct device *dev)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	rbd_free_disk(rbd_dev);
	clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
	rbd_dev_mapping_clear(rbd_dev);
	if (!single_major)
		unregister_blkdev(rbd_dev->major, rbd_dev->name);
	rbd_dev_id_put(rbd_dev);
}
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev)
{
	while (rbd_dev->parent) {
		struct rbd_device *first = rbd_dev;
		struct rbd_device *second = first->parent;
		struct rbd_device *third;

		/*
		 * Follow to the parent with no grandparent and
		 * remove it.
		 */
		while (second && (third = second->parent)) {
			first = second;
			second = third;
		}
		rbd_assert(second);
		rbd_dev_image_release(second);
		first->parent = NULL;
		first->parent_overlap = 0;
		rbd_assert(first->parent_spec);
		rbd_spec_put(first->parent_spec);
		first->parent_spec = NULL;
	}
}
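
/*
 * For instance, given a hypothetical clone chain
 * rbd_dev -> parent -> grandparent, the grandparent (the image with
 * no parent of its own) is released first, then the parent, so each
 * pass of the outer loop trims one image off the far end.
 */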
static ssize_t do_rbd_remove(struct bus_type *bus,
			     const char *buf,
			     size_t count)
{
	struct rbd_device *rbd_dev = NULL;
	struct list_head *tmp;
	int dev_id;
	unsigned long ul;
	bool already = false;
	int ret;

	ret = kstrtoul(buf, 10, &ul);
	if (ret)
		return ret;

	/* convert to int; abort if we lost anything in the conversion */
	dev_id = (int)ul;
	if (dev_id != ul)
		return -EINVAL;

	ret = -ENOENT;
	spin_lock(&rbd_dev_list_lock);
	list_for_each(tmp, &rbd_dev_list) {
		rbd_dev = list_entry(tmp, struct rbd_device, node);
		if (rbd_dev->dev_id == dev_id) {
			ret = 0;
			break;
		}
	}
	if (!ret) {
		spin_lock_irq(&rbd_dev->lock);
		if (rbd_dev->open_count)
			ret = -EBUSY;
		else
			already = test_and_set_bit(RBD_DEV_FLAG_REMOVING,
							&rbd_dev->flags);
		spin_unlock_irq(&rbd_dev->lock);
	}
	spin_unlock(&rbd_dev_list_lock);
	if (ret < 0 || already)
		return ret;

	rbd_dev_header_unwatch_sync(rbd_dev);
	/*
	 * flush remaining watch callbacks - these must be complete
	 * before the osd_client is shut down
	 */
	dout("%s: flushing notifies\n", __func__);
	ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);

	/*
	 * Don't free anything from rbd_dev->disk until after all
	 * notifies are completely processed.  Otherwise
	 * rbd_bus_del_dev() will race with rbd_watch_cb(), resulting
	 * in a potential use after free of rbd_dev->disk or rbd_dev.
	 */
	rbd_bus_del_dev(rbd_dev);
	rbd_dev_image_release(rbd_dev);
	module_put(THIS_MODULE);

	return count;
}
static ssize_t rbd_remove(struct bus_type *bus,
			  const char *buf,
			  size_t count)
{
	if (single_major)
		return -EINVAL;

	return do_rbd_remove(bus, buf, count);
}

static ssize_t rbd_remove_single_major(struct bus_type *bus,
				       const char *buf,
				       size_t count)
{
	return do_rbd_remove(bus, buf, count);
}
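
/*
 * A hypothetical example, from user space:
 *
 *   $ echo 2 > /sys/bus/rbd/remove
 *
 * unmaps /dev/rbd2 (device id 2), provided it is not open and is
 * not already being removed.
 */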
/*
 * create control files in sysfs
 * /sys/bus/rbd/...
 */
static int rbd_sysfs_init(void)
{
	int ret;

	ret = device_register(&rbd_root_dev);
	if (ret < 0)
		return ret;

	ret = bus_register(&rbd_bus_type);
	if (ret < 0)
		device_unregister(&rbd_root_dev);

	return ret;
}

static void rbd_sysfs_cleanup(void)
{
	bus_unregister(&rbd_bus_type);
	device_unregister(&rbd_root_dev);
}
static int rbd_slab_init(void)
{
	rbd_assert(!rbd_img_request_cache);
	rbd_img_request_cache = kmem_cache_create("rbd_img_request",
					sizeof (struct rbd_img_request),
					__alignof__(struct rbd_img_request),
					0, NULL);
	if (!rbd_img_request_cache)
		return -ENOMEM;

	rbd_assert(!rbd_obj_request_cache);
	rbd_obj_request_cache = kmem_cache_create("rbd_obj_request",
					sizeof (struct rbd_obj_request),
					__alignof__(struct rbd_obj_request),
					0, NULL);
	if (!rbd_obj_request_cache)
		goto out_err;

	rbd_assert(!rbd_segment_name_cache);
	rbd_segment_name_cache = kmem_cache_create("rbd_segment_name",
					CEPH_MAX_OID_NAME_LEN + 1, 1, 0, NULL);
	if (rbd_segment_name_cache)
		return 0;
out_err:
	if (rbd_obj_request_cache) {
		kmem_cache_destroy(rbd_obj_request_cache);
		rbd_obj_request_cache = NULL;
	}

	kmem_cache_destroy(rbd_img_request_cache);
	rbd_img_request_cache = NULL;

	return -ENOMEM;
}
static void rbd_slab_exit(void)
{
	rbd_assert(rbd_segment_name_cache);
	kmem_cache_destroy(rbd_segment_name_cache);
	rbd_segment_name_cache = NULL;

	rbd_assert(rbd_obj_request_cache);
	kmem_cache_destroy(rbd_obj_request_cache);
	rbd_obj_request_cache = NULL;

	rbd_assert(rbd_img_request_cache);
	kmem_cache_destroy(rbd_img_request_cache);
	rbd_img_request_cache = NULL;
}
static int __init rbd_init(void)
{
	int rc;

	if (!libceph_compatible(NULL)) {
		rbd_warn(NULL, "libceph incompatibility (quitting)");
		return -EINVAL;
	}

	rc = rbd_slab_init();
	if (rc)
		return rc;

	if (single_major) {
		rbd_major = register_blkdev(0, RBD_DRV_NAME);
		if (rbd_major < 0) {
			rc = rbd_major;
			goto err_out_slab;
		}
	}

	rc = rbd_sysfs_init();
	if (rc)
		goto err_out_blkdev;

	if (single_major)
		pr_info("loaded (major %d)\n", rbd_major);
	else
		pr_info("loaded\n");

	return 0;

err_out_blkdev:
	if (single_major)
		unregister_blkdev(rbd_major, RBD_DRV_NAME);
err_out_slab:
	rbd_slab_exit();
	return rc;
}

static void __exit rbd_exit(void)
{
	rbd_sysfs_cleanup();
	if (single_major)
		unregister_blkdev(rbd_major, RBD_DRV_NAME);
	rbd_slab_exit();
}
module_init(rbd_init);
module_exit(rbd_exit);

MODULE_AUTHOR("Alex Elder <elder@inktank.com>");
MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
/* following authorship retained from original osdblk.c */
MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>");

MODULE_DESCRIPTION("RADOS Block Device (RBD) driver");
MODULE_LICENSE("GPL");