/*
   rbd.c -- Export ceph rados objects as a Linux block device

   based on drivers/block/osdblk.c:

   Copyright 2009 Red Hat, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

   For usage instructions, please refer to:

		 Documentation/ABI/testing/sysfs-bus-rbd

 */
#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/mon_client.h>
#include <linux/ceph/decode.h>
#include <linux/parser.h>
#include <linux/bsearch.h>

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/slab.h>

#include "rbd_types.h"
#define RBD_DEBUG	/* Activate rbd_assert() calls */

/*
 * The basic unit of block I/O is a sector.  It is interpreted in a
 * number of contexts in Linux (blk, bio, genhd), but the default is
 * universally 512 bytes.  These symbols are just slightly more
 * meaningful than the bare numbers they represent.
 */
#define	SECTOR_SHIFT	9
#define	SECTOR_SIZE	(1ULL << SECTOR_SHIFT)
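/*
 * For illustration (a sketch, not part of the driver): converting
 * between byte offsets and sector numbers is just a shift.  Byte
 * offset 4096 is sector 4096 >> SECTOR_SHIFT == 8, and 8 sectors
 * span 8 << SECTOR_SHIFT == 4096 bytes.
 */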
#define RBD_DRV_NAME "rbd"
#define RBD_DRV_NAME_LONG "rbd (rados block device)"

#define RBD_MINORS_PER_MAJOR	256		/* max minors per blkdev */

#define RBD_SNAP_DEV_NAME_PREFIX	"snap_"
#define RBD_MAX_SNAP_NAME_LEN \
			(NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))

#define RBD_MAX_SNAP_COUNT	510	/* allows max snapc to fit in 4KB */

#define RBD_SNAP_HEAD_NAME	"-"

#define	BAD_SNAP_INDEX	U32_MAX		/* invalid index into snap array */

/* This allows a single page to hold an image name sent by OSD */
#define RBD_IMAGE_NAME_LEN_MAX	(PAGE_SIZE - sizeof (__le32) - 1)
#define RBD_IMAGE_ID_LEN_MAX	64

#define RBD_OBJ_PREFIX_LEN_MAX	64
#define RBD_FEATURE_LAYERING	(1<<0)
#define RBD_FEATURE_STRIPINGV2	(1<<1)
#define RBD_FEATURES_ALL \
	    (RBD_FEATURE_LAYERING | RBD_FEATURE_STRIPINGV2)

/* Features supported by this (client software) implementation. */

#define RBD_FEATURES_SUPPORTED	(RBD_FEATURES_ALL)
/*
 * An RBD device name will be "rbd#", where the "rbd" comes from
 * RBD_DRV_NAME above, and # is a unique integer identifier.
 * MAX_INT_FORMAT_WIDTH is used in ensuring DEV_NAME_LEN is big
 * enough to hold all possible device names.
 */
#define DEV_NAME_LEN		32
#define MAX_INT_FORMAT_WIDTH	((5 * sizeof (int)) / 2 + 1)
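/*
 * Worked example (assuming 32-bit int): (5 * 4) / 2 + 1 = 11
 * characters, enough for the 10 digits of UINT_MAX (4294967295)
 * plus a sign or terminator.  The 2.5-digits-per-byte estimate
 * slightly exceeds the true ratio, log10(256) ~= 2.41, so the
 * result is always safe.
 */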
/*
 * block device image metadata (in-memory version)
 */
struct rbd_image_header {
	/* These six fields never change for a given rbd image */
	char *object_prefix;
	__u8 obj_order;
	__u8 crypt_type;
	__u8 comp_type;
	u64 stripe_unit;
	u64 stripe_count;
	u64 features;		/* Might be changeable someday? */

	/* The remaining fields need to be updated occasionally */
	u64 image_size;
	struct ceph_snap_context *snapc;
	char *snap_names;	/* format 1 only */
	u64 *snap_sizes;	/* format 1 only */
};
/*
 * An rbd image specification.
 *
 * The tuple (pool_id, image_id, snap_id) is sufficient to uniquely
 * identify an image.  Each rbd_dev structure includes a pointer to
 * an rbd_spec structure that encapsulates this identity.
 *
 * Each of the id's in an rbd_spec has an associated name.  For a
 * user-mapped image, the names are supplied and the id's associated
 * with them are looked up.  For a layered image, a parent image is
 * defined by the tuple, and the names are looked up.
 *
 * An rbd_dev structure contains a parent_spec pointer which is
 * non-null if the image it represents is a child in a layered
 * image.  This pointer will refer to the rbd_spec structure used
 * by the parent rbd_dev for its own identity (i.e., the structure
 * is shared between the parent and child).
 *
 * Since these structures are populated once, during the discovery
 * phase of image construction, they are effectively immutable so
 * we make no effort to synchronize access to them.
 *
 * Note that code herein does not assume the image name is known (it
 * could be a null pointer).
 */
struct rbd_spec {
	u64		pool_id;
	const char	*pool_name;

	const char	*image_id;
	const char	*image_name;

	u64		snap_id;
	const char	*snap_name;

	struct kref	kref;
};
/*
 * an instance of the client.  multiple devices may share an rbd client.
 */
struct rbd_client {
	struct ceph_client	*client;
	struct kref		kref;
	struct list_head	node;
};
struct rbd_img_request;
typedef void (*rbd_img_callback_t)(struct rbd_img_request *);

#define	BAD_WHICH	U32_MAX		/* Good which or bad which, which? */

struct rbd_obj_request;
typedef void (*rbd_obj_callback_t)(struct rbd_obj_request *);

enum obj_request_type {
	OBJ_REQUEST_NODATA, OBJ_REQUEST_BIO, OBJ_REQUEST_PAGES
};

enum obj_req_flags {
	OBJ_REQ_DONE,		/* completion flag: not done = 0, done = 1 */
	OBJ_REQ_IMG_DATA,	/* object usage: standalone = 0, image = 1 */
	OBJ_REQ_KNOWN,		/* EXISTS flag valid: no = 0, yes = 1 */
	OBJ_REQ_EXISTS,		/* target exists: no = 0, yes = 1 */
};
struct rbd_obj_request {
	const char		*object_name;
	u64			offset;		/* object start byte */
	u64			length;		/* bytes from offset */
	unsigned long		flags;

	/*
	 * An object request associated with an image will have its
	 * img_data flag set; a standalone object request will not.
	 *
	 * A standalone object request will have which == BAD_WHICH
	 * and a null obj_request pointer.
	 *
	 * An object request initiated in support of a layered image
	 * object (to check for its existence before a write) will
	 * have which == BAD_WHICH and a non-null obj_request pointer.
	 *
	 * Finally, an object request for rbd image data will have
	 * which != BAD_WHICH, and will have a non-null img_request
	 * pointer.  The value of which will be in the range
	 * 0..(img_request->obj_request_count-1).
	 */
	union {
		struct rbd_obj_request	*obj_request;	/* STAT op */
		struct {
			struct rbd_img_request	*img_request;
			u64			img_offset;
			/* links for img_request->obj_requests list */
			struct list_head	links;
		};
	};
	u32			which;		/* posn image request list */

	enum obj_request_type	type;
	union {
		struct bio	*bio_list;
		struct {
			struct page	**pages;
			u32		page_count;
		};
	};
	struct page		**copyup_pages;

	struct ceph_osd_request	*osd_req;

	u64			xferred;	/* bytes transferred */
	int			result;

	rbd_obj_callback_t	callback;
	struct completion	completion;

	struct kref		kref;
};
enum img_req_flags {
	IMG_REQ_WRITE,		/* I/O direction: read = 0, write = 1 */
	IMG_REQ_CHILD,		/* initiator: block = 0, child image = 1 */
	IMG_REQ_LAYERED,	/* ENOENT handling: normal = 0, layered = 1 */
};
struct rbd_img_request {
	struct rbd_device	*rbd_dev;
	u64			offset;	/* starting image byte offset */
	u64			length;	/* byte count from offset */
	unsigned long		flags;
	union {
		u64			snap_id;	/* for reads */
		struct ceph_snap_context *snapc;	/* for writes */
	};
	union {
		struct request		*rq;		/* block request */
		struct rbd_obj_request	*obj_request;	/* obj req initiator */
	};
	struct page		**copyup_pages;
	spinlock_t		completion_lock;/* protects next_completion */
	u32			next_completion;
	rbd_img_callback_t	callback;
	u64			xferred;/* aggregate bytes transferred */
	int			result;	/* first nonzero obj_request result */

	u32			obj_request_count;
	struct list_head	obj_requests;	/* rbd_obj_request structs */

	struct kref		kref;
};
#define for_each_obj_request(ireq, oreq) \
	list_for_each_entry(oreq, &(ireq)->obj_requests, links)
#define for_each_obj_request_from(ireq, oreq) \
	list_for_each_entry_from(oreq, &(ireq)->obj_requests, links)
#define for_each_obj_request_safe(ireq, oreq, n) \
	list_for_each_entry_safe_reverse(oreq, n, &(ireq)->obj_requests, links)
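/*
 * Usage sketch for the iteration macros (illustrative only; the
 * "count_done" helper is hypothetical and not part of the driver):
 */
#if 0
static u32 count_done(struct rbd_img_request *img_request)
{
	struct rbd_obj_request *obj_request;
	u32 done = 0;

	/* Walk the image request's object requests in list order */
	for_each_obj_request(img_request, obj_request)
		if (obj_request_done_test(obj_request))
			done++;

	return done;
}
#endif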
struct rbd_mapping {
	u64			size;
	u64			features;
	bool			read_only;
};

/*
 * a single device
 */
struct rbd_device {
	int			dev_id;		/* blkdev unique id */

	int			major;		/* blkdev assigned major */
	struct gendisk		*disk;		/* blkdev's gendisk and rq */

	u32			image_format;	/* Either 1 or 2 */
	struct rbd_client	*rbd_client;

	char			name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */

	spinlock_t		lock;		/* queue, flags, open_count */

	struct rbd_image_header	header;
	unsigned long		flags;		/* possibly lock protected */
	struct rbd_spec		*spec;

	char			*header_name;

	struct ceph_file_layout	layout;

	struct ceph_osd_event	*watch_event;
	struct rbd_obj_request	*watch_request;

	struct rbd_spec		*parent_spec;
	u64			parent_overlap;
	struct rbd_device	*parent;

	/* protects updating the header */
	struct rw_semaphore	header_rwsem;

	struct rbd_mapping	mapping;

	struct list_head	node;

	/* sysfs related */
	struct device		dev;
	unsigned long		open_count;	/* protected by lock */
};
/*
 * Flag bits for rbd_dev->flags.  If atomicity is required,
 * rbd_dev->lock is used to protect access.
 *
 * Currently, only the "removing" flag (which is coupled with the
 * "open_count" field) requires atomic access.
 */
enum rbd_dev_flags {
	RBD_DEV_FLAG_EXISTS,	/* mapped snapshot has not been deleted */
	RBD_DEV_FLAG_REMOVING,	/* this mapping is being removed */
};
static DEFINE_MUTEX(ctl_mutex);	  /* Serialize open/close/setup/teardown */

static LIST_HEAD(rbd_dev_list);    /* devices */
static DEFINE_SPINLOCK(rbd_dev_list_lock);

static LIST_HEAD(rbd_client_list);		/* clients */
static DEFINE_SPINLOCK(rbd_client_list_lock);

/* Slab caches for frequently-allocated structures */

static struct kmem_cache	*rbd_img_request_cache;
static struct kmem_cache	*rbd_obj_request_cache;
static struct kmem_cache	*rbd_segment_name_cache;
static int rbd_img_request_submit(struct rbd_img_request *img_request);

static void rbd_dev_device_release(struct device *dev);

static ssize_t rbd_add(struct bus_type *bus, const char *buf,
		       size_t count);
static ssize_t rbd_remove(struct bus_type *bus, const char *buf,
			  size_t count);
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool read_only);

static struct bus_attribute rbd_bus_attrs[] = {
	__ATTR(add, S_IWUSR, NULL, rbd_add),
	__ATTR(remove, S_IWUSR, NULL, rbd_remove),
	__ATTR_NULL
};
static struct bus_type rbd_bus_type = {
	.name		= "rbd",
	.bus_attrs	= rbd_bus_attrs,
};

static void rbd_root_dev_release(struct device *dev)
{
}

static struct device rbd_root_dev = {
	.init_name =    "rbd",
	.release =      rbd_root_dev_release,
};
static __printf(2, 3)
void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;

	if (!rbd_dev)
		printk(KERN_WARNING "%s: %pV\n", RBD_DRV_NAME, &vaf);
	else if (rbd_dev->disk)
		printk(KERN_WARNING "%s: %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->disk->disk_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_name)
		printk(KERN_WARNING "%s: image %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_id)
		printk(KERN_WARNING "%s: id %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_id, &vaf);
	else	/* punt */
		printk(KERN_WARNING "%s: rbd_dev %p: %pV\n",
			RBD_DRV_NAME, rbd_dev, &vaf);
	va_end(args);
}
#ifdef RBD_DEBUG
#define rbd_assert(expr)						\
		if (unlikely(!(expr))) {				\
			printk(KERN_ERR "\nAssertion failure in %s() "	\
						"at line %d:\n\n"	\
					"\trbd_assert(%s);\n\n",	\
					__func__, __LINE__, #expr);	\
			BUG();						\
		}
#else /* !RBD_DEBUG */
#  define rbd_assert(expr)	((void) 0)
#endif /* !RBD_DEBUG */
static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request);
static void rbd_img_parent_read(struct rbd_obj_request *obj_request);
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);

static int rbd_dev_refresh(struct rbd_device *rbd_dev);
static int rbd_dev_v2_refresh(struct rbd_device *rbd_dev);
static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id);
static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
				u8 *order, u64 *snap_size);
static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
		u64 *snap_features);
static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name);
static int rbd_open(struct block_device *bdev, fmode_t mode)
{
	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
	bool removing = false;

	if ((mode & FMODE_WRITE) && rbd_dev->mapping.read_only)
		return -EROFS;

	spin_lock_irq(&rbd_dev->lock);
	if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
		removing = true;
	else
		rbd_dev->open_count++;
	spin_unlock_irq(&rbd_dev->lock);
	if (removing)
		return -ENOENT;

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
	(void) get_device(&rbd_dev->dev);
	set_device_ro(bdev, rbd_dev->mapping.read_only);
	mutex_unlock(&ctl_mutex);

	return 0;
}
static int rbd_release(struct gendisk *disk, fmode_t mode)
{
	struct rbd_device *rbd_dev = disk->private_data;
	unsigned long open_count_before;

	spin_lock_irq(&rbd_dev->lock);
	open_count_before = rbd_dev->open_count--;
	spin_unlock_irq(&rbd_dev->lock);
	rbd_assert(open_count_before > 0);

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
	put_device(&rbd_dev->dev);
	mutex_unlock(&ctl_mutex);

	return 0;
}
static const struct block_device_operations rbd_bd_ops = {
	.owner			= THIS_MODULE,
	.open			= rbd_open,
	.release		= rbd_release,
};
/*
 * Initialize an rbd client instance.  We own *ceph_opts.
 */
static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;
	int ret = -ENOMEM;

	dout("%s:\n", __func__);
	rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL);
	if (!rbdc)
		goto out_opt;

	kref_init(&rbdc->kref);
	INIT_LIST_HEAD(&rbdc->node);

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);

	rbdc->client = ceph_create_client(ceph_opts, rbdc, 0, 0);
	if (IS_ERR(rbdc->client))
		goto out_mutex;
	ceph_opts = NULL; /* Now rbdc->client is responsible for ceph_opts */

	ret = ceph_open_session(rbdc->client);
	if (ret < 0)
		goto out_err;

	spin_lock(&rbd_client_list_lock);
	list_add_tail(&rbdc->node, &rbd_client_list);
	spin_unlock(&rbd_client_list_lock);

	mutex_unlock(&ctl_mutex);
	dout("%s: rbdc %p\n", __func__, rbdc);

	return rbdc;

out_err:
	ceph_destroy_client(rbdc->client);
out_mutex:
	mutex_unlock(&ctl_mutex);
	kfree(rbdc);
out_opt:
	if (ceph_opts)
		ceph_destroy_options(ceph_opts);
	dout("%s: error %d\n", __func__, ret);

	return ERR_PTR(ret);
}
static struct rbd_client *__rbd_get_client(struct rbd_client *rbdc)
{
	kref_get(&rbdc->kref);

	return rbdc;
}

/*
 * Find a ceph client with specific addr and configuration.  If
 * found, bump its reference count.
 */
static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
{
	struct rbd_client *client_node;
	bool found = false;

	if (ceph_opts->flags & CEPH_OPT_NOSHARE)
		return NULL;

	spin_lock(&rbd_client_list_lock);
	list_for_each_entry(client_node, &rbd_client_list, node) {
		if (!ceph_compare_options(ceph_opts, client_node->client)) {
			__rbd_get_client(client_node);

			found = true;
			break;
		}
	}
	spin_unlock(&rbd_client_list_lock);

	return found ? client_node : NULL;
}
enum {
	Opt_last_int,
	/* int args above */
	Opt_last_string,
	/* string args above */
	Opt_read_only,
	Opt_read_write,
	/* Boolean args above */
	Opt_last_bool,
};

static match_table_t rbd_opts_tokens = {
	/* int args above */
	/* string args above */
	{Opt_read_only, "read_only"},
	{Opt_read_only, "ro"},		/* Alternate spelling */
	{Opt_read_write, "read_write"},
	{Opt_read_write, "rw"},		/* Alternate spelling */
	/* Boolean args above */
	{-1, NULL}
};

struct rbd_options {
	bool	read_only;
};

#define RBD_READ_ONLY_DEFAULT	false
static int parse_rbd_opts_token(char *c, void *private)
{
	struct rbd_options *rbd_opts = private;
	substring_t argstr[MAX_OPT_ARGS];
	int token, intval, ret;

	token = match_token(c, rbd_opts_tokens, argstr);
	if (token < 0)
		return -EINVAL;

	if (token < Opt_last_int) {
		ret = match_int(&argstr[0], &intval);
		if (ret < 0) {
			pr_err("bad mount option arg (not int) "
			       "at '%s'\n", c);
			return ret;
		}
		dout("got int token %d val %d\n", token, intval);
	} else if (token > Opt_last_int && token < Opt_last_string) {
		dout("got string token %d val %s\n", token,
		     argstr[0].from);
	} else if (token > Opt_last_string && token < Opt_last_bool) {
		dout("got Boolean token %d\n", token);
	} else {
		dout("got token %d\n", token);
	}

	switch (token) {
	case Opt_read_only:
		rbd_opts->read_only = true;
		break;
	case Opt_read_write:
		rbd_opts->read_only = false;
		break;
	default:
		rbd_assert(false);
		break;
	}
	return 0;
}
/*
 * Get a ceph client with specific addr and configuration, if one does
 * not exist create it.
 */
static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;

	rbdc = rbd_client_find(ceph_opts);
	if (rbdc)	/* using an existing client */
		ceph_destroy_options(ceph_opts);
	else
		rbdc = rbd_client_create(ceph_opts);

	return rbdc;
}
/*
 * Destroy ceph client.  Takes rbd_client_list_lock itself to remove
 * the client from the list.
 */
static void rbd_client_release(struct kref *kref)
{
	struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);

	dout("%s: rbdc %p\n", __func__, rbdc);
	spin_lock(&rbd_client_list_lock);
	list_del(&rbdc->node);
	spin_unlock(&rbd_client_list_lock);

	ceph_destroy_client(rbdc->client);
	kfree(rbdc);
}

/*
 * Drop reference to ceph client node.  If it's not referenced anymore,
 * release it.
 */
static void rbd_put_client(struct rbd_client *rbdc)
{
	if (rbdc)
		kref_put(&rbdc->kref, rbd_client_release);
}
static bool rbd_image_format_valid(u32 image_format)
{
	return image_format == 1 || image_format == 2;
}
static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
{
	size_t size;
	u32 snap_count;

	/* The header has to start with the magic rbd header text */
	if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)))
		return false;

	/* The bio layer requires at least sector-sized I/O */

	if (ondisk->options.order < SECTOR_SHIFT)
		return false;

	/* If we use u64 in a few spots we may be able to loosen this */

	if (ondisk->options.order > 8 * sizeof (int) - 1)
		return false;

	/*
	 * The size of a snapshot header has to fit in a size_t, and
	 * that limits the number of snapshots.
	 */
	snap_count = le32_to_cpu(ondisk->snap_count);
	size = SIZE_MAX - sizeof (struct ceph_snap_context);
	if (snap_count > size / sizeof (__le64))
		return false;

	/*
	 * Not only that, but the size of the entire snapshot
	 * header must also be representable in a size_t.
	 */
	size -= snap_count * sizeof (__le64);
	if ((u64) size < le64_to_cpu(ondisk->snap_names_len))
		return false;

	return true;
}
/*
 * Fill an rbd image header with information from the given format 1
 * on-disk header.
 */
static int rbd_header_from_disk(struct rbd_device *rbd_dev,
				 struct rbd_image_header_ondisk *ondisk)
{
	struct rbd_image_header *header = &rbd_dev->header;
	bool first_time = header->object_prefix == NULL;
	struct ceph_snap_context *snapc;
	char *object_prefix = NULL;
	char *snap_names = NULL;
	u64 *snap_sizes = NULL;
	u32 snap_count;
	size_t size;
	int ret = -ENOMEM;
	u32 i;

	/* Allocate this now to avoid having to handle failure below */

	if (first_time) {
		size_t len;

		len = strnlen(ondisk->object_prefix,
				sizeof (ondisk->object_prefix));
		object_prefix = kmalloc(len + 1, GFP_KERNEL);
		if (!object_prefix)
			return -ENOMEM;
		memcpy(object_prefix, ondisk->object_prefix, len);
		object_prefix[len] = '\0';
	}

	/* Allocate the snapshot context and fill it in */

	snap_count = le32_to_cpu(ondisk->snap_count);
	snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
	if (!snapc)
		goto out_err;
	snapc->seq = le64_to_cpu(ondisk->snap_seq);
	if (snap_count) {
		struct rbd_image_snap_ondisk *snaps;
		u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);

		/* We'll keep a copy of the snapshot names... */

		if (snap_names_len > (u64)SIZE_MAX)
			goto out_2big;
		snap_names = kmalloc(snap_names_len, GFP_KERNEL);
		if (!snap_names)
			goto out_err;

		/* ...as well as the array of their sizes. */

		size = snap_count * sizeof (*header->snap_sizes);
		snap_sizes = kmalloc(size, GFP_KERNEL);
		if (!snap_sizes)
			goto out_err;

		/*
		 * Copy the names, and fill in each snapshot's id
		 * and size.
		 *
		 * Note that rbd_dev_v1_header_read() guarantees the
		 * ondisk buffer we're working with has
		 * snap_names_len bytes beyond the end of the
		 * snapshot id array, so this memcpy() is safe.
		 */
		memcpy(snap_names, &ondisk->snaps[snap_count], snap_names_len);
		snaps = ondisk->snaps;
		for (i = 0; i < snap_count; i++) {
			snapc->snaps[i] = le64_to_cpu(snaps[i].id);
			snap_sizes[i] = le64_to_cpu(snaps[i].image_size);
		}
	}

	/* We won't fail any more, fill in the header */

	down_write(&rbd_dev->header_rwsem);
	if (first_time) {
		header->object_prefix = object_prefix;
		header->obj_order = ondisk->options.order;
		header->crypt_type = ondisk->options.crypt_type;
		header->comp_type = ondisk->options.comp_type;
		/* The rest aren't used for format 1 images */
		header->stripe_unit = 0;
		header->stripe_count = 0;
		header->features = 0;
	} else {
		ceph_put_snap_context(header->snapc);
		kfree(header->snap_names);
		kfree(header->snap_sizes);
	}

	/* The remaining fields always get updated (when we refresh) */

	header->image_size = le64_to_cpu(ondisk->image_size);
	header->snapc = snapc;
	header->snap_names = snap_names;
	header->snap_sizes = snap_sizes;

	/* Make sure mapping size is consistent with header info */

	if (rbd_dev->spec->snap_id == CEPH_NOSNAP || first_time)
		if (rbd_dev->mapping.size != header->image_size)
			rbd_dev->mapping.size = header->image_size;

	up_write(&rbd_dev->header_rwsem);

	return 0;
out_2big:
	ret = -EIO;
out_err:
	kfree(snap_sizes);
	kfree(snap_names);
	ceph_put_snap_context(snapc);
	kfree(object_prefix);

	return ret;
}
static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which)
{
	const char *snap_name;

	rbd_assert(which < rbd_dev->header.snapc->num_snaps);

	/* Skip over names until we find the one we are looking for */

	snap_name = rbd_dev->header.snap_names;
	while (which--)
		snap_name += strlen(snap_name) + 1;

	return kstrdup(snap_name, GFP_KERNEL);
}
/*
 * Snapshot id comparison function for use with qsort()/bsearch().
 * Note that result is for snapshots in *descending* order.
 */
static int snapid_compare_reverse(const void *s1, const void *s2)
{
	u64 snap_id1 = *(u64 *)s1;
	u64 snap_id2 = *(u64 *)s2;

	if (snap_id1 < snap_id2)
		return 1;
	return snap_id1 == snap_id2 ? 0 : -1;
}

/*
 * Search a snapshot context to see if the given snapshot id is
 * present.
 *
 * Returns the position of the snapshot id in the array if it's found,
 * or BAD_SNAP_INDEX otherwise.
 *
 * Note: The snapshot array is kept sorted (by the osd) in
 * reverse order, highest snapshot id first.
 */
static u32 rbd_dev_snap_index(struct rbd_device *rbd_dev, u64 snap_id)
{
	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
	u64 *found;

	found = bsearch(&snap_id, &snapc->snaps, snapc->num_snaps,
				sizeof (snap_id), snapid_compare_reverse);

	return found ? (u32)(found - &snapc->snaps[0]) : BAD_SNAP_INDEX;
}
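/*
 * Example: with snapc->snaps = { 12, 7, 3 } (newest first), looking
 * up snap_id 7 probes the middle element (7) first and returns
 * index 1; an id not present (say 5) yields BAD_SNAP_INDEX.
 */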
static const char *rbd_dev_v1_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id)
{
	u32 which;

	which = rbd_dev_snap_index(rbd_dev, snap_id);
	if (which == BAD_SNAP_INDEX)
		return NULL;

	return _rbd_dev_v1_snap_name(rbd_dev, which);
}

static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
{
	if (snap_id == CEPH_NOSNAP)
		return RBD_SNAP_HEAD_NAME;

	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (rbd_dev->image_format == 1)
		return rbd_dev_v1_snap_name(rbd_dev, snap_id);

	return rbd_dev_v2_snap_name(rbd_dev, snap_id);
}
static int rbd_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
				u64 *snap_size)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (snap_id == CEPH_NOSNAP) {
		*snap_size = rbd_dev->header.image_size;
	} else if (rbd_dev->image_format == 1) {
		u32 which;

		which = rbd_dev_snap_index(rbd_dev, snap_id);
		if (which == BAD_SNAP_INDEX)
			return -ENOENT;

		*snap_size = rbd_dev->header.snap_sizes[which];
	} else {
		u64 size = 0;
		int ret;

		ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, NULL, &size);
		if (ret)
			return ret;

		*snap_size = size;
	}
	return 0;
}

static int rbd_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
			u64 *snap_features)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (snap_id == CEPH_NOSNAP) {
		*snap_features = rbd_dev->header.features;
	} else if (rbd_dev->image_format == 1) {
		*snap_features = 0;	/* No features for format 1 */
	} else {
		u64 features = 0;
		int ret;

		ret = _rbd_dev_v2_snap_features(rbd_dev, snap_id, &features);
		if (ret)
			return ret;

		*snap_features = features;
	}
	return 0;
}
static int rbd_dev_mapping_set(struct rbd_device *rbd_dev)
{
	u64 snap_id = rbd_dev->spec->snap_id;
	u64 size = 0;
	u64 features = 0;
	int ret;

	ret = rbd_snap_size(rbd_dev, snap_id, &size);
	if (ret)
		return ret;
	ret = rbd_snap_features(rbd_dev, snap_id, &features);
	if (ret)
		return ret;

	rbd_dev->mapping.size = size;
	rbd_dev->mapping.features = features;

	return 0;
}

static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev)
{
	rbd_dev->mapping.size = 0;
	rbd_dev->mapping.features = 0;
}
static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
{
	char *name;
	u64 segment;
	int ret;

	name = kmem_cache_alloc(rbd_segment_name_cache, GFP_NOIO);
	if (!name)
		return NULL;
	segment = offset >> rbd_dev->header.obj_order;
	ret = snprintf(name, MAX_OBJ_NAME_SIZE + 1, "%s.%012llx",
			rbd_dev->header.object_prefix, segment);
	if (ret < 0 || ret > MAX_OBJ_NAME_SIZE) {
		pr_err("error formatting segment name for #%llu (%d)\n",
			segment, ret);
		kmem_cache_free(rbd_segment_name_cache, name);
		name = NULL;
	}

	return name;
}

static void rbd_segment_name_free(const char *name)
{
	/* The explicit cast here is needed to drop the const qualifier */

	kmem_cache_free(rbd_segment_name_cache, (void *)name);
}

static u64 rbd_segment_offset(struct rbd_device *rbd_dev, u64 offset)
{
	u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;

	return offset & (segment_size - 1);
}

static u64 rbd_segment_length(struct rbd_device *rbd_dev,
				u64 offset, u64 length)
{
	u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;

	offset &= segment_size - 1;

	rbd_assert(length <= U64_MAX - offset);
	if (offset + length > segment_size)
		length = segment_size - offset;

	return length;
}

/*
 * returns the size of an object in the image
 */
static u64 rbd_obj_bytes(struct rbd_image_header *header)
{
	return 1 << header->obj_order;
}
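/*
 * Worked example: with the default obj_order of 22 (4 MiB objects),
 * image byte offset 0x600000 falls in segment 0x600000 >> 22 == 1
 * (object "<prefix>.000000000001"), at offset 0x600000 & 0x3fffff
 * == 0x200000 within that object, and a 4 MiB request starting
 * there is clipped to the 2 MiB remaining in the segment.
 */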
static void bio_chain_put(struct bio *chain)
{
	struct bio *tmp;

	while (chain) {
		tmp = chain;
		chain = chain->bi_next;
		bio_put(tmp);
	}
}

/*
 * zeros a bio chain, starting at specific offset
 */
static void zero_bio_chain(struct bio *chain, int start_ofs)
{
	struct bio_vec *bv;
	unsigned long flags;
	void *buf;
	int i;
	int pos = 0;

	while (chain) {
		bio_for_each_segment(bv, chain, i) {
			if (pos + bv->bv_len > start_ofs) {
				int remainder = max(start_ofs - pos, 0);
				buf = bvec_kmap_irq(bv, &flags);
				memset(buf + remainder, 0,
				       bv->bv_len - remainder);
				bvec_kunmap_irq(buf, &flags);
			}
			pos += bv->bv_len;
		}

		chain = chain->bi_next;
	}
}

/*
 * similar to zero_bio_chain(), zeros data defined by a page array,
 * starting at the given byte offset from the start of the array and
 * continuing up to the given end offset.  The pages array is
 * assumed to be big enough to hold all bytes up to the end.
 */
static void zero_pages(struct page **pages, u64 offset, u64 end)
{
	struct page **page = &pages[offset >> PAGE_SHIFT];

	rbd_assert(end > offset);
	rbd_assert(end - offset <= (u64)SIZE_MAX);
	while (offset < end) {
		size_t page_offset;
		size_t length;
		unsigned long flags;
		void *kaddr;

		page_offset = (size_t)(offset & ~PAGE_MASK);
		length = min(PAGE_SIZE - page_offset, (size_t)(end - offset));
		local_irq_save(flags);
		kaddr = kmap_atomic(*page);
		memset(kaddr + page_offset, 0, length);
		kunmap_atomic(kaddr);
		local_irq_restore(flags);

		offset += length;
		page++;
	}
}
/*
 * Clone a portion of a bio, starting at the given byte offset
 * and continuing for the number of bytes indicated.
 */
static struct bio *bio_clone_range(struct bio *bio_src,
					unsigned int offset,
					unsigned int len,
					gfp_t gfpmask)
{
	struct bio_vec *bv;
	unsigned int resid;
	unsigned short idx;
	unsigned int voff;
	unsigned short end_idx;
	unsigned short vcnt;
	struct bio *bio;

	/* Handle the easy case for the caller */

	if (!offset && len == bio_src->bi_size)
		return bio_clone(bio_src, gfpmask);

	if (WARN_ON_ONCE(!len))
		return NULL;
	if (WARN_ON_ONCE(len > bio_src->bi_size))
		return NULL;
	if (WARN_ON_ONCE(offset > bio_src->bi_size - len))
		return NULL;

	/* Find first affected segment... */

	resid = offset;
	__bio_for_each_segment(bv, bio_src, idx, 0) {
		if (resid < bv->bv_len)
			break;
		resid -= bv->bv_len;
	}
	voff = resid;

	/* ...and the last affected segment */

	resid += len;
	__bio_for_each_segment(bv, bio_src, end_idx, idx) {
		if (resid <= bv->bv_len)
			break;
		resid -= bv->bv_len;
	}
	vcnt = end_idx - idx + 1;

	/* Build the clone */

	bio = bio_alloc(gfpmask, (unsigned int) vcnt);
	if (!bio)
		return NULL;	/* ENOMEM */

	bio->bi_bdev = bio_src->bi_bdev;
	bio->bi_sector = bio_src->bi_sector + (offset >> SECTOR_SHIFT);
	bio->bi_rw = bio_src->bi_rw;
	bio->bi_flags |= 1 << BIO_CLONED;

	/*
	 * Copy over our part of the bio_vec, then update the first
	 * and last (or only) entries.
	 */
	memcpy(&bio->bi_io_vec[0], &bio_src->bi_io_vec[idx],
			vcnt * sizeof (struct bio_vec));
	bio->bi_io_vec[0].bv_offset += voff;
	if (vcnt > 1) {
		bio->bi_io_vec[0].bv_len -= voff;
		bio->bi_io_vec[vcnt - 1].bv_len = resid;
	} else {
		bio->bi_io_vec[0].bv_len = len;
	}

	bio->bi_vcnt = vcnt;
	bio->bi_size = len;

	return bio;
}
/*
 * Clone a portion of a bio chain, starting at the given byte offset
 * into the first bio in the source chain and continuing for the
 * number of bytes indicated.  The result is another bio chain of
 * exactly the given length, or a null pointer on error.
 *
 * The bio_src and offset parameters are both in-out.  On entry they
 * refer to the first source bio and the offset into that bio where
 * the start of data to be cloned is located.
 *
 * On return, bio_src is updated to refer to the bio in the source
 * chain that contains the first un-cloned byte, and *offset will
 * contain the offset of that byte within that bio.
 */
static struct bio *bio_chain_clone_range(struct bio **bio_src,
					unsigned int *offset,
					unsigned int len,
					gfp_t gfpmask)
{
	struct bio *bi = *bio_src;
	unsigned int off = *offset;
	struct bio *chain = NULL;
	struct bio **end;

	/* Build up a chain of clone bios up to the limit */

	if (!bi || off >= bi->bi_size || !len)
		return NULL;		/* Nothing to clone */

	end = &chain;
	while (len) {
		unsigned int bi_size;
		struct bio *bio;

		if (!bi) {
			rbd_warn(NULL, "bio_chain exhausted with %u left", len);
			goto out_err;	/* EINVAL; ran out of bio's */
		}
		bi_size = min_t(unsigned int, bi->bi_size - off, len);
		bio = bio_clone_range(bi, off, bi_size, gfpmask);
		if (!bio)
			goto out_err;	/* ENOMEM */

		*end = bio;
		end = &bio->bi_next;

		off += bi_size;
		if (off == bi->bi_size) {
			bi = bi->bi_next;
			off = 0;
		}
		len -= bi_size;
	}
	*bio_src = bi;
	*offset = off;

	return chain;
out_err:
	bio_chain_put(chain);

	return NULL;
}
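/*
 * Usage sketch for the in-out parameters (illustrative only; the
 * example function and its source chain are hypothetical):
 */
#if 0
static void example_clone_usage(struct bio *bio_list)
{
	unsigned int offset = 0;
	struct bio *clone;

	/* Clone the first 4096 bytes of the chain... */
	clone = bio_chain_clone_range(&bio_list, &offset, 4096, GFP_NOIO);

	/*
	 * ...on return bio_list and offset identify byte 4096 of the
	 * original chain, so a second call would continue from there.
	 */
}
#endif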
/*
 * The default/initial value for all object request flags is 0.  For
 * each flag, once its value is set to 1 it is never reset to 0
 * again.
 */
static void obj_request_img_data_set(struct rbd_obj_request *obj_request)
{
	if (test_and_set_bit(OBJ_REQ_IMG_DATA, &obj_request->flags)) {
		struct rbd_device *rbd_dev;

		rbd_dev = obj_request->img_request->rbd_dev;
		rbd_warn(rbd_dev, "obj_request %p already marked img_data\n",
			obj_request);
	}
}

static bool obj_request_img_data_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_IMG_DATA, &obj_request->flags) != 0;
}

static void obj_request_done_set(struct rbd_obj_request *obj_request)
{
	if (test_and_set_bit(OBJ_REQ_DONE, &obj_request->flags)) {
		struct rbd_device *rbd_dev = NULL;

		if (obj_request_img_data_test(obj_request))
			rbd_dev = obj_request->img_request->rbd_dev;
		rbd_warn(rbd_dev, "obj_request %p already marked done\n",
			obj_request);
	}
}

static bool obj_request_done_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_DONE, &obj_request->flags) != 0;
}

/*
 * This sets the KNOWN flag after (possibly) setting the EXISTS
 * flag.  The latter is set based on the "exists" value provided.
 *
 * Note that for our purposes once an object exists it never goes
 * away again.  It's possible that the responses from two existence
 * checks are separated by the creation of the target object, and
 * the first ("doesn't exist") response arrives *after* the second
 * ("does exist").  In that case we ignore the second one.
 */
static void obj_request_existence_set(struct rbd_obj_request *obj_request,
				bool exists)
{
	if (exists)
		set_bit(OBJ_REQ_EXISTS, &obj_request->flags);
	set_bit(OBJ_REQ_KNOWN, &obj_request->flags);
	smp_mb();
}

static bool obj_request_known_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_KNOWN, &obj_request->flags) != 0;
}

static bool obj_request_exists_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_EXISTS, &obj_request->flags) != 0;
}
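/*
 * Lifecycle sketch: a layered write first learns whether its target
 * object exists.  Once the STAT response arrives the KNOWN bit is
 * set and EXISTS reflects the answer; both are one-way transitions,
 * so a later, stale "doesn't exist" answer cannot clear EXISTS.
 */
#if 0
	obj_request_existence_set(obj_request, true);
	BUG_ON(!obj_request_known_test(obj_request));
	BUG_ON(!obj_request_exists_test(obj_request));
#endif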
static void rbd_obj_request_get(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p (was %d)\n", __func__, obj_request,
		atomic_read(&obj_request->kref.refcount));
	kref_get(&obj_request->kref);
}

static void rbd_obj_request_destroy(struct kref *kref);
static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request != NULL);
	dout("%s: obj %p (was %d)\n", __func__, obj_request,
		atomic_read(&obj_request->kref.refcount));
	kref_put(&obj_request->kref, rbd_obj_request_destroy);
}

static void rbd_img_request_get(struct rbd_img_request *img_request)
{
	dout("%s: img %p (was %d)\n", __func__, img_request,
		atomic_read(&img_request->kref.refcount));
	kref_get(&img_request->kref);
}

static void rbd_img_request_destroy(struct kref *kref);
static void rbd_img_request_put(struct rbd_img_request *img_request)
{
	rbd_assert(img_request != NULL);
	dout("%s: img %p (was %d)\n", __func__, img_request,
		atomic_read(&img_request->kref.refcount));
	kref_put(&img_request->kref, rbd_img_request_destroy);
}
static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request->img_request == NULL);

	/* Image request now owns object's original reference */
	obj_request->img_request = img_request;
	obj_request->which = img_request->obj_request_count;
	rbd_assert(!obj_request_img_data_test(obj_request));
	obj_request_img_data_set(obj_request);
	rbd_assert(obj_request->which != BAD_WHICH);
	img_request->obj_request_count++;
	list_add_tail(&obj_request->links, &img_request->obj_requests);
	dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
		obj_request->which);
}

static inline void rbd_img_obj_request_del(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request->which != BAD_WHICH);

	dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
		obj_request->which);
	list_del(&obj_request->links);
	rbd_assert(img_request->obj_request_count > 0);
	img_request->obj_request_count--;
	rbd_assert(obj_request->which == img_request->obj_request_count);
	obj_request->which = BAD_WHICH;
	rbd_assert(obj_request_img_data_test(obj_request));
	rbd_assert(obj_request->img_request == img_request);
	obj_request->img_request = NULL;
	obj_request->callback = NULL;
	rbd_obj_request_put(obj_request);
}
static bool obj_request_type_valid(enum obj_request_type type)
{
	switch (type) {
	case OBJ_REQUEST_NODATA:
	case OBJ_REQUEST_BIO:
	case OBJ_REQUEST_PAGES:
		return true;
	default:
		return false;
	}
}

static int rbd_obj_request_submit(struct ceph_osd_client *osdc,
				struct rbd_obj_request *obj_request)
{
	dout("%s: osdc %p obj %p\n", __func__, osdc, obj_request);

	return ceph_osdc_start_request(osdc, obj_request->osd_req, false);
}
static void rbd_img_request_complete(struct rbd_img_request *img_request)
{
	dout("%s: img %p\n", __func__, img_request);

	/*
	 * If no error occurred, compute the aggregate transfer
	 * count for the image request.  We could instead use
	 * atomic64_cmpxchg() to update it as each object request
	 * completes; not clear offhand which way is better.
	 */
	if (!img_request->result) {
		struct rbd_obj_request *obj_request;
		u64 xferred = 0;

		for_each_obj_request(img_request, obj_request)
			xferred += obj_request->xferred;
		img_request->xferred = xferred;
	}

	if (img_request->callback)
		img_request->callback(img_request);
	else
		rbd_img_request_put(img_request);
}

/* Caller is responsible for rbd_obj_request_destroy(obj_request) */

static int rbd_obj_request_wait(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p\n", __func__, obj_request);

	return wait_for_completion_interruptible(&obj_request->completion);
}
/*
 * The default/initial value for all image request flags is 0.  Each
 * is conditionally set to 1 at image request initialization time
 * and currently never changes thereafter.
 */
static void img_request_write_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_WRITE, &img_request->flags);
	smp_mb();
}

static bool img_request_write_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_WRITE, &img_request->flags) != 0;
}

static void img_request_child_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_CHILD, &img_request->flags);
	smp_mb();
}

static bool img_request_child_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_CHILD, &img_request->flags) != 0;
}

static void img_request_layered_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_LAYERED, &img_request->flags);
	smp_mb();
}

static bool img_request_layered_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_LAYERED, &img_request->flags) != 0;
}
static void
rbd_img_obj_request_read_callback(struct rbd_obj_request *obj_request)
{
	u64 xferred = obj_request->xferred;
	u64 length = obj_request->length;

	dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
		obj_request, obj_request->img_request, obj_request->result,
		xferred, length);
	/*
	 * ENOENT means a hole in the image.  We zero-fill the
	 * entire length of the request.  A short read also implies
	 * zero-fill to the end of the request.  Either way we
	 * update the xferred count to indicate the whole request
	 * was satisfied.
	 */
	rbd_assert(obj_request->type != OBJ_REQUEST_NODATA);
	if (obj_request->result == -ENOENT) {
		if (obj_request->type == OBJ_REQUEST_BIO)
			zero_bio_chain(obj_request->bio_list, 0);
		else
			zero_pages(obj_request->pages, 0, length);
		obj_request->result = 0;
		obj_request->xferred = length;
	} else if (xferred < length && !obj_request->result) {
		if (obj_request->type == OBJ_REQUEST_BIO)
			zero_bio_chain(obj_request->bio_list, xferred);
		else
			zero_pages(obj_request->pages, xferred, length);
		obj_request->xferred = length;
	}
	obj_request_done_set(obj_request);
}
static void rbd_obj_request_complete(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p cb %p\n", __func__, obj_request,
		obj_request->callback);
	if (obj_request->callback)
		obj_request->callback(obj_request);
	else
		complete_all(&obj_request->completion);
}

static void rbd_osd_trivial_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p\n", __func__, obj_request);
	obj_request_done_set(obj_request);
}
static void rbd_osd_read_callback(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request = NULL;
	struct rbd_device *rbd_dev = NULL;
	bool layered = false;

	if (obj_request_img_data_test(obj_request)) {
		img_request = obj_request->img_request;
		layered = img_request && img_request_layered_test(img_request);
		rbd_dev = img_request->rbd_dev;
	}

	dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
		obj_request, img_request, obj_request->result,
		obj_request->xferred, obj_request->length);
	if (layered && obj_request->result == -ENOENT &&
			obj_request->img_offset < rbd_dev->parent_overlap)
		rbd_img_parent_read(obj_request);
	else if (img_request)
		rbd_img_obj_request_read_callback(obj_request);
	else
		obj_request_done_set(obj_request);
}
static void rbd_osd_write_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p result %d %llu\n", __func__, obj_request,
		obj_request->result, obj_request->length);
	/*
	 * There is no such thing as a successful short write.  Set
	 * it to our originally-requested length.
	 */
	obj_request->xferred = obj_request->length;
	obj_request_done_set(obj_request);
}

/*
 * For a simple stat call there's nothing to do.  We'll do more if
 * this is part of a write sequence for a layered image.
 */
static void rbd_osd_stat_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p\n", __func__, obj_request);
	obj_request_done_set(obj_request);
}
static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
				struct ceph_msg *msg)
{
	struct rbd_obj_request *obj_request = osd_req->r_priv;
	u16 opcode;

	dout("%s: osd_req %p msg %p\n", __func__, osd_req, msg);
	rbd_assert(osd_req == obj_request->osd_req);
	if (obj_request_img_data_test(obj_request)) {
		rbd_assert(obj_request->img_request);
		rbd_assert(obj_request->which != BAD_WHICH);
	} else {
		rbd_assert(obj_request->which == BAD_WHICH);
	}

	if (osd_req->r_result < 0)
		obj_request->result = osd_req->r_result;

	BUG_ON(osd_req->r_num_ops > 2);

	/*
	 * We support a 64-bit length, but ultimately it has to be
	 * passed to blk_end_request(), which takes an unsigned int.
	 */
	obj_request->xferred = osd_req->r_reply_op_len[0];
	rbd_assert(obj_request->xferred < (u64)UINT_MAX);
	opcode = osd_req->r_ops[0].op;
	switch (opcode) {
	case CEPH_OSD_OP_READ:
		rbd_osd_read_callback(obj_request);
		break;
	case CEPH_OSD_OP_WRITE:
		rbd_osd_write_callback(obj_request);
		break;
	case CEPH_OSD_OP_STAT:
		rbd_osd_stat_callback(obj_request);
		break;
	case CEPH_OSD_OP_CALL:
	case CEPH_OSD_OP_NOTIFY_ACK:
	case CEPH_OSD_OP_WATCH:
		rbd_osd_trivial_callback(obj_request);
		break;
	default:
		rbd_warn(NULL, "%s: unsupported op %hu\n",
			obj_request->object_name, (unsigned short) opcode);
		break;
	}

	if (obj_request_done_test(obj_request))
		rbd_obj_request_complete(obj_request);
}
static void rbd_osd_req_format_read(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request = obj_request->img_request;
	struct ceph_osd_request *osd_req = obj_request->osd_req;
	u64 snap_id;

	rbd_assert(osd_req != NULL);

	snap_id = img_request ? img_request->snap_id : CEPH_NOSNAP;
	ceph_osdc_build_request(osd_req, obj_request->offset,
			NULL, snap_id, NULL);
}

static void rbd_osd_req_format_write(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request = obj_request->img_request;
	struct ceph_osd_request *osd_req = obj_request->osd_req;
	struct ceph_snap_context *snapc;
	struct timespec mtime = CURRENT_TIME;

	rbd_assert(osd_req != NULL);

	snapc = img_request ? img_request->snapc : NULL;
	ceph_osdc_build_request(osd_req, obj_request->offset,
			snapc, CEPH_NOSNAP, &mtime);
}
static struct ceph_osd_request *rbd_osd_req_create(
					struct rbd_device *rbd_dev,
					bool write_request,
					struct rbd_obj_request *obj_request)
{
	struct ceph_snap_context *snapc = NULL;
	struct ceph_osd_client *osdc;
	struct ceph_osd_request *osd_req;

	if (obj_request_img_data_test(obj_request)) {
		struct rbd_img_request *img_request = obj_request->img_request;

		rbd_assert(write_request ==
				img_request_write_test(img_request));
		if (write_request)
			snapc = img_request->snapc;
	}

	/* Allocate and initialize the request, for the single op */

	osdc = &rbd_dev->rbd_client->client->osdc;
	osd_req = ceph_osdc_alloc_request(osdc, snapc, 1, false, GFP_ATOMIC);
	if (!osd_req)
		return NULL;	/* ENOMEM */

	if (write_request)
		osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
	else
		osd_req->r_flags = CEPH_OSD_FLAG_READ;

	osd_req->r_callback = rbd_osd_req_callback;
	osd_req->r_priv = obj_request;

	osd_req->r_oid_len = strlen(obj_request->object_name);
	rbd_assert(osd_req->r_oid_len < sizeof (osd_req->r_oid));
	memcpy(osd_req->r_oid, obj_request->object_name, osd_req->r_oid_len);

	osd_req->r_file_layout = rbd_dev->layout;	/* struct */

	return osd_req;
}
/*
 * Create a copyup osd request based on the information in the
 * object request supplied.  A copyup request has two osd ops,
 * a copyup method call, and a "normal" write request.
 */
static struct ceph_osd_request *
rbd_osd_req_create_copyup(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	struct ceph_snap_context *snapc;
	struct rbd_device *rbd_dev;
	struct ceph_osd_client *osdc;
	struct ceph_osd_request *osd_req;

	rbd_assert(obj_request_img_data_test(obj_request));
	img_request = obj_request->img_request;
	rbd_assert(img_request);
	rbd_assert(img_request_write_test(img_request));

	/* Allocate and initialize the request, for the two ops */

	snapc = img_request->snapc;
	rbd_dev = img_request->rbd_dev;
	osdc = &rbd_dev->rbd_client->client->osdc;
	osd_req = ceph_osdc_alloc_request(osdc, snapc, 2, false, GFP_ATOMIC);
	if (!osd_req)
		return NULL;	/* ENOMEM */

	osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
	osd_req->r_callback = rbd_osd_req_callback;
	osd_req->r_priv = obj_request;

	osd_req->r_oid_len = strlen(obj_request->object_name);
	rbd_assert(osd_req->r_oid_len < sizeof (osd_req->r_oid));
	memcpy(osd_req->r_oid, obj_request->object_name, osd_req->r_oid_len);

	osd_req->r_file_layout = rbd_dev->layout;	/* struct */

	return osd_req;
}

static void rbd_osd_req_destroy(struct ceph_osd_request *osd_req)
{
	ceph_osdc_put_request(osd_req);
}
/* object_name is assumed to be a non-null pointer and NUL-terminated */

static struct rbd_obj_request *rbd_obj_request_create(const char *object_name,
						u64 offset, u64 length,
						enum obj_request_type type)
{
	struct rbd_obj_request *obj_request;
	size_t size;
	char *name;

	rbd_assert(obj_request_type_valid(type));

	size = strlen(object_name) + 1;
	name = kmalloc(size, GFP_KERNEL);
	if (!name)
		return NULL;

	obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_KERNEL);
	if (!obj_request) {
		kfree(name);
		return NULL;
	}

	obj_request->object_name = memcpy(name, object_name, size);
	obj_request->offset = offset;
	obj_request->length = length;
	obj_request->flags = 0;
	obj_request->which = BAD_WHICH;
	obj_request->type = type;
	INIT_LIST_HEAD(&obj_request->links);
	init_completion(&obj_request->completion);
	kref_init(&obj_request->kref);

	dout("%s: \"%s\" %llu/%llu %d -> obj %p\n", __func__, object_name,
		offset, length, (int)type, obj_request);

	return obj_request;
}
static void rbd_obj_request_destroy(struct kref *kref)
{
	struct rbd_obj_request *obj_request;

	obj_request = container_of(kref, struct rbd_obj_request, kref);

	dout("%s: obj %p\n", __func__, obj_request);

	rbd_assert(obj_request->img_request == NULL);
	rbd_assert(obj_request->which == BAD_WHICH);

	if (obj_request->osd_req)
		rbd_osd_req_destroy(obj_request->osd_req);

	rbd_assert(obj_request_type_valid(obj_request->type));
	switch (obj_request->type) {
	case OBJ_REQUEST_NODATA:
		break;		/* Nothing to do */
	case OBJ_REQUEST_BIO:
		if (obj_request->bio_list)
			bio_chain_put(obj_request->bio_list);
		break;
	case OBJ_REQUEST_PAGES:
		if (obj_request->pages)
			ceph_release_page_vector(obj_request->pages,
						obj_request->page_count);
		break;
	}

	kfree(obj_request->object_name);
	obj_request->object_name = NULL;
	kmem_cache_free(rbd_obj_request_cache, obj_request);
}
/*
 * Caller is responsible for filling in the list of object requests
 * that comprises the image request, and the Linux request pointer
 * (if there is one).
 */
static struct rbd_img_request *rbd_img_request_create(
					struct rbd_device *rbd_dev,
					u64 offset, u64 length,
					bool write_request,
					bool child_request)
{
	struct rbd_img_request *img_request;

	img_request = kmem_cache_alloc(rbd_img_request_cache, GFP_ATOMIC);
	if (!img_request)
		return NULL;

	if (write_request) {
		down_read(&rbd_dev->header_rwsem);
		ceph_get_snap_context(rbd_dev->header.snapc);
		up_read(&rbd_dev->header_rwsem);
	}

	img_request->rq = NULL;
	img_request->rbd_dev = rbd_dev;
	img_request->offset = offset;
	img_request->length = length;
	img_request->flags = 0;
	if (write_request) {
		img_request_write_set(img_request);
		img_request->snapc = rbd_dev->header.snapc;
	} else {
		img_request->snap_id = rbd_dev->spec->snap_id;
	}
	if (child_request)
		img_request_child_set(img_request);
	if (rbd_dev->parent_spec)
		img_request_layered_set(img_request);
	spin_lock_init(&img_request->completion_lock);
	img_request->next_completion = 0;
	img_request->callback = NULL;
	img_request->result = 0;
	img_request->obj_request_count = 0;
	INIT_LIST_HEAD(&img_request->obj_requests);
	kref_init(&img_request->kref);

	rbd_img_request_get(img_request);	/* Avoid a warning */
	rbd_img_request_put(img_request);	/* TEMPORARY */

	dout("%s: rbd_dev %p %s %llu/%llu -> img %p\n", __func__, rbd_dev,
		write_request ? "write" : "read", offset, length,
		img_request);

	return img_request;
}
static void rbd_img_request_destroy(struct kref *kref)
{
	struct rbd_img_request *img_request;
	struct rbd_obj_request *obj_request;
	struct rbd_obj_request *next_obj_request;

	img_request = container_of(kref, struct rbd_img_request, kref);

	dout("%s: img %p\n", __func__, img_request);

	for_each_obj_request_safe(img_request, obj_request, next_obj_request)
		rbd_img_obj_request_del(img_request, obj_request);
	rbd_assert(img_request->obj_request_count == 0);

	if (img_request_write_test(img_request))
		ceph_put_snap_context(img_request->snapc);

	if (img_request_child_test(img_request))
		rbd_obj_request_put(img_request->obj_request);

	kmem_cache_free(rbd_img_request_cache, img_request);
}
static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	unsigned int xferred;
	int result;
	bool more;

	rbd_assert(obj_request_img_data_test(obj_request));
	img_request = obj_request->img_request;

	rbd_assert(obj_request->xferred <= (u64)UINT_MAX);
	xferred = (unsigned int)obj_request->xferred;
	result = obj_request->result;
	if (result) {
		struct rbd_device *rbd_dev = img_request->rbd_dev;

		rbd_warn(rbd_dev, "%s %llx at %llx (%llx)\n",
			img_request_write_test(img_request) ? "write" : "read",
			obj_request->length, obj_request->img_offset,
			obj_request->offset);
		rbd_warn(rbd_dev, "  result %d xferred %x\n",
			result, xferred);
		if (!img_request->result)
			img_request->result = result;
	}

	/* Image object requests don't own their page array */

	if (obj_request->type == OBJ_REQUEST_PAGES) {
		obj_request->pages = NULL;
		obj_request->page_count = 0;
	}

	if (img_request_child_test(img_request)) {
		rbd_assert(img_request->obj_request != NULL);
		more = obj_request->which < img_request->obj_request_count - 1;
	} else {
		rbd_assert(img_request->rq != NULL);
		more = blk_end_request(img_request->rq, result, xferred);
	}

	return more;
}
static void rbd_img_obj_callback(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	u32 which = obj_request->which;
	bool more = true;

	rbd_assert(obj_request_img_data_test(obj_request));
	img_request = obj_request->img_request;

	dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
	rbd_assert(img_request != NULL);
	rbd_assert(img_request->obj_request_count > 0);
	rbd_assert(which != BAD_WHICH);
	rbd_assert(which < img_request->obj_request_count);
	rbd_assert(which >= img_request->next_completion);

	spin_lock_irq(&img_request->completion_lock);
	if (which != img_request->next_completion)
		goto out;

	for_each_obj_request_from(img_request, obj_request) {
		rbd_assert(more);
		rbd_assert(which < img_request->obj_request_count);

		if (!obj_request_done_test(obj_request))
			break;
		more = rbd_img_obj_end_request(obj_request);
		which++;
	}

	rbd_assert(more ^ (which == img_request->obj_request_count));
	img_request->next_completion = which;
out:
	spin_unlock_irq(&img_request->completion_lock);

	if (!more)
		rbd_img_request_complete(img_request);
}
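/*
 * Completion ordering example: if object requests 0..2 finish in the
 * order 2, 0, 1, the callback for 2 does nothing (which !=
 * next_completion), the callback for 0 ends request 0 only, and the
 * callback for 1 then sweeps forward, ending both 1 and the
 * already-done 2.
 */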
/*
 * Split up an image request into one or more object requests, each
 * to a different object.  The "type" parameter indicates whether
 * "data_desc" is the pointer to the head of a list of bio
 * structures, or the base of a page array.  In either case this
 * function assumes data_desc describes memory sufficient to hold
 * all data described by the image request.
 */
static int rbd_img_request_fill(struct rbd_img_request *img_request,
					enum obj_request_type type,
					void *data_desc)
{
	struct rbd_device *rbd_dev = img_request->rbd_dev;
	struct rbd_obj_request *obj_request = NULL;
	struct rbd_obj_request *next_obj_request;
	bool write_request = img_request_write_test(img_request);
	struct bio *bio_list;
	unsigned int bio_offset = 0;
	struct page **pages;
	u64 img_offset;
	u64 resid;
	u16 opcode;

	dout("%s: img %p type %d data_desc %p\n", __func__, img_request,
		(int)type, data_desc);

	opcode = write_request ? CEPH_OSD_OP_WRITE : CEPH_OSD_OP_READ;
	img_offset = img_request->offset;
	resid = img_request->length;
	rbd_assert(resid > 0);

	if (type == OBJ_REQUEST_BIO) {
		bio_list = data_desc;
		rbd_assert(img_offset == bio_list->bi_sector << SECTOR_SHIFT);
	} else {
		rbd_assert(type == OBJ_REQUEST_PAGES);
		pages = data_desc;
	}

	while (resid) {
		struct ceph_osd_request *osd_req;
		const char *object_name;
		u64 offset;
		u64 length;

		object_name = rbd_segment_name(rbd_dev, img_offset);
		if (!object_name)
			goto out_unwind;
		offset = rbd_segment_offset(rbd_dev, img_offset);
		length = rbd_segment_length(rbd_dev, img_offset, resid);
		obj_request = rbd_obj_request_create(object_name,
						offset, length, type);
		/* object request has its own copy of the object name */
		rbd_segment_name_free(object_name);
		if (!obj_request)
			goto out_unwind;

		if (type == OBJ_REQUEST_BIO) {
			unsigned int clone_size;

			rbd_assert(length <= (u64)UINT_MAX);
			clone_size = (unsigned int)length;
			obj_request->bio_list =
					bio_chain_clone_range(&bio_list,
								&bio_offset,
								clone_size,
								GFP_ATOMIC);
			if (!obj_request->bio_list)
				goto out_partial;
		} else {
			unsigned int page_count;

			obj_request->pages = pages;
			page_count = (u32)calc_pages_for(offset, length);
			obj_request->page_count = page_count;
			if ((offset + length) & ~PAGE_MASK)
				page_count--;	/* more on last page */
			pages += page_count;
		}

		osd_req = rbd_osd_req_create(rbd_dev, write_request,
						obj_request);
		if (!osd_req)
			goto out_partial;
		obj_request->osd_req = osd_req;
		obj_request->callback = rbd_img_obj_callback;

		osd_req_op_extent_init(osd_req, 0, opcode, offset, length,
						0, 0);
		if (type == OBJ_REQUEST_BIO)
			osd_req_op_extent_osd_data_bio(osd_req, 0,
					obj_request->bio_list, length);
		else
			osd_req_op_extent_osd_data_pages(osd_req, 0,
					obj_request->pages, length,
					offset & ~PAGE_MASK, false, false);

		if (write_request)
			rbd_osd_req_format_write(obj_request);
		else
			rbd_osd_req_format_read(obj_request);

		obj_request->img_offset = img_offset;
		rbd_img_obj_request_add(img_request, obj_request);

		img_offset += length;
		resid -= length;
	}

	return 0;

out_partial:
	rbd_obj_request_put(obj_request);
out_unwind:
	for_each_obj_request_safe(img_request, obj_request, next_obj_request)
		rbd_obj_request_put(obj_request);

	return -ENOMEM;
}
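/*
 * Worked example: with 4 MiB objects, an 8 MiB write starting at
 * image offset 6 MiB becomes three object requests: 2 MiB at offset
 * 2 MiB in segment 1, all 4 MiB of segment 2, and 2 MiB at offset 0
 * in segment 3.
 */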
static void
rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	struct rbd_device *rbd_dev;
	u64 length;
	u32 page_count;

	rbd_assert(obj_request->type == OBJ_REQUEST_BIO);
	rbd_assert(obj_request_img_data_test(obj_request));
	img_request = obj_request->img_request;
	rbd_assert(img_request);

	rbd_dev = img_request->rbd_dev;
	rbd_assert(rbd_dev);
	length = (u64)1 << rbd_dev->header.obj_order;
	page_count = (u32)calc_pages_for(0, length);

	rbd_assert(obj_request->copyup_pages);
	ceph_release_page_vector(obj_request->copyup_pages, page_count);
	obj_request->copyup_pages = NULL;

	/*
	 * We want the transfer count to reflect the size of the
	 * original write request.  There is no such thing as a
	 * successful short write, so if the request was successful
	 * we can just set it to the originally-requested length.
	 */
	if (!obj_request->result)
		obj_request->xferred = obj_request->length;

	/* Finish up with the normal image object callback */

	rbd_img_obj_callback(obj_request);
}
static void
rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)
{
	struct rbd_obj_request *orig_request;
	struct ceph_osd_request *osd_req;
	struct ceph_osd_client *osdc;
	struct rbd_device *rbd_dev;
	struct page **pages;
	int result;
	u64 obj_size;
	u64 xferred;

	rbd_assert(img_request_child_test(img_request));

	/* First get what we need from the image request */

	pages = img_request->copyup_pages;
	rbd_assert(pages != NULL);
	img_request->copyup_pages = NULL;

	orig_request = img_request->obj_request;
	rbd_assert(orig_request != NULL);
	rbd_assert(orig_request->type == OBJ_REQUEST_BIO);
	result = img_request->result;
	obj_size = img_request->length;
	xferred = img_request->xferred;

	rbd_dev = img_request->rbd_dev;
	rbd_assert(rbd_dev);
	rbd_assert(obj_size == (u64)1 << rbd_dev->header.obj_order);

	rbd_img_request_put(img_request);

	if (result)
		goto out_err;

	/* Allocate the new copyup osd request for the original request */

	result = -ENOMEM;
	rbd_assert(!orig_request->osd_req);
	osd_req = rbd_osd_req_create_copyup(orig_request);
	if (!osd_req)
		goto out_err;
	orig_request->osd_req = osd_req;
	orig_request->copyup_pages = pages;

	/* Initialize the copyup op */

	osd_req_op_cls_init(osd_req, 0, CEPH_OSD_OP_CALL, "rbd", "copyup");
	osd_req_op_cls_request_data_pages(osd_req, 0, pages, obj_size, 0,
						false, false);

	/* Then the original write request op */

	osd_req_op_extent_init(osd_req, 1, CEPH_OSD_OP_WRITE,
					orig_request->offset,
					orig_request->length, 0, 0);
	osd_req_op_extent_osd_data_bio(osd_req, 1, orig_request->bio_list,
					orig_request->length);

	rbd_osd_req_format_write(orig_request);

	/* All set, send it off. */

	orig_request->callback = rbd_img_obj_copyup_callback;
	osdc = &rbd_dev->rbd_client->client->osdc;
	result = rbd_obj_request_submit(osdc, orig_request);
	if (!result)
		return;
out_err:
	/* Record the error code and complete the request */

	orig_request->result = result;
	orig_request->xferred = 0;
	obj_request_done_set(orig_request);
	rbd_obj_request_complete(orig_request);
}
2241 * Read from the parent image the range of data that covers the
2242 * entire target of the given object request. This is used for
2243 * satisfying a layered image write request when the target of an
2244 * object request from the image request does not exist.
2246 * A page array big enough to hold the returned data is allocated
2247 * and supplied to rbd_img_request_fill() as the "data descriptor."
2248 * When the read completes, this page array will be transferred to
2249 * the original object request for the copyup operation.
2251 * If an error occurs, record it as the result of the original
2252 * object request and mark it done so it gets completed.
2254 static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request)
2256 struct rbd_img_request *img_request = NULL;
2257 struct rbd_img_request *parent_request = NULL;
2258 struct rbd_device *rbd_dev;
2261 struct page **pages = NULL;
2265 rbd_assert(obj_request_img_data_test(obj_request));
2266 rbd_assert(obj_request->type == OBJ_REQUEST_BIO);
2268 img_request = obj_request->img_request;
2269 rbd_assert(img_request != NULL);
2270 rbd_dev = img_request->rbd_dev;
2271 rbd_assert(rbd_dev->parent != NULL);
2274 * First things first. The original osd request is of no
2275 * use to us any more; we'll need a new one that can hold
2276 * the two ops in a copyup request. We'll get that later,
2277 * but for now we can release the old one.
2279 rbd_osd_req_destroy(obj_request->osd_req);
2280 obj_request->osd_req = NULL;
2283 * Determine the byte range covered by the object in the
2284 * child image to which the original request was to be sent.
2286 img_offset = obj_request->img_offset - obj_request->offset;
2287 length = (u64)1 << rbd_dev->header.obj_order;
2290 * There is no defined parent data beyond the parent
2291 * overlap, so limit what we read at that boundary if necessary.
2294 if (img_offset + length > rbd_dev->parent_overlap) {
2295 rbd_assert(img_offset < rbd_dev->parent_overlap);
2296 length = rbd_dev->parent_overlap - img_offset;
2300 * Allocate a page array big enough to receive the data read
2303 page_count = (u32)calc_pages_for(0, length);
2304 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2305 if (IS_ERR(pages)) {
2306 result = PTR_ERR(pages);
2312 parent_request = rbd_img_request_create(rbd_dev->parent,
2315 if (!parent_request)
2317 rbd_obj_request_get(obj_request);
2318 parent_request->obj_request = obj_request;
2320 result = rbd_img_request_fill(parent_request, OBJ_REQUEST_PAGES, pages);
2323 parent_request->copyup_pages = pages;
2325 parent_request->callback = rbd_img_obj_parent_read_full_callback;
2326 result = rbd_img_request_submit(parent_request);
2330 parent_request->copyup_pages = NULL;
2331 parent_request->obj_request = NULL;
2332 rbd_obj_request_put(obj_request);
2335 ceph_release_page_vector(pages, page_count);
2337 rbd_img_request_put(parent_request);
2338 obj_request->result = result;
2339 obj_request->xferred = 0;
2340 obj_request_done_set(obj_request);
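/*
 * Editor's note: the clipping arithmetic above is easy to get wrong, so
 * here is a minimal standalone sketch of it (not driver code; all names
 * are local to the example, and a 4 MB object / 6 MB overlap is assumed
 * purely for illustration).
 */
#if 0	/* standalone example; build separately with any C compiler */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Clip a full-object parent read at the parent overlap boundary. */
static uint64_t clip_to_overlap(uint64_t img_offset, uint8_t obj_order,
				uint64_t parent_overlap)
{
	uint64_t length = (uint64_t)1 << obj_order;	/* whole object */

	if (img_offset + length > parent_overlap) {
		assert(img_offset < parent_overlap);
		length = parent_overlap - img_offset;
	}
	return length;
}

int main(void)
{
	/* 4 MB objects (order 22), 6 MB parent overlap: a read at image
	 * offset 4 MB is clipped from 4 MB down to 2 MB. */
	printf("%llu\n", (unsigned long long)
	       clip_to_overlap((uint64_t)4 << 20, 22, (uint64_t)6 << 20));
	return 0;
}
#endif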
2345 static void rbd_img_obj_exists_callback(struct rbd_obj_request *obj_request)
2347 struct rbd_obj_request *orig_request;
2350 rbd_assert(!obj_request_img_data_test(obj_request));
2353 * All we need from the object request is the original
2354 * request and the result of the STAT op. Grab those, then
2355 * we're done with the request.
2357 orig_request = obj_request->obj_request;
2358 obj_request->obj_request = NULL;
2359 rbd_assert(orig_request);
2360 rbd_assert(orig_request->img_request);
2362 result = obj_request->result;
2363 obj_request->result = 0;
2365 dout("%s: obj %p for obj %p result %d %llu/%llu\n", __func__,
2366 obj_request, orig_request, result,
2367 obj_request->xferred, obj_request->length);
2368 rbd_obj_request_put(obj_request);
2370 rbd_assert(orig_request);
2371 rbd_assert(orig_request->img_request);
2374 * Our only purpose here is to determine whether the object
2375 * exists, and we don't want to treat the non-existence as
2376 * an error. If something else comes back, transfer the
2377 * error to the original request and complete it now.
2379 if (!result) {
2380 obj_request_existence_set(orig_request, true);
2381 } else if (result == -ENOENT) {
2382 obj_request_existence_set(orig_request, false);
2383 } else if (result) {
2384 orig_request->result = result;
2389 * Resubmit the original request now that we have recorded
2390 * whether the target object exists.
2392 orig_request->result = rbd_img_obj_request_submit(orig_request);
2394 if (orig_request->result)
2395 rbd_obj_request_complete(orig_request);
2396 rbd_obj_request_put(orig_request);
2399 static int rbd_img_obj_exists_submit(struct rbd_obj_request *obj_request)
2401 struct rbd_obj_request *stat_request;
2402 struct rbd_device *rbd_dev;
2403 struct ceph_osd_client *osdc;
2404 struct page **pages = NULL;
2410 * The response data for a STAT call consists of:
2411 *     le64 length;
2412 *     struct {
2413 *         le32 tv_sec;
2414 *         le32 tv_nsec;
2415 *     } mtime;
2417 size = sizeof (__le64) + sizeof (__le32) + sizeof (__le32);
2418 page_count = (u32)calc_pages_for(0, size);
2419 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2421 return PTR_ERR(pages);
2424 stat_request = rbd_obj_request_create(obj_request->object_name, 0, 0,
2429 rbd_obj_request_get(obj_request);
2430 stat_request->obj_request = obj_request;
2431 stat_request->pages = pages;
2432 stat_request->page_count = page_count;
2434 rbd_assert(obj_request->img_request);
2435 rbd_dev = obj_request->img_request->rbd_dev;
2436 stat_request->osd_req = rbd_osd_req_create(rbd_dev, false,
2438 if (!stat_request->osd_req)
2440 stat_request->callback = rbd_img_obj_exists_callback;
2442 osd_req_op_init(stat_request->osd_req, 0, CEPH_OSD_OP_STAT);
2443 osd_req_op_raw_data_in_pages(stat_request->osd_req, 0, pages, size, 0,
2445 rbd_osd_req_format_read(stat_request);
2447 osdc = &rbd_dev->rbd_client->client->osdc;
2448 ret = rbd_obj_request_submit(osdc, stat_request);
2451 rbd_obj_request_put(obj_request);
2456 static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request)
2458 struct rbd_img_request *img_request;
2459 struct rbd_device *rbd_dev;
2462 rbd_assert(obj_request_img_data_test(obj_request));
2464 img_request = obj_request->img_request;
2465 rbd_assert(img_request);
2466 rbd_dev = img_request->rbd_dev;
2469 * Only writes to layered images need special handling.
2470 * Reads and non-layered writes are simple object requests.
2471 * Layered writes that start beyond the end of the overlap
2472 * with the parent have no parent data, so they too are
2473 * simple object requests. Finally, if the target object is
2474 * known to already exist, its parent data has already been
2475 * copied, so a write to the object can also be handled as a
2476 * simple object request.
2478 if (!img_request_write_test(img_request) ||
2479 !img_request_layered_test(img_request) ||
2480 rbd_dev->parent_overlap <= obj_request->img_offset ||
2481 ((known = obj_request_known_test(obj_request)) &&
2482 obj_request_exists_test(obj_request))) {
2484 struct rbd_device *rbd_dev;
2485 struct ceph_osd_client *osdc;
2487 rbd_dev = obj_request->img_request->rbd_dev;
2488 osdc = &rbd_dev->rbd_client->client->osdc;
2490 return rbd_obj_request_submit(osdc, obj_request);
2494 * It's a layered write. The target object might exist but
2495 * we may not know that yet. If we know it doesn't exist,
2496 * start by reading the data for the full target object from
2497 * the parent so we can use it for a copyup to the target.
2499 if (known)
2500 return rbd_img_obj_parent_read_full(obj_request);
2502 /* We don't know whether the target exists. Go find out. */
2504 return rbd_img_obj_exists_submit(obj_request);
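/*
 * Editor's note: a standalone sketch (not driver code) of the dispatch
 * decision above, reduced to a pure predicate so the four conditions
 * that allow a "simple" object request are easy to test in isolation.
 * All names are local to the example.
 */
#if 0	/* standalone example */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool is_simple_request(bool write, bool layered, uint64_t overlap,
			      uint64_t img_offset, bool known, bool exists)
{
	return !write || !layered || overlap <= img_offset ||
	       (known && exists);
}

int main(void)
{
	/* A layered write below the overlap to an object whose existence
	 * is unknown takes the exists/copyup path (prints 0). */
	printf("%d\n", is_simple_request(true, true,
					 (uint64_t)6 << 20, 0,
					 false, false));
	return 0;
}
#endif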
2507 static int rbd_img_request_submit(struct rbd_img_request *img_request)
2509 struct rbd_obj_request *obj_request;
2510 struct rbd_obj_request *next_obj_request;
2512 dout("%s: img %p\n", __func__, img_request);
2513 for_each_obj_request_safe(img_request, obj_request, next_obj_request) {
2516 ret = rbd_img_obj_request_submit(obj_request);
2524 static void rbd_img_parent_read_callback(struct rbd_img_request *img_request)
2526 struct rbd_obj_request *obj_request;
2527 struct rbd_device *rbd_dev;
2530 rbd_assert(img_request_child_test(img_request));
2532 obj_request = img_request->obj_request;
2533 rbd_assert(obj_request);
2534 rbd_assert(obj_request->img_request);
2536 obj_request->result = img_request->result;
2537 if (obj_request->result)
2541 * We need to zero anything beyond the parent overlap
2542 * boundary. Since rbd_img_obj_request_read_callback()
2543 * will zero anything beyond the end of a short read, an
2544 * easy way to do this is to pretend the data from the
2545 * parent came up short--ending at the overlap boundary.
2547 rbd_assert(obj_request->img_offset < U64_MAX - obj_request->length);
2548 obj_end = obj_request->img_offset + obj_request->length;
2549 rbd_dev = obj_request->img_request->rbd_dev;
2550 if (obj_end > rbd_dev->parent_overlap) {
2553 if (obj_request->img_offset < rbd_dev->parent_overlap)
2554 xferred = rbd_dev->parent_overlap -
2555 obj_request->img_offset;
2557 obj_request->xferred = min(img_request->xferred, xferred);
2559 obj_request->xferred = img_request->xferred;
2562 rbd_img_request_put(img_request);
2563 rbd_img_obj_request_read_callback(obj_request);
2564 rbd_obj_request_complete(obj_request);
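/*
 * Editor's note: a standalone sketch (not driver code) of the
 * "pretend the parent read came up short" clamping above.  All names
 * are local to the example.
 */
#if 0	/* standalone example */
#include <stdint.h>
#include <stdio.h>

static uint64_t clamp_xferred(uint64_t img_offset, uint64_t length,
			      uint64_t parent_overlap, uint64_t xferred)
{
	if (img_offset + length > parent_overlap) {
		uint64_t limit = 0;

		if (img_offset < parent_overlap)
			limit = parent_overlap - img_offset;
		return xferred < limit ? xferred : limit;
	}
	return xferred;
}

int main(void)
{
	/* 4 MB read at image offset 4 MB with a 6 MB overlap: only the
	 * first 2 MB is parent data, so xferred is clamped to 2 MB. */
	printf("%llu\n", (unsigned long long)
	       clamp_xferred((uint64_t)4 << 20, (uint64_t)4 << 20,
			     (uint64_t)6 << 20, (uint64_t)4 << 20));
	return 0;
}
#endif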
2567 static void rbd_img_parent_read(struct rbd_obj_request *obj_request)
2569 struct rbd_device *rbd_dev;
2570 struct rbd_img_request *img_request;
2573 rbd_assert(obj_request_img_data_test(obj_request));
2574 rbd_assert(obj_request->img_request != NULL);
2575 rbd_assert(obj_request->result == (s32) -ENOENT);
2576 rbd_assert(obj_request->type == OBJ_REQUEST_BIO);
2578 rbd_dev = obj_request->img_request->rbd_dev;
2579 rbd_assert(rbd_dev->parent != NULL);
2580 /* rbd_read_finish(obj_request, obj_request->length); */
2581 img_request = rbd_img_request_create(rbd_dev->parent,
2582 obj_request->img_offset,
2583 obj_request->length,
2589 rbd_obj_request_get(obj_request);
2590 img_request->obj_request = obj_request;
2592 result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
2593 obj_request->bio_list);
2597 img_request->callback = rbd_img_parent_read_callback;
2598 result = rbd_img_request_submit(img_request);
2605 rbd_img_request_put(img_request);
2606 obj_request->result = result;
2607 obj_request->xferred = 0;
2608 obj_request_done_set(obj_request);
2611 static int rbd_obj_notify_ack(struct rbd_device *rbd_dev, u64 notify_id)
2613 struct rbd_obj_request *obj_request;
2614 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2617 obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
2618 OBJ_REQUEST_NODATA);
2623 obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, obj_request);
2624 if (!obj_request->osd_req)
2626 obj_request->callback = rbd_obj_request_put;
2628 osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_NOTIFY_ACK,
2630 rbd_osd_req_format_read(obj_request);
2632 ret = rbd_obj_request_submit(osdc, obj_request);
2635 rbd_obj_request_put(obj_request);
2640 static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
2642 struct rbd_device *rbd_dev = (struct rbd_device *)data;
2648 dout("%s: \"%s\" notify_id %llu opcode %u\n", __func__,
2649 rbd_dev->header_name, (unsigned long long)notify_id,
2650 (unsigned int)opcode);
2651 ret = rbd_dev_refresh(rbd_dev);
2653 rbd_warn(rbd_dev, ": header refresh error (%d)\n", ret);
2655 rbd_obj_notify_ack(rbd_dev, notify_id);
2659 * Request sync osd watch/unwatch. The value of "start" determines
2660 * whether a watch request is being initiated or torn down.
2662 static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev, int start)
2664 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2665 struct rbd_obj_request *obj_request;
2668 rbd_assert(start ^ !!rbd_dev->watch_event);
2669 rbd_assert(start ^ !!rbd_dev->watch_request);
2672 ret = ceph_osdc_create_event(osdc, rbd_watch_cb, rbd_dev,
2673 &rbd_dev->watch_event);
2676 rbd_assert(rbd_dev->watch_event != NULL);
2680 obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
2681 OBJ_REQUEST_NODATA);
2685 obj_request->osd_req = rbd_osd_req_create(rbd_dev, true, obj_request);
2686 if (!obj_request->osd_req)
2690 ceph_osdc_set_request_linger(osdc, obj_request->osd_req);
2692 ceph_osdc_unregister_linger_request(osdc,
2693 rbd_dev->watch_request->osd_req);
2695 osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_WATCH,
2696 rbd_dev->watch_event->cookie, 0, start);
2697 rbd_osd_req_format_write(obj_request);
2699 ret = rbd_obj_request_submit(osdc, obj_request);
2702 ret = rbd_obj_request_wait(obj_request);
2705 ret = obj_request->result;
2710 * A watch request is set to linger, so the underlying osd
2711 * request won't go away until we unregister it. We retain
2712 * a pointer to the object request during that time (in
2713 * rbd_dev->watch_request), so we'll keep a reference to
2714 * it. We'll drop that reference (below) after we've unregistered it.
2718 rbd_dev->watch_request = obj_request;
2723 /* We have successfully torn down the watch request */
2725 rbd_obj_request_put(rbd_dev->watch_request);
2726 rbd_dev->watch_request = NULL;
2728 /* Cancel the event if we're tearing down, or on error */
2729 ceph_osdc_cancel_event(rbd_dev->watch_event);
2730 rbd_dev->watch_event = NULL;
2732 rbd_obj_request_put(obj_request);
2738 * Synchronous osd object method call. Returns the number of bytes
2739 * returned in the inbound buffer, or a negative error code.
2741 static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
2742 const char *object_name,
2743 const char *class_name,
2744 const char *method_name,
2745 const void *outbound,
2746 size_t outbound_size,
2748 size_t inbound_size)
2750 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2751 struct rbd_obj_request *obj_request;
2752 struct page **pages;
2757 * Method calls are ultimately read operations. The result
2758 * should be placed into the inbound buffer provided. They
2759 * also supply outbound data--parameters for the object
2760 * method. Currently if this is present it will be a snapshot id.
2763 page_count = (u32)calc_pages_for(0, inbound_size);
2764 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2766 return PTR_ERR(pages);
2769 obj_request = rbd_obj_request_create(object_name, 0, inbound_size,
2774 obj_request->pages = pages;
2775 obj_request->page_count = page_count;
2777 obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, obj_request);
2778 if (!obj_request->osd_req)
2781 osd_req_op_cls_init(obj_request->osd_req, 0, CEPH_OSD_OP_CALL,
2782 class_name, method_name);
2783 if (outbound_size) {
2784 struct ceph_pagelist *pagelist;
2786 pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS);
2790 ceph_pagelist_init(pagelist);
2791 ceph_pagelist_append(pagelist, outbound, outbound_size);
2792 osd_req_op_cls_request_data_pagelist(obj_request->osd_req, 0,
2795 osd_req_op_cls_response_data_pages(obj_request->osd_req, 0,
2796 obj_request->pages, inbound_size,
2798 rbd_osd_req_format_read(obj_request);
2800 ret = rbd_obj_request_submit(osdc, obj_request);
2803 ret = rbd_obj_request_wait(obj_request);
2807 ret = obj_request->result;
2811 rbd_assert(obj_request->xferred < (u64)INT_MAX);
2812 ret = (int)obj_request->xferred;
2813 ceph_copy_from_page_vector(pages, inbound, 0, obj_request->xferred);
2816 rbd_obj_request_put(obj_request);
2818 ceph_release_page_vector(pages, page_count);
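/*
 * Editor's note: the page-vector sizing in this file leans on libceph's
 * calc_pages_for().  Below is a standalone sketch of that computation
 * as we read it (not the driver's code; a 4 KB page size is assumed
 * for the example).
 */
#if 0	/* standalone example */
#include <stdint.h>
#include <stdio.h>

#define EX_PAGE_SHIFT	12			/* assume 4 KB pages */
#define EX_PAGE_SIZE	(1ULL << EX_PAGE_SHIFT)

/* Number of pages spanned by len bytes starting at offset off. */
static uint32_t ex_calc_pages_for(uint64_t off, uint64_t len)
{
	return (uint32_t)(((off + len + EX_PAGE_SIZE - 1) >> EX_PAGE_SHIFT) -
			  (off >> EX_PAGE_SHIFT));
}

int main(void)
{
	printf("%u\n", ex_calc_pages_for(0, 16));	/* 1 page */
	printf("%u\n", ex_calc_pages_for(4090, 16));	/* 2: straddles */
	return 0;
}
#endif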
2823 static void rbd_request_fn(struct request_queue *q)
2824 __releases(q->queue_lock) __acquires(q->queue_lock)
2826 struct rbd_device *rbd_dev = q->queuedata;
2827 bool read_only = rbd_dev->mapping.read_only;
2831 while ((rq = blk_fetch_request(q))) {
2832 bool write_request = rq_data_dir(rq) == WRITE;
2833 struct rbd_img_request *img_request;
2837 /* Ignore any non-FS requests that filter through. */
2839 if (rq->cmd_type != REQ_TYPE_FS) {
2840 dout("%s: non-fs request type %d\n", __func__,
2841 (int) rq->cmd_type);
2842 __blk_end_request_all(rq, 0);
2846 /* Ignore/skip any zero-length requests */
2848 offset = (u64) blk_rq_pos(rq) << SECTOR_SHIFT;
2849 length = (u64) blk_rq_bytes(rq);
2852 dout("%s: zero-length request\n", __func__);
2853 __blk_end_request_all(rq, 0);
2857 spin_unlock_irq(q->queue_lock);
2859 /* Disallow writes to a read-only device */
2861 if (write_request) {
2865 rbd_assert(rbd_dev->spec->snap_id == CEPH_NOSNAP);
2869 * Quit early if the mapped snapshot no longer
2870 * exists. It's still possible the snapshot will
2871 * have disappeared by the time our request arrives
2872 * at the osd, but there's no sense in sending it if we already know.
2875 if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags)) {
2876 dout("request for non-existent snapshot");
2877 rbd_assert(rbd_dev->spec->snap_id != CEPH_NOSNAP);
2883 if (offset && length > U64_MAX - offset + 1) {
2884 rbd_warn(rbd_dev, "bad request range (%llu~%llu)\n",
2886 goto end_request; /* Shouldn't happen */
2890 if (offset + length > rbd_dev->mapping.size) {
2891 rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)\n",
2892 offset, length, rbd_dev->mapping.size);
2897 img_request = rbd_img_request_create(rbd_dev, offset, length,
2898 write_request, false);
2902 img_request->rq = rq;
2904 result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
2907 result = rbd_img_request_submit(img_request);
2909 rbd_img_request_put(img_request);
2911 spin_lock_irq(q->queue_lock);
2913 rbd_warn(rbd_dev, "%s %llx at %llx result %d\n",
2914 write_request ? "write" : "read",
2915 length, offset, result);
2917 __blk_end_request_all(rq, result);
2923 * rbd_merge_bvec() is a queue callback. It makes sure we don't create
2924 * a bio that spans multiple osd objects. One exception is a single-page
2925 * bio, which we handle later at bio_chain_clone_range()
2927 static int rbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bmd,
2928 struct bio_vec *bvec)
2930 struct rbd_device *rbd_dev = q->queuedata;
2931 sector_t sector_offset;
2932 sector_t sectors_per_obj;
2933 sector_t obj_sector_offset;
2937 * Convert the partition-relative bio start sector to a sector
2938 * offset within the rbd object that encloses it on the device.
2941 sector_offset = get_start_sect(bmd->bi_bdev) + bmd->bi_sector;
2942 sectors_per_obj = 1 << (rbd_dev->header.obj_order - SECTOR_SHIFT);
2943 obj_sector_offset = sector_offset & (sectors_per_obj - 1);
2946 * Compute the number of bytes from that offset to the end
2947 * of the object. Account for what's already used by the bio.
2949 ret = (int) (sectors_per_obj - obj_sector_offset) << SECTOR_SHIFT;
2950 if (ret > bmd->bi_size)
2951 ret -= bmd->bi_size;
2956 * Don't send back more than was asked for. And if the bio
2957 * was empty, let the whole thing through because: "Note
2958 * that a block device *must* allow a single page to be
2959 * added to an empty bio."
2961 rbd_assert(bvec->bv_len <= PAGE_SIZE);
2962 if (ret > (int) bvec->bv_len || !bmd->bi_size)
2963 ret = (int) bvec->bv_len;
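/*
 * Editor's note: a standalone sketch (not driver code) of the sector
 * arithmetic above: how many bytes remain from a device-absolute
 * sector to the end of the rbd object that contains it.  Names are
 * local to the example.
 */
#if 0	/* standalone example */
#include <stdint.h>
#include <stdio.h>

#define EX_SECTOR_SHIFT	9

static uint64_t bytes_to_object_end(uint64_t abs_sector, uint8_t obj_order)
{
	uint64_t sectors_per_obj = 1ULL << (obj_order - EX_SECTOR_SHIFT);
	uint64_t obj_sector_offset = abs_sector & (sectors_per_obj - 1);

	return (sectors_per_obj - obj_sector_offset) << EX_SECTOR_SHIFT;
}

int main(void)
{
	/* 4 MB objects (order 22) hold 8192 sectors, so sector 8191 is
	 * the last one in the first object: 512 bytes remain. */
	printf("%llu\n", (unsigned long long)bytes_to_object_end(8191, 22));
	return 0;
}
#endif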
2968 static void rbd_free_disk(struct rbd_device *rbd_dev)
2970 struct gendisk *disk = rbd_dev->disk;
2975 rbd_dev->disk = NULL;
2976 if (disk->flags & GENHD_FL_UP) {
2979 blk_cleanup_queue(disk->queue);
2984 static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
2985 const char *object_name,
2986 u64 offset, u64 length, void *buf)
2989 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2990 struct rbd_obj_request *obj_request;
2991 struct page **pages = NULL;
2996 page_count = (u32) calc_pages_for(offset, length);
2997 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2999 ret = PTR_ERR(pages);
3002 obj_request = rbd_obj_request_create(object_name, offset, length,
3007 obj_request->pages = pages;
3008 obj_request->page_count = page_count;
3010 obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, obj_request);
3011 if (!obj_request->osd_req)
3014 osd_req_op_extent_init(obj_request->osd_req, 0, CEPH_OSD_OP_READ,
3015 offset, length, 0, 0);
3016 osd_req_op_extent_osd_data_pages(obj_request->osd_req, 0,
3018 obj_request->length,
3019 obj_request->offset & ~PAGE_MASK,
3021 rbd_osd_req_format_read(obj_request);
3023 ret = rbd_obj_request_submit(osdc, obj_request);
3026 ret = rbd_obj_request_wait(obj_request);
3030 ret = obj_request->result;
3034 rbd_assert(obj_request->xferred <= (u64) SIZE_MAX);
3035 size = (size_t) obj_request->xferred;
3036 ceph_copy_from_page_vector(pages, buf, 0, size);
3037 rbd_assert(size <= (size_t)INT_MAX);
3041 rbd_obj_request_put(obj_request);
3043 ceph_release_page_vector(pages, page_count);
3049 * Read the complete header for the given rbd device. On successful
3050 * return, the rbd_dev->header field will contain up-to-date
3051 * information about the image.
3053 static int rbd_dev_v1_header_read(struct rbd_device *rbd_dev)
3055 struct rbd_image_header_ondisk *ondisk = NULL;
3062 * The complete header will include an array of its 64-bit
3063 * snapshot ids, followed by the names of those snapshots as
3064 * a contiguous block of NUL-terminated strings. Note that
3065 * the number of snapshots could change by the time we read
3066 * it in, in which case we re-read it.
3073 size = sizeof (*ondisk);
3074 size += snap_count * sizeof (struct rbd_image_snap_ondisk);
3076 ondisk = kmalloc(size, GFP_KERNEL);
3080 ret = rbd_obj_read_sync(rbd_dev, rbd_dev->header_name,
3084 if ((size_t)ret < size) {
3086 rbd_warn(rbd_dev, "short header read (want %zd got %d)",
3090 if (!rbd_dev_ondisk_valid(ondisk)) {
3092 rbd_warn(rbd_dev, "invalid header");
3096 names_size = le64_to_cpu(ondisk->snap_names_len);
3097 want_count = snap_count;
3098 snap_count = le32_to_cpu(ondisk->snap_count);
3099 } while (snap_count != want_count);
3101 ret = rbd_header_from_disk(rbd_dev, ondisk);
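/*
 * Editor's note: a standalone sketch (not driver code) of the
 * read-retry-until-stable pattern used above: size the buffer from the
 * last observed snapshot count, fetch, and retry whenever the count
 * changed in the meantime.  remote_count() is a stand-in for the
 * synchronous header read.
 */
#if 0	/* standalone example */
#include <stdint.h>
#include <stdio.h>

/* Simulated remote snapshot count; it changes once under our feet. */
static uint32_t remote_count(void)
{
	static int calls;

	return calls++ ? 3 : 1;
}

int main(void)
{
	uint32_t want_count;
	uint32_t snap_count = 0;

	do {
		want_count = snap_count;
		/* a real caller (re)allocates a buffer sized for
		 * want_count snapshots here, then re-reads the header */
		snap_count = remote_count();
	} while (snap_count != want_count);

	printf("stable at %u snapshots\n", snap_count);
	return 0;
}
#endif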
3109 * Re-read the complete on-disk header for a format 1 image.
3111 static int rbd_dev_v1_refresh(struct rbd_device *rbd_dev)
3113 return rbd_dev_v1_header_read(rbd_dev);
3117 * Clear the rbd device's EXISTS flag if the snapshot it's mapped to
3118 * has disappeared from the (just updated) snapshot context.
3120 static void rbd_exists_validate(struct rbd_device *rbd_dev)
3124 if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags))
3127 snap_id = rbd_dev->spec->snap_id;
3128 if (snap_id == CEPH_NOSNAP)
3131 if (rbd_dev_snap_index(rbd_dev, snap_id) == BAD_SNAP_INDEX)
3132 clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
3135 static int rbd_dev_refresh(struct rbd_device *rbd_dev)
3140 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
3141 mapping_size = rbd_dev->mapping.size;
3142 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
3143 if (rbd_dev->image_format == 1)
3144 ret = rbd_dev_v1_refresh(rbd_dev);
3146 ret = rbd_dev_v2_refresh(rbd_dev);
3148 /* If it's a mapped snapshot, validate its EXISTS flag */
3150 rbd_exists_validate(rbd_dev);
3151 mutex_unlock(&ctl_mutex);
3152 if (mapping_size != rbd_dev->mapping.size) {
3155 size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
3156 dout("setting size to %llu sectors", (unsigned long long)size);
3157 set_capacity(rbd_dev->disk, size);
3158 revalidate_disk(rbd_dev->disk);
3164 static int rbd_init_disk(struct rbd_device *rbd_dev)
3166 struct gendisk *disk;
3167 struct request_queue *q;
3170 /* create gendisk info */
3171 disk = alloc_disk(RBD_MINORS_PER_MAJOR);
3175 snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
3177 disk->major = rbd_dev->major;
3178 disk->first_minor = 0;
3179 disk->fops = &rbd_bd_ops;
3180 disk->private_data = rbd_dev;
3182 q = blk_init_queue(rbd_request_fn, &rbd_dev->lock);
3186 /* We use the default size, but let's be explicit about it. */
3187 blk_queue_physical_block_size(q, SECTOR_SIZE);
3189 /* set io sizes to object size */
3190 segment_size = rbd_obj_bytes(&rbd_dev->header);
3191 blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE);
3192 blk_queue_max_segment_size(q, segment_size);
3193 blk_queue_io_min(q, segment_size);
3194 blk_queue_io_opt(q, segment_size);
3196 blk_queue_merge_bvec(q, rbd_merge_bvec);
3199 q->queuedata = rbd_dev;
3201 rbd_dev->disk = disk;
3214 static struct rbd_device *dev_to_rbd_dev(struct device *dev)
3216 return container_of(dev, struct rbd_device, dev);
3219 static ssize_t rbd_size_show(struct device *dev,
3220 struct device_attribute *attr, char *buf)
3222 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3224 return sprintf(buf, "%llu\n",
3225 (unsigned long long)rbd_dev->mapping.size);
3229 * Note this shows the features for whatever's mapped, which is not
3230 * necessarily the base image.
3232 static ssize_t rbd_features_show(struct device *dev,
3233 struct device_attribute *attr, char *buf)
3235 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3237 return sprintf(buf, "0x%016llx\n",
3238 (unsigned long long)rbd_dev->mapping.features);
3241 static ssize_t rbd_major_show(struct device *dev,
3242 struct device_attribute *attr, char *buf)
3244 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3247 return sprintf(buf, "%d\n", rbd_dev->major);
3249 return sprintf(buf, "(none)\n");
3253 static ssize_t rbd_client_id_show(struct device *dev,
3254 struct device_attribute *attr, char *buf)
3256 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3258 return sprintf(buf, "client%lld\n",
3259 ceph_client_id(rbd_dev->rbd_client->client));
3262 static ssize_t rbd_pool_show(struct device *dev,
3263 struct device_attribute *attr, char *buf)
3265 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3267 return sprintf(buf, "%s\n", rbd_dev->spec->pool_name);
3270 static ssize_t rbd_pool_id_show(struct device *dev,
3271 struct device_attribute *attr, char *buf)
3273 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3275 return sprintf(buf, "%llu\n",
3276 (unsigned long long) rbd_dev->spec->pool_id);
3279 static ssize_t rbd_name_show(struct device *dev,
3280 struct device_attribute *attr, char *buf)
3282 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3284 if (rbd_dev->spec->image_name)
3285 return sprintf(buf, "%s\n", rbd_dev->spec->image_name);
3287 return sprintf(buf, "(unknown)\n");
3290 static ssize_t rbd_image_id_show(struct device *dev,
3291 struct device_attribute *attr, char *buf)
3293 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3295 return sprintf(buf, "%s\n", rbd_dev->spec->image_id);
3299 * Shows the name of the currently-mapped snapshot (or
3300 * RBD_SNAP_HEAD_NAME for the base image).
3302 static ssize_t rbd_snap_show(struct device *dev,
3303 struct device_attribute *attr,
3306 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3308 return sprintf(buf, "%s\n", rbd_dev->spec->snap_name);
3312 * For an rbd v2 image, shows the ids and names of the pool, image,
3313 * and snapshot for the parent image, plus the parent overlap. If
3314 * there is no parent, simply shows "(no parent image)".
3316 static ssize_t rbd_parent_show(struct device *dev,
3317 struct device_attribute *attr,
3320 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3321 struct rbd_spec *spec = rbd_dev->parent_spec;
3326 return sprintf(buf, "(no parent image)\n");
3328 count = sprintf(bufp, "pool_id %llu\npool_name %s\n",
3329 (unsigned long long) spec->pool_id, spec->pool_name);
3334 count = sprintf(bufp, "image_id %s\nimage_name %s\n", spec->image_id,
3335 spec->image_name ? spec->image_name : "(unknown)");
3340 count = sprintf(bufp, "snap_id %llu\nsnap_name %s\n",
3341 (unsigned long long) spec->snap_id, spec->snap_name);
3346 count = sprintf(bufp, "overlap %llu\n", rbd_dev->parent_overlap);
3351 return (ssize_t) (bufp - buf);
3354 static ssize_t rbd_image_refresh(struct device *dev,
3355 struct device_attribute *attr,
3359 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3362 ret = rbd_dev_refresh(rbd_dev);
3364 rbd_warn(rbd_dev, ": manual header refresh error (%d)\n", ret);
3366 return ret < 0 ? ret : size;
3369 static DEVICE_ATTR(size, S_IRUGO, rbd_size_show, NULL);
3370 static DEVICE_ATTR(features, S_IRUGO, rbd_features_show, NULL);
3371 static DEVICE_ATTR(major, S_IRUGO, rbd_major_show, NULL);
3372 static DEVICE_ATTR(client_id, S_IRUGO, rbd_client_id_show, NULL);
3373 static DEVICE_ATTR(pool, S_IRUGO, rbd_pool_show, NULL);
3374 static DEVICE_ATTR(pool_id, S_IRUGO, rbd_pool_id_show, NULL);
3375 static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL);
3376 static DEVICE_ATTR(image_id, S_IRUGO, rbd_image_id_show, NULL);
3377 static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh);
3378 static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL);
3379 static DEVICE_ATTR(parent, S_IRUGO, rbd_parent_show, NULL);
3381 static struct attribute *rbd_attrs[] = {
3382 &dev_attr_size.attr,
3383 &dev_attr_features.attr,
3384 &dev_attr_major.attr,
3385 &dev_attr_client_id.attr,
3386 &dev_attr_pool.attr,
3387 &dev_attr_pool_id.attr,
3388 &dev_attr_name.attr,
3389 &dev_attr_image_id.attr,
3390 &dev_attr_current_snap.attr,
3391 &dev_attr_parent.attr,
3392 &dev_attr_refresh.attr,
3396 static struct attribute_group rbd_attr_group = {
3400 static const struct attribute_group *rbd_attr_groups[] = {
3405 static void rbd_sysfs_dev_release(struct device *dev)
3409 static struct device_type rbd_device_type = {
3411 .groups = rbd_attr_groups,
3412 .release = rbd_sysfs_dev_release,
3415 static struct rbd_spec *rbd_spec_get(struct rbd_spec *spec)
3417 kref_get(&spec->kref);
3422 static void rbd_spec_free(struct kref *kref);
3423 static void rbd_spec_put(struct rbd_spec *spec)
3426 kref_put(&spec->kref, rbd_spec_free);
3429 static struct rbd_spec *rbd_spec_alloc(void)
3431 struct rbd_spec *spec;
3433 spec = kzalloc(sizeof (*spec), GFP_KERNEL);
3436 kref_init(&spec->kref);
3441 static void rbd_spec_free(struct kref *kref)
3443 struct rbd_spec *spec = container_of(kref, struct rbd_spec, kref);
3445 kfree(spec->pool_name);
3446 kfree(spec->image_id);
3447 kfree(spec->image_name);
3448 kfree(spec->snap_name);
3452 static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
3453 struct rbd_spec *spec)
3455 struct rbd_device *rbd_dev;
3457 rbd_dev = kzalloc(sizeof (*rbd_dev), GFP_KERNEL);
3461 spin_lock_init(&rbd_dev->lock);
3463 INIT_LIST_HEAD(&rbd_dev->node);
3464 init_rwsem(&rbd_dev->header_rwsem);
3466 rbd_dev->spec = spec;
3467 rbd_dev->rbd_client = rbdc;
3469 /* Initialize the layout used for all rbd requests */
3471 rbd_dev->layout.fl_stripe_unit = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
3472 rbd_dev->layout.fl_stripe_count = cpu_to_le32(1);
3473 rbd_dev->layout.fl_object_size = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
3474 rbd_dev->layout.fl_pg_pool = cpu_to_le32((u32) spec->pool_id);
3479 static void rbd_dev_destroy(struct rbd_device *rbd_dev)
3481 rbd_put_client(rbd_dev->rbd_client);
3482 rbd_spec_put(rbd_dev->spec);
3487 * Get the size and object order for an image snapshot, or if
3488 * snap_id is CEPH_NOSNAP, get this information for the base image.
3491 static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
3492 u8 *order, u64 *snap_size)
3494 __le64 snapid = cpu_to_le64(snap_id);
3499 } __attribute__ ((packed)) size_buf = { 0 };
3501 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3503 &snapid, sizeof (snapid),
3504 &size_buf, sizeof (size_buf));
3505 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3508 if (ret < sizeof (size_buf))
3512 *order = size_buf.order;
3513 *snap_size = le64_to_cpu(size_buf.size);
3515 dout(" snap_id 0x%016llx order = %u, snap_size = %llu\n",
3516 (unsigned long long)snap_id, (unsigned int)*order,
3517 (unsigned long long)*snap_size);
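/*
 * Editor's note: a standalone sketch (not driver code) of decoding the
 * packed get_size reply above: a single order byte followed by a
 * little-endian 64-bit size.  The byte-by-byte assembly is the
 * example's own; the driver relies on le64_to_cpu() instead.
 */
#if 0	/* standalone example */
#include <stdint.h>
#include <stdio.h>

static uint64_t le64_val(const uint8_t *p)
{
	uint64_t v = 0;
	int i;

	for (i = 7; i >= 0; i--)
		v = (v << 8) | p[i];
	return v;
}

int main(void)
{
	/* order 22, size 0x100000 (1 MB), little-endian on the wire */
	const uint8_t reply[9] = { 22, 0x00, 0x00, 0x10, 0, 0, 0, 0, 0 };

	printf("order %u size %llu\n", reply[0],
	       (unsigned long long)le64_val(reply + 1));
	return 0;
}
#endif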
3522 static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev)
3524 return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
3525 &rbd_dev->header.obj_order,
3526 &rbd_dev->header.image_size);
3529 static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
3535 reply_buf = kzalloc(RBD_OBJ_PREFIX_LEN_MAX, GFP_KERNEL);
3539 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3540 "rbd", "get_object_prefix", NULL, 0,
3541 reply_buf, RBD_OBJ_PREFIX_LEN_MAX);
3542 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3547 rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
3548 p + ret, NULL, GFP_NOIO);
3551 if (IS_ERR(rbd_dev->header.object_prefix)) {
3552 ret = PTR_ERR(rbd_dev->header.object_prefix);
3553 rbd_dev->header.object_prefix = NULL;
3555 dout(" object_prefix = %s\n", rbd_dev->header.object_prefix);
3563 static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
3566 __le64 snapid = cpu_to_le64(snap_id);
3570 } __attribute__ ((packed)) features_buf = { 0 };
3574 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3575 "rbd", "get_features",
3576 &snapid, sizeof (snapid),
3577 &features_buf, sizeof (features_buf));
3578 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3581 if (ret < sizeof (features_buf))
3584 incompat = le64_to_cpu(features_buf.incompat);
3585 if (incompat & ~RBD_FEATURES_SUPPORTED)
3588 *snap_features = le64_to_cpu(features_buf.features);
3590 dout(" snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n",
3591 (unsigned long long)snap_id,
3592 (unsigned long long)*snap_features,
3593 (unsigned long long)le64_to_cpu(features_buf.incompat));
3598 static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
3600 return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
3601 &rbd_dev->header.features);
3604 static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
3606 struct rbd_spec *parent_spec;
3608 void *reply_buf = NULL;
3616 parent_spec = rbd_spec_alloc();
3620 size = sizeof (__le64) + /* pool_id */
3621 sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX + /* image_id */
3622 sizeof (__le64) + /* snap_id */
3623 sizeof (__le64); /* overlap */
3624 reply_buf = kmalloc(size, GFP_KERNEL);
3630 snapid = cpu_to_le64(CEPH_NOSNAP);
3631 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3632 "rbd", "get_parent",
3633 &snapid, sizeof (snapid),
3635 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3640 end = reply_buf + ret;
3642 ceph_decode_64_safe(&p, end, parent_spec->pool_id, out_err);
3643 if (parent_spec->pool_id == CEPH_NOPOOL)
3644 goto out; /* No parent? No problem. */
3646 /* The ceph file layout needs to fit pool id in 32 bits */
3649 if (parent_spec->pool_id > (u64)U32_MAX) {
3650 rbd_warn(NULL, "parent pool id too large (%llu > %u)\n",
3651 (unsigned long long)parent_spec->pool_id, U32_MAX);
3655 image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
3656 if (IS_ERR(image_id)) {
3657 ret = PTR_ERR(image_id);
3660 parent_spec->image_id = image_id;
3661 ceph_decode_64_safe(&p, end, parent_spec->snap_id, out_err);
3662 ceph_decode_64_safe(&p, end, overlap, out_err);
3664 rbd_dev->parent_overlap = overlap;
3665 rbd_dev->parent_spec = parent_spec;
3666 parent_spec = NULL; /* rbd_dev now owns this */
3671 rbd_spec_put(parent_spec);
3676 static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
3680 __le64 stripe_count;
3681 } __attribute__ ((packed)) striping_info_buf = { 0 };
3682 size_t size = sizeof (striping_info_buf);
3689 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3690 "rbd", "get_stripe_unit_count", NULL, 0,
3691 (char *)&striping_info_buf, size);
3692 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3699 * We don't actually support the "fancy striping" feature
3700 * (STRIPINGV2) yet, but if the striping sizes are the
3701 * defaults the behavior is the same as before. So find
3702 * out, and only fail if the image has non-default values.
3705 obj_size = (u64)1 << rbd_dev->header.obj_order;
3706 p = &striping_info_buf;
3707 stripe_unit = ceph_decode_64(&p);
3708 if (stripe_unit != obj_size) {
3709 rbd_warn(rbd_dev, "unsupported stripe unit "
3710 "(got %llu want %llu)",
3711 stripe_unit, obj_size);
3714 stripe_count = ceph_decode_64(&p);
3715 if (stripe_count != 1) {
3716 rbd_warn(rbd_dev, "unsupported stripe count "
3717 "(got %llu want 1)", stripe_count);
3720 rbd_dev->header.stripe_unit = stripe_unit;
3721 rbd_dev->header.stripe_count = stripe_count;
3726 static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
3728 size_t image_id_size;
3733 void *reply_buf = NULL;
3735 char *image_name = NULL;
3738 rbd_assert(!rbd_dev->spec->image_name);
3740 len = strlen(rbd_dev->spec->image_id);
3741 image_id_size = sizeof (__le32) + len;
3742 image_id = kmalloc(image_id_size, GFP_KERNEL);
3747 end = image_id + image_id_size;
3748 ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32)len);
3750 size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX;
3751 reply_buf = kmalloc(size, GFP_KERNEL);
3755 ret = rbd_obj_method_sync(rbd_dev, RBD_DIRECTORY,
3756 "rbd", "dir_get_name",
3757 image_id, image_id_size,
3762 end = reply_buf + ret;
3764 image_name = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
3765 if (IS_ERR(image_name))
3768 dout("%s: name is %s len is %zd\n", __func__, image_name, len);
3776 static u64 rbd_v1_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
3778 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
3779 const char *snap_name;
3782 /* Skip over names until we find the one we are looking for */
3784 snap_name = rbd_dev->header.snap_names;
3785 while (which < snapc->num_snaps) {
3786 if (!strcmp(name, snap_name))
3787 return snapc->snaps[which];
3788 snap_name += strlen(snap_name) + 1;
3794 static u64 rbd_v2_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
3796 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
3801 for (which = 0; !found && which < snapc->num_snaps; which++) {
3802 const char *snap_name;
3804 snap_id = snapc->snaps[which];
3805 snap_name = rbd_dev_v2_snap_name(rbd_dev, snap_id);
3806 if (IS_ERR(snap_name))
3808 found = !strcmp(name, snap_name);
3811 return found ? snap_id : CEPH_NOSNAP;
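/*
 * Editor's note: a standalone sketch (not driver code) of walking a
 * format 1 snap_names block as done above: names stored back to back
 * as NUL-terminated strings, in the same order as the snapshot ids.
 */
#if 0	/* standalone example */
#include <stdio.h>
#include <string.h>

int main(void)
{
	const char names[] = "alpha\0beta\0gamma";	/* 3 packed names */
	const char *p = names;
	unsigned int which;

	for (which = 0; which < 3; which++) {
		printf("snap %u: %s\n", which, p);
		p += strlen(p) + 1;
	}
	return 0;
}
#endif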
3815 * Assumes name is never RBD_SNAP_HEAD_NAME; returns CEPH_NOSNAP if
3816 * no snapshot by that name is found, or if an error occurs.
3818 static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
3820 if (rbd_dev->image_format == 1)
3821 return rbd_v1_snap_id_by_name(rbd_dev, name);
3823 return rbd_v2_snap_id_by_name(rbd_dev, name);
3827 * When an rbd image has a parent image, it is identified by the
3828 * pool, image, and snapshot ids (not names). This function fills
3829 * in the names for those ids. (It's OK if we can't figure out the
3830 * name for an image id, but the pool and snapshot ids should always
3831 * exist and have names.) All names in an rbd spec are dynamically allocated.
3834 * When an image being mapped (not a parent) is probed, we have the
3835 * pool name and pool id, image name and image id, and the snapshot
3836 * name. The only thing we're missing is the snapshot id.
3838 static int rbd_dev_spec_update(struct rbd_device *rbd_dev)
3840 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3841 struct rbd_spec *spec = rbd_dev->spec;
3842 const char *pool_name;
3843 const char *image_name;
3844 const char *snap_name;
3848 * An image being mapped will have the pool name (etc.), but
3849 * we need to look up the snapshot id.
3851 if (spec->pool_name) {
3852 if (strcmp(spec->snap_name, RBD_SNAP_HEAD_NAME)) {
3855 snap_id = rbd_snap_id_by_name(rbd_dev, spec->snap_name);
3856 if (snap_id == CEPH_NOSNAP)
3858 spec->snap_id = snap_id;
3860 spec->snap_id = CEPH_NOSNAP;
3866 /* Get the pool name; we have to make our own copy of this */
3868 pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, spec->pool_id);
3870 rbd_warn(rbd_dev, "no pool with id %llu", spec->pool_id);
3873 pool_name = kstrdup(pool_name, GFP_KERNEL);
3877 /* Fetch the image name; tolerate failure here */
3879 image_name = rbd_dev_image_name(rbd_dev);
3881 rbd_warn(rbd_dev, "unable to get image name");
3883 /* Look up the snapshot name, and make a copy */
3885 snap_name = rbd_snap_name(rbd_dev, spec->snap_id);
3891 spec->pool_name = pool_name;
3892 spec->image_name = image_name;
3893 spec->snap_name = snap_name;
3903 static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
3912 struct ceph_snap_context *snapc;
3916 * We'll need room for the seq value (maximum snapshot id),
3917 * snapshot count, and array of that many snapshot ids.
3918 * For now we have a fixed upper limit on the number we're
3919 * prepared to receive.
3921 size = sizeof (__le64) + sizeof (__le32) +
3922 RBD_MAX_SNAP_COUNT * sizeof (__le64);
3923 reply_buf = kzalloc(size, GFP_KERNEL);
3927 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3928 "rbd", "get_snapcontext", NULL, 0,
3930 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3935 end = reply_buf + ret;
3937 ceph_decode_64_safe(&p, end, seq, out);
3938 ceph_decode_32_safe(&p, end, snap_count, out);
3941 * Make sure the reported number of snapshot ids wouldn't go
3942 * beyond the end of our buffer. But before checking that,
3943 * make sure the computed size of the snapshot context we
3944 * allocate is representable in a size_t.
3946 if (snap_count > (SIZE_MAX - sizeof (struct ceph_snap_context))
3951 if (!ceph_has_room(&p, end, snap_count * sizeof (__le64)))
3955 snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
3961 for (i = 0; i < snap_count; i++)
3962 snapc->snaps[i] = ceph_decode_64(&p);
3964 ceph_put_snap_context(rbd_dev->header.snapc);
3965 rbd_dev->header.snapc = snapc;
3967 dout(" snap context seq = %llu, snap_count = %u\n",
3968 (unsigned long long)seq, (unsigned int)snap_count);
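/*
 * Editor's note: a standalone sketch (not driver code) of the
 * overflow-safe sizing check used above: validate the element count
 * before computing header + count * element_size, so the size_t
 * arithmetic cannot wrap.  EX_HEADER_SIZE is a stand-in value.
 */
#if 0	/* standalone example */
#include <stdint.h>
#include <stdio.h>

#define EX_HEADER_SIZE	64	/* stand-in for the fixed header size */

static int size_for(uint32_t count, size_t *size)
{
	if (count > (SIZE_MAX - EX_HEADER_SIZE) / sizeof(uint64_t))
		return -1;	/* count * 8 would overflow size_t */
	*size = EX_HEADER_SIZE + (size_t)count * sizeof(uint64_t);
	return 0;
}

int main(void)
{
	size_t size;

	if (!size_for(510, &size))	/* an RBD_MAX_SNAP_COUNT's worth */
		printf("%zu bytes\n", size);	/* 64 + 510 * 8 = 4144 */
	return 0;
}
#endif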
3975 static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
3986 size = sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN;
3987 reply_buf = kmalloc(size, GFP_KERNEL);
3989 return ERR_PTR(-ENOMEM);
3991 snapid = cpu_to_le64(snap_id);
3992 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3993 "rbd", "get_snapshot_name",
3994 &snapid, sizeof (snapid),
3996 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3998 snap_name = ERR_PTR(ret);
4003 end = reply_buf + ret;
4004 snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
4005 if (IS_ERR(snap_name))
4008 dout(" snap_id 0x%016llx snap_name = %s\n",
4009 (unsigned long long)snap_id, snap_name);
4016 static int rbd_dev_v2_refresh(struct rbd_device *rbd_dev)
4020 down_write(&rbd_dev->header_rwsem);
4022 ret = rbd_dev_v2_image_size(rbd_dev);
4025 if (rbd_dev->spec->snap_id == CEPH_NOSNAP)
4026 if (rbd_dev->mapping.size != rbd_dev->header.image_size)
4027 rbd_dev->mapping.size = rbd_dev->header.image_size;
4029 ret = rbd_dev_v2_snap_context(rbd_dev);
4030 dout("rbd_dev_v2_snap_context returned %d\n", ret);
4034 up_write(&rbd_dev->header_rwsem);
4039 static int rbd_bus_add_dev(struct rbd_device *rbd_dev)
4044 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
4046 dev = &rbd_dev->dev;
4047 dev->bus = &rbd_bus_type;
4048 dev->type = &rbd_device_type;
4049 dev->parent = &rbd_root_dev;
4050 dev->release = rbd_dev_device_release;
4051 dev_set_name(dev, "%d", rbd_dev->dev_id);
4052 ret = device_register(dev);
4054 mutex_unlock(&ctl_mutex);
4059 static void rbd_bus_del_dev(struct rbd_device *rbd_dev)
4061 device_unregister(&rbd_dev->dev);
4064 static atomic64_t rbd_dev_id_max = ATOMIC64_INIT(0);
4067 * Get a unique rbd identifier for the given new rbd_dev, and add
4068 * the rbd_dev to the global list. The minimum rbd id is 1.
4070 static void rbd_dev_id_get(struct rbd_device *rbd_dev)
4072 rbd_dev->dev_id = atomic64_inc_return(&rbd_dev_id_max);
4074 spin_lock(&rbd_dev_list_lock);
4075 list_add_tail(&rbd_dev->node, &rbd_dev_list);
4076 spin_unlock(&rbd_dev_list_lock);
4077 dout("rbd_dev %p given dev id %llu\n", rbd_dev,
4078 (unsigned long long) rbd_dev->dev_id);
4082 * Remove an rbd_dev from the global list, and record that its
4083 * identifier is no longer in use.
4085 static void rbd_dev_id_put(struct rbd_device *rbd_dev)
4087 struct list_head *tmp;
4088 int rbd_id = rbd_dev->dev_id;
4091 rbd_assert(rbd_id > 0);
4093 dout("rbd_dev %p released dev id %llu\n", rbd_dev,
4094 (unsigned long long) rbd_dev->dev_id);
4095 spin_lock(&rbd_dev_list_lock);
4096 list_del_init(&rbd_dev->node);
4099 * If the id being "put" is not the current maximum, there
4100 * is nothing special we need to do.
4102 if (rbd_id != atomic64_read(&rbd_dev_id_max)) {
4103 spin_unlock(&rbd_dev_list_lock);
4108 * We need to update the current maximum id. Search the
4109 * list to find out what it is. We're more likely to find
4110 * the maximum at the end, so search the list backward.
4113 list_for_each_prev(tmp, &rbd_dev_list) {
4114 struct rbd_device *rbd_dev;
4116 rbd_dev = list_entry(tmp, struct rbd_device, node);
4117 if (rbd_dev->dev_id > max_id)
4118 max_id = rbd_dev->dev_id;
4120 spin_unlock(&rbd_dev_list_lock);
4123 * The max id could have been updated by rbd_dev_id_get(), in
4124 * which case it now accurately reflects the new maximum.
4125 * Be careful not to overwrite the maximum value in that case.
4128 atomic64_cmpxchg(&rbd_dev_id_max, rbd_id, max_id);
4129 dout(" max dev id has been reset\n");
4133 * Skips over white space at *buf, and updates *buf to point to the
4134 * first found non-space character (if any). Returns the length of
4135 * the token (string of non-white space characters) found. Note
4136 * that *buf must be terminated with '\0'.
4138 static inline size_t next_token(const char **buf)
4141 * These are the characters that produce nonzero for
4142 * isspace() in the "C" and "POSIX" locales.
4144 const char *spaces = " \f\n\r\t\v";
4146 *buf += strspn(*buf, spaces); /* Find start of token */
4148 return strcspn(*buf, spaces); /* Return token length */
4152 * Finds the next token in *buf, and if the provided token buffer is
4153 * big enough, copies the found token into it. The result, if
4154 * copied, is guaranteed to be terminated with '\0'. Note that *buf
4155 * must be terminated with '\0' on entry.
4157 * Returns the length of the token found (not including the '\0').
4158 * Return value will be 0 if no token is found, and it will be >=
4159 * token_size if the token would not fit.
4161 * The *buf pointer will be updated to point beyond the end of the
4162 * found token. Note that this occurs even if the token buffer is
4163 * too small to hold it.
4165 static inline size_t copy_token(const char **buf,
4171 len = next_token(buf);
4172 if (len < token_size) {
4173 memcpy(token, *buf, len);
4174 *(token + len) = '\0';
4182 * Finds the next token in *buf, dynamically allocates a buffer big
4183 * enough to hold a copy of it, and copies the token into the new
4184 * buffer. The copy is guaranteed to be terminated with '\0'. Note
4185 * that a duplicate buffer is created even for a zero-length token.
4187 * Returns a pointer to the newly-allocated duplicate, or a null
4188 * pointer if memory for the duplicate was not available. If
4189 * the lenp argument is a non-null pointer, the length of the token
4190 * (not including the '\0') is returned in *lenp.
4192 * If successful, the *buf pointer will be updated to point beyond
4193 * the end of the found token.
4195 * Note: uses GFP_KERNEL for allocation.
4197 static inline char *dup_token(const char **buf, size_t *lenp)
4202 len = next_token(buf);
4203 dup = kmemdup(*buf, len + 1, GFP_KERNEL);
4206 *(dup + len) = '\0';
4216 * Parse the options provided for an "rbd add" (i.e., rbd image
4217 * mapping) request. These arrive via a write to /sys/bus/rbd/add,
4218 * and the data written is passed here via a NUL-terminated buffer.
4219 * Returns 0 if successful or an error code otherwise.
4221 * The information extracted from these options is recorded in
4222 * the other parameters which return dynamically-allocated
4225 * The address of a pointer that will refer to a ceph options
4226 * structure. Caller must release the returned pointer using
4227 * ceph_destroy_options() when it is no longer needed.
4229 * Address of an rbd options pointer. Fully initialized by
4230 * this function; caller must release with kfree().
4232 * Address of an rbd image specification pointer. Fully
4233 * initialized by this function based on parsed options.
4234 * Caller must release with rbd_spec_put().
4236 * The options passed take this form:
4237 * <mon_addrs> <options> <pool_name> <image_name> [<snap_name>]
4238 * where:
4239 * <mon_addrs>
4240 * A comma-separated list of one or more monitor addresses.
4241 * A monitor address is an ip address, optionally followed
4242 * by a port number (separated by a colon).
4243 * I.e.: ip1[:port1][,ip2[:port2]...]
4244 * <options>
4245 * A comma-separated list of ceph and/or rbd options.
4246 * <pool_name>
4247 * The name of the rados pool containing the rbd image.
4248 * <image_name>
4249 * The name of the image in that pool to map.
4250 * <snap_name>
4251 * An optional snapshot name. If provided, the mapping will
4252 * present data from the image at the time that snapshot was
4253 * created. The image head is used if no snapshot name is
4254 * provided. Snapshot mappings are always read-only.
4256 static int rbd_add_parse_args(const char *buf,
4257 struct ceph_options **ceph_opts,
4258 struct rbd_options **opts,
4259 struct rbd_spec **rbd_spec)
4263 const char *mon_addrs;
4265 size_t mon_addrs_size;
4266 struct rbd_spec *spec = NULL;
4267 struct rbd_options *rbd_opts = NULL;
4268 struct ceph_options *copts;
4271 /* The first four tokens are required */
4273 len = next_token(&buf);
4275 rbd_warn(NULL, "no monitor address(es) provided");
4279 mon_addrs_size = len + 1;
4283 options = dup_token(&buf, NULL);
4287 rbd_warn(NULL, "no options provided");
4291 spec = rbd_spec_alloc();
4295 spec->pool_name = dup_token(&buf, NULL);
4296 if (!spec->pool_name)
4298 if (!*spec->pool_name) {
4299 rbd_warn(NULL, "no pool name provided");
4303 spec->image_name = dup_token(&buf, NULL);
4304 if (!spec->image_name)
4306 if (!*spec->image_name) {
4307 rbd_warn(NULL, "no image name provided");
4312 * Snapshot name is optional; default is to use "-"
4313 * (indicating the head/no snapshot).
4315 len = next_token(&buf);
4317 buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */
4318 len = sizeof (RBD_SNAP_HEAD_NAME) - 1;
4319 } else if (len > RBD_MAX_SNAP_NAME_LEN) {
4320 ret = -ENAMETOOLONG;
4323 snap_name = kmemdup(buf, len + 1, GFP_KERNEL);
4326 *(snap_name + len) = '\0';
4327 spec->snap_name = snap_name;
4329 /* Initialize all rbd options to the defaults */
4331 rbd_opts = kzalloc(sizeof (*rbd_opts), GFP_KERNEL);
4335 rbd_opts->read_only = RBD_READ_ONLY_DEFAULT;
4337 copts = ceph_parse_options(options, mon_addrs,
4338 mon_addrs + mon_addrs_size - 1,
4339 parse_rbd_opts_token, rbd_opts);
4340 if (IS_ERR(copts)) {
4341 ret = PTR_ERR(copts);
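/*
 * Editor's note: an illustrative mapping request in the format parsed
 * above, as it would be written to the sysfs control file (see
 * Documentation/ABI/testing/sysfs-bus-rbd; the address, credentials,
 * and names here are made up):
 *
 *   echo "1.2.3.4:6789 name=admin rbd myimage mysnap" > /sys/bus/rbd/add
 *
 * i.e. monitor address(es), options, the pool "rbd", the image
 * "myimage", and an optional snapshot name.
 */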
4362 * An rbd format 2 image has a unique identifier, distinct from the
4363 * name given to it by the user. Internally, that identifier is
4364 * what's used to specify the names of objects related to the image.
4366 * A special "rbd id" object is used to map an rbd image name to its
4367 * id. If that object doesn't exist, then there is no v2 rbd image
4368 * with the supplied name.
4370 * This function will record the given rbd_dev's image_id field if
4371 * it can be determined, and in that case will return 0. If any
4372 * errors occur a negative errno will be returned and the rbd_dev's
4373 * image_id field will be unchanged (and should be NULL).
4375 static int rbd_dev_image_id(struct rbd_device *rbd_dev)
4384 * When probing a parent image, the image id is already
4385 * known (and the image name likely is not). There's no
4386 * need to fetch the image id again in this case. We
4387 * do still need to set the image format though.
4389 if (rbd_dev->spec->image_id) {
4390 rbd_dev->image_format = *rbd_dev->spec->image_id ? 2 : 1;
4396 * First, see if the format 2 image id file exists, and if
4397 * so, get the image's persistent id from it.
4399 size = sizeof (RBD_ID_PREFIX) + strlen(rbd_dev->spec->image_name);
4400 object_name = kmalloc(size, GFP_NOIO);
4403 sprintf(object_name, "%s%s", RBD_ID_PREFIX, rbd_dev->spec->image_name);
4404 dout("rbd id object name is %s\n", object_name);
4406 /* Response will be an encoded string, which includes a length */
4408 size = sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX;
4409 response = kzalloc(size, GFP_NOIO);
4415 /* If it doesn't exist we'll assume it's a format 1 image */
4417 ret = rbd_obj_method_sync(rbd_dev, object_name,
4418 "rbd", "get_id", NULL, 0,
4419 response, RBD_IMAGE_ID_LEN_MAX);
4420 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4421 if (ret == -ENOENT) {
4422 image_id = kstrdup("", GFP_KERNEL);
4423 ret = image_id ? 0 : -ENOMEM;
4425 rbd_dev->image_format = 1;
4426 } else if (ret > sizeof (__le32)) {
4429 image_id = ceph_extract_encoded_string(&p, p + ret,
4431 ret = IS_ERR(image_id) ? PTR_ERR(image_id) : 0;
4433 rbd_dev->image_format = 2;
4439 rbd_dev->spec->image_id = image_id;
4440 dout("image_id is %s\n", image_id);
4449 /* Undo whatever state changes are made by v1 or v2 image probe */
4451 static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
4453 struct rbd_image_header *header;
4455 rbd_dev_remove_parent(rbd_dev);
4456 rbd_spec_put(rbd_dev->parent_spec);
4457 rbd_dev->parent_spec = NULL;
4458 rbd_dev->parent_overlap = 0;
4460 /* Free dynamic fields from the header, then zero it out */
4462 header = &rbd_dev->header;
4463 ceph_put_snap_context(header->snapc);
4464 kfree(header->snap_sizes);
4465 kfree(header->snap_names);
4466 kfree(header->object_prefix);
4467 memset(header, 0, sizeof (*header));
4470 static int rbd_dev_v1_probe(struct rbd_device *rbd_dev)
4472 return rbd_dev_v1_header_read(rbd_dev);
4475 static int rbd_dev_v2_probe(struct rbd_device *rbd_dev)
4479 ret = rbd_dev_v2_image_size(rbd_dev);
4483 /* Get the object prefix (a.k.a. block_name) for the image */
4485 ret = rbd_dev_v2_object_prefix(rbd_dev);
4489 /* Get and check the features for the image */
4491 ret = rbd_dev_v2_features(rbd_dev);
4495 /* If the image supports layering, get the parent info */
4497 if (rbd_dev->header.features & RBD_FEATURE_LAYERING) {
4498 ret = rbd_dev_v2_parent_info(rbd_dev);
4502 * Print a warning if this image has a parent.
4503 * Don't print it if the image now being probed
4504 * is itself a parent. We can tell at this point
4505 * because we won't know its pool name yet (just its pool id).
4508 if (rbd_dev->parent_spec && rbd_dev->spec->pool_name)
4509 rbd_warn(rbd_dev, "WARNING: kernel layering "
4510 "is EXPERIMENTAL!");
4513 /* If the image supports fancy striping, get its parameters */
4515 if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) {
4516 ret = rbd_dev_v2_striping_info(rbd_dev);
4521 /* crypto and compression type aren't (yet) supported for v2 images */
4523 rbd_dev->header.crypt_type = 0;
4524 rbd_dev->header.comp_type = 0;
4526 /* Get the snapshot context, plus the header version */
4528 ret = rbd_dev_v2_snap_context(rbd_dev);
4534 rbd_dev->parent_overlap = 0;
4535 rbd_spec_put(rbd_dev->parent_spec);
4536 rbd_dev->parent_spec = NULL;
4537 kfree(rbd_dev->header_name);
4538 rbd_dev->header_name = NULL;
4539 kfree(rbd_dev->header.object_prefix);
4540 rbd_dev->header.object_prefix = NULL;
4545 static int rbd_dev_probe_parent(struct rbd_device *rbd_dev)
4547 struct rbd_device *parent = NULL;
4548 struct rbd_spec *parent_spec;
4549 struct rbd_client *rbdc;
4552 if (!rbd_dev->parent_spec)
4555 * We need to pass a reference to the client and the parent
4556 * spec when creating the parent rbd_dev. Images related by
4557 * parent/child relationships always share both.
4559 parent_spec = rbd_spec_get(rbd_dev->parent_spec);
4560 rbdc = __rbd_get_client(rbd_dev->rbd_client);
4563 parent = rbd_dev_create(rbdc, parent_spec);
4567 ret = rbd_dev_image_probe(parent, true);
4570 rbd_dev->parent = parent;
4575 rbd_spec_put(rbd_dev->parent_spec);
4576 kfree(rbd_dev->header_name);
4577 rbd_dev_destroy(parent);
4579 rbd_put_client(rbdc);
4580 rbd_spec_put(parent_spec);
4586 static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
4590 /* generate unique id: find highest unique id, add one */
4591 rbd_dev_id_get(rbd_dev);
4593 /* Fill in the device name, now that we have its id. */
4594 BUILD_BUG_ON(DEV_NAME_LEN
4595 < sizeof (RBD_DRV_NAME) + MAX_INT_FORMAT_WIDTH);
4596 sprintf(rbd_dev->name, "%s%d", RBD_DRV_NAME, rbd_dev->dev_id);
4598 /* Get our block major device number. */
4600 ret = register_blkdev(0, rbd_dev->name);
4603 rbd_dev->major = ret;
4605 /* Set up the blkdev mapping. */
4607 ret = rbd_init_disk(rbd_dev);
4609 goto err_out_blkdev;
4611 ret = rbd_dev_mapping_set(rbd_dev);
4614 set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
4616 ret = rbd_bus_add_dev(rbd_dev);
4618 goto err_out_mapping;
4620 /* Everything's ready. Announce the disk to the world. */
4622 set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
4623 add_disk(rbd_dev->disk);
4625 pr_info("%s: added with size 0x%llx\n", rbd_dev->disk->disk_name,
4626 (unsigned long long) rbd_dev->mapping.size);
4631 rbd_dev_mapping_clear(rbd_dev);
4633 rbd_free_disk(rbd_dev);
4635 unregister_blkdev(rbd_dev->major, rbd_dev->name);
4637 rbd_dev_id_put(rbd_dev);
4638 rbd_dev_mapping_clear(rbd_dev);
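/*
 * Editor's note: a standalone sketch (not driver code) of the bound
 * the BUILD_BUG_ON above enforces: the driver prefix plus the widest
 * possible decimal int must fit in the fixed-size device name buffer.
 */
#if 0	/* standalone example */
#include <limits.h>
#include <stdio.h>

int main(void)
{
	char name[32];		/* sized like the device name buffer */
	int n = snprintf(name, sizeof(name), "%s%d", "rbd", INT_MAX);

	printf("\"%s\" used %d of %zu bytes\n", name, n, sizeof(name));
	return n < (int)sizeof(name) ? 0 : 1;
}
#endif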
4643 static int rbd_dev_header_name(struct rbd_device *rbd_dev)
4645 struct rbd_spec *spec = rbd_dev->spec;
4648 /* Record the header object name for this rbd image. */
4650 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
4652 if (rbd_dev->image_format == 1)
4653 size = strlen(spec->image_name) + sizeof (RBD_SUFFIX);
4655 size = sizeof (RBD_HEADER_PREFIX) + strlen(spec->image_id);
4657 rbd_dev->header_name = kmalloc(size, GFP_KERNEL);
4658 if (!rbd_dev->header_name)
4661 if (rbd_dev->image_format == 1)
4662 sprintf(rbd_dev->header_name, "%s%s",
4663 spec->image_name, RBD_SUFFIX);
4665 sprintf(rbd_dev->header_name, "%s%s",
4666 RBD_HEADER_PREFIX, spec->image_id);
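/*
 * Editor's note: a standalone sketch (not driver code) of the header
 * object naming above.  The two literals are our reading of RBD_SUFFIX
 * and RBD_HEADER_PREFIX from rbd_types.h -- treat them, and the image
 * name/id used, as illustrative assumptions.
 */
#if 0	/* standalone example */
#include <stdio.h>

int main(void)
{
	char name[128];

	/* format 1: "<image_name>.rbd" */
	snprintf(name, sizeof(name), "%s%s", "myimage", ".rbd");
	printf("%s\n", name);

	/* format 2: "rbd_header.<image_id>" */
	snprintf(name, sizeof(name), "%s%s", "rbd_header.", "10052ae8944a");
	printf("%s\n", name);
	return 0;
}
#endif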
static void rbd_dev_image_release(struct rbd_device *rbd_dev)
{
	int ret;

	rbd_dev_unprobe(rbd_dev);
	ret = rbd_dev_header_watch_sync(rbd_dev, 0);
	if (ret)
		rbd_warn(rbd_dev, "failed to cancel watch event (%d)\n", ret);
	kfree(rbd_dev->header_name);
	rbd_dev->header_name = NULL;
	rbd_dev->image_format = 0;
	kfree(rbd_dev->spec->image_id);
	rbd_dev->spec->image_id = NULL;

	rbd_dev_destroy(rbd_dev);
}

/*
 * Probe for the existence of the header object for the given rbd
 * device.  For format 2 images this includes determining the image
 * id.
 */
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool read_only)
{
	int ret;
	int tmp;

	/*
	 * Get the id from the image id object.  If it's not a
	 * format 2 image, we'll get ENOENT back, and we'll assume
	 * it's a format 1 image.
	 */
	ret = rbd_dev_image_id(rbd_dev);
	if (ret)
		return ret;
	rbd_assert(rbd_dev->spec->image_id);
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));

	ret = rbd_dev_header_name(rbd_dev);
	if (ret)
		goto err_out_format;

	ret = rbd_dev_header_watch_sync(rbd_dev, 1);
	if (ret)
		goto out_header_name;

	if (rbd_dev->image_format == 1)
		ret = rbd_dev_v1_probe(rbd_dev);
	else
		ret = rbd_dev_v2_probe(rbd_dev);
	if (ret)
		goto err_out_watch;

	ret = rbd_dev_spec_update(rbd_dev);
	if (ret)
		goto err_out_probe;

	/* If we are mapping a snapshot it must be marked read-only */
	if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
		read_only = true;
	rbd_dev->mapping.read_only = read_only;

	ret = rbd_dev_probe_parent(rbd_dev);
	if (ret)
		goto err_out_probe;

	dout("discovered format %u image, header name is %s\n",
		rbd_dev->image_format, rbd_dev->header_name);

	return 0;
err_out_probe:
	rbd_dev_unprobe(rbd_dev);
err_out_watch:
	tmp = rbd_dev_header_watch_sync(rbd_dev, 0);
	if (tmp)
		rbd_warn(rbd_dev, "unable to tear down watch request\n");
out_header_name:
	kfree(rbd_dev->header_name);
	rbd_dev->header_name = NULL;
err_out_format:
	rbd_dev->image_format = 0;
	kfree(rbd_dev->spec->image_id);
	rbd_dev->spec->image_id = NULL;

	dout("probe failed, returning %d\n", ret);

	return ret;
}

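/*
 * For reference: rbd_dev_image_id() decides the format by looking
 * for an image id object, conventionally named "rbd_id.<image_name>",
 * in the image's pool.  If that object is missing (-ENOENT) the
 * image is treated as format 1 and spec->image_id is set to an
 * empty string.
 */
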
static ssize_t rbd_add(struct bus_type *bus,
		       const char *buf,
		       size_t count)
{
	struct rbd_device *rbd_dev = NULL;
	struct ceph_options *ceph_opts = NULL;
	struct rbd_options *rbd_opts = NULL;
	struct rbd_spec *spec = NULL;
	struct rbd_client *rbdc;
	struct ceph_osd_client *osdc;
	bool read_only;
	int rc = -ENOMEM;

	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	/* parse add command */
	rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
	if (rc < 0)
		goto err_out_module;
	read_only = rbd_opts->read_only;
	kfree(rbd_opts);
	rbd_opts = NULL;	/* done with this */

	rbdc = rbd_get_client(ceph_opts);
	if (IS_ERR(rbdc)) {
		rc = PTR_ERR(rbdc);
		goto err_out_args;
	}
	ceph_opts = NULL;	/* rbd_dev client now owns this */

	/* pick the pool */
	osdc = &rbdc->client->osdc;
	rc = ceph_pg_poolid_by_name(osdc->osdmap, spec->pool_name);
	if (rc < 0)
		goto err_out_client;
	spec->pool_id = (u64)rc;

	/* The ceph file layout needs to fit pool id in 32 bits */
	if (spec->pool_id > (u64)U32_MAX) {
		rbd_warn(NULL, "pool id too large (%llu > %u)\n",
				(unsigned long long)spec->pool_id, U32_MAX);
		rc = -EIO;
		goto err_out_client;
	}

	rbd_dev = rbd_dev_create(rbdc, spec);
	if (!rbd_dev)
		goto err_out_client;
	rbdc = NULL;		/* rbd_dev now owns this */
	spec = NULL;		/* rbd_dev now owns this */

	rc = rbd_dev_image_probe(rbd_dev, read_only);
	if (rc < 0)
		goto err_out_rbd_dev;

	rc = rbd_dev_device_setup(rbd_dev);
	if (rc) {
		rbd_dev_image_release(rbd_dev);
		goto err_out_module;
	}

	return count;

err_out_rbd_dev:
	rbd_dev_destroy(rbd_dev);
err_out_client:
	rbd_put_client(rbdc);
err_out_args:
	ceph_destroy_options(ceph_opts);
	rbd_spec_put(spec);
err_out_module:
	module_put(THIS_MODULE);

	dout("Error adding device %s\n", buf);

	return (ssize_t)rc;
}

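/*
 * Example of mapping an image through this interface (monitor
 * address, user and image names below are illustrative; see
 * Documentation/ABI/testing/sysfs-bus-rbd for the full syntax):
 *
 *	$ echo "192.168.0.1 name=admin rbd foo" > /sys/bus/rbd/add
 *
 * which would map image "foo" from pool "rbd" as /dev/rbd<id>.
 */
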
static struct rbd_device *__rbd_get_dev(unsigned long dev_id)
{
	struct list_head *tmp;
	struct rbd_device *rbd_dev;

	spin_lock(&rbd_dev_list_lock);
	list_for_each(tmp, &rbd_dev_list) {
		rbd_dev = list_entry(tmp, struct rbd_device, node);
		if (rbd_dev->dev_id == dev_id) {
			spin_unlock(&rbd_dev_list_lock);
			return rbd_dev;
		}
	}
	spin_unlock(&rbd_dev_list_lock);

	return NULL;
}

static void rbd_dev_device_release(struct device *dev)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	rbd_free_disk(rbd_dev);
	clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
	rbd_dev_mapping_clear(rbd_dev);
	unregister_blkdev(rbd_dev->major, rbd_dev->name);
	rbd_dev->major = 0;
	rbd_dev_id_put(rbd_dev);
}

static void rbd_dev_remove_parent(struct rbd_device *rbd_dev)
{
	while (rbd_dev->parent) {
		struct rbd_device *first = rbd_dev;
		struct rbd_device *second = first->parent;
		struct rbd_device *third;

		/*
		 * Follow to the parent with no grandparent and
		 * remove it.
		 */
		while (second && (third = second->parent)) {
			first = second;
			second = third;
		}
		rbd_assert(second);
		rbd_dev_image_release(second);
		first->parent = NULL;
		first->parent_overlap = 0;
		rbd_assert(first->parent_spec);
		rbd_spec_put(first->parent_spec);
		first->parent_spec = NULL;
	}
}

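/*
 * Illustrative walk-through: for a mapped chain a -> b -> c (with c
 * the base image), the inner loop first descends to c, so ancestors
 * are released bottom-up: c on the first pass, then b, while the
 * mapped device a itself is torn down by the caller.
 */
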
static ssize_t rbd_remove(struct bus_type *bus,
			  const char *buf,
			  size_t count)
{
	struct rbd_device *rbd_dev = NULL;
	int target_id;
	unsigned long ul;
	int ret;

	ret = strict_strtoul(buf, 10, &ul);
	if (ret)
		return ret;

	/* convert to int; abort if we lost anything in the conversion */
	target_id = (int) ul;
	if (target_id != ul)
		return -EINVAL;

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);

	ret = -ENOENT;
	rbd_dev = __rbd_get_dev(target_id);
	if (!rbd_dev)
		goto done;

	ret = 0;
	spin_lock_irq(&rbd_dev->lock);
	if (rbd_dev->open_count)
		ret = -EBUSY;
	else
		set_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags);
	spin_unlock_irq(&rbd_dev->lock);
	if (ret < 0)
		goto done;
	ret = count;
	rbd_bus_del_dev(rbd_dev);
	rbd_dev_image_release(rbd_dev);
	module_put(THIS_MODULE);
done:
	mutex_unlock(&ctl_mutex);

	return ret;
}

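/*
 * Example of unmapping through this interface, assuming the device
 * was assigned dev_id 2 (i.e. it appears as /dev/rbd2):
 *
 *	$ echo 2 > /sys/bus/rbd/remove
 */
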
/*
 * create control files in sysfs
 * /sys/bus/rbd/...
 */
static int rbd_sysfs_init(void)
{
	int ret;

	ret = device_register(&rbd_root_dev);
	if (ret < 0)
		return ret;

	ret = bus_register(&rbd_bus_type);
	if (ret < 0)
		device_unregister(&rbd_root_dev);

	return ret;
}

static void rbd_sysfs_cleanup(void)
{
	bus_unregister(&rbd_bus_type);
	device_unregister(&rbd_root_dev);
}

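/*
 * The registrations above create the control files used by
 * rbd_add() and rbd_remove(), namely /sys/bus/rbd/add and
 * /sys/bus/rbd/remove, plus a /sys/bus/rbd/devices/<id>/ directory
 * for each mapped image.
 */
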
static int rbd_slab_init(void)
{
	rbd_assert(!rbd_img_request_cache);
	rbd_img_request_cache = kmem_cache_create("rbd_img_request",
					sizeof (struct rbd_img_request),
					__alignof__(struct rbd_img_request),
					0, NULL);
	if (!rbd_img_request_cache)
		return -ENOMEM;

	rbd_assert(!rbd_obj_request_cache);
	rbd_obj_request_cache = kmem_cache_create("rbd_obj_request",
					sizeof (struct rbd_obj_request),
					__alignof__(struct rbd_obj_request),
					0, NULL);
	if (!rbd_obj_request_cache)
		goto out_err;

	rbd_assert(!rbd_segment_name_cache);
	rbd_segment_name_cache = kmem_cache_create("rbd_segment_name",
					MAX_OBJ_NAME_SIZE + 1, 1, 0, NULL);
	if (rbd_segment_name_cache)
		return 0;
out_err:
	if (rbd_obj_request_cache) {
		kmem_cache_destroy(rbd_obj_request_cache);
		rbd_obj_request_cache = NULL;
	}

	kmem_cache_destroy(rbd_img_request_cache);
	rbd_img_request_cache = NULL;

	return -ENOMEM;
}

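/*
 * Sizing note: rbd_img_request and rbd_obj_request structures are
 * allocated for every block I/O, which is why they get dedicated
 * slab caches; rbd_segment_name_cache holds the fixed-size object
 * name buffers (MAX_OBJ_NAME_SIZE + 1 bytes) built per request.
 */
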
static void rbd_slab_exit(void)
{
	rbd_assert(rbd_segment_name_cache);
	kmem_cache_destroy(rbd_segment_name_cache);
	rbd_segment_name_cache = NULL;

	rbd_assert(rbd_obj_request_cache);
	kmem_cache_destroy(rbd_obj_request_cache);
	rbd_obj_request_cache = NULL;

	rbd_assert(rbd_img_request_cache);
	kmem_cache_destroy(rbd_img_request_cache);
	rbd_img_request_cache = NULL;
}

static int __init rbd_init(void)
{
	int rc;

	if (!libceph_compatible(NULL)) {
		rbd_warn(NULL, "libceph incompatibility (quitting)");
		return -EINVAL;
	}
	rc = rbd_slab_init();
	if (rc)
		return rc;
	rc = rbd_sysfs_init();
	if (rc)
		rbd_slab_exit();
	else
		pr_info("loaded " RBD_DRV_NAME_LONG "\n");

	return rc;
}

static void __exit rbd_exit(void)
{
	rbd_sysfs_cleanup();
	rbd_slab_exit();
}

module_init(rbd_init);
module_exit(rbd_exit);

MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
MODULE_DESCRIPTION("rados block device");

/* following authorship retained from original osdblk.c */
MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>");

MODULE_LICENSE("GPL");