/*
   rbd.c -- Export ceph rados objects as a Linux block device

   based on drivers/block/osdblk.c:

   Copyright 2009 Red Hat, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

   For usage instructions, please refer to:

                 Documentation/ABI/testing/sysfs-bus-rbd
 */
#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/mon_client.h>
#include <linux/ceph/decode.h>
#include <linux/parser.h>
#include <linux/bsearch.h>

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/module.h>

#include <linux/blkdev.h>
#include <linux/slab.h>

#include "rbd_types.h"

#define RBD_DEBUG	/* Activate rbd_assert() calls */
/*
 * The basic unit of block I/O is a sector.  It is interpreted in a
 * number of contexts in Linux (blk, bio, genhd), but the default is
 * universally 512 bytes.  These symbols are just slightly more
 * meaningful than the bare numbers they represent.
 */
#define SECTOR_SHIFT	9
#define SECTOR_SIZE	(1ULL << SECTOR_SHIFT)
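
/*
 * Illustrative sketch (not part of the original source): byte/sector
 * conversions throughout the driver are plain shift arithmetic, e.g.:
 *
 *	u64 nr_sectors = byte_count >> SECTOR_SHIFT;	// 4096 >> 9 == 8
 *	u64 byte_count = nr_sectors << SECTOR_SHIFT;	// 8 << 9 == 4096
 */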
#define RBD_DRV_NAME "rbd"
#define RBD_DRV_NAME_LONG "rbd (rados block device)"

#define RBD_MINORS_PER_MAJOR	256		/* max minors per blkdev */

#define RBD_SNAP_DEV_NAME_PREFIX	"snap_"
#define RBD_MAX_SNAP_NAME_LEN \
			(NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))

#define RBD_MAX_SNAP_COUNT	510	/* allows max snapc to fit in 4KB */

#define RBD_SNAP_HEAD_NAME	"-"

#define BAD_SNAP_INDEX	U32_MAX		/* invalid index into snap array */

/* This allows a single page to hold an image name sent by OSD */
#define RBD_IMAGE_NAME_LEN_MAX	(PAGE_SIZE - sizeof (__le32) - 1)
#define RBD_IMAGE_ID_LEN_MAX	64

#define RBD_OBJ_PREFIX_LEN_MAX	64
#define RBD_FEATURE_LAYERING	(1<<0)
#define RBD_FEATURE_STRIPINGV2	(1<<1)
#define RBD_FEATURES_ALL \
	    (RBD_FEATURE_LAYERING | RBD_FEATURE_STRIPINGV2)

/* Features supported by this (client software) implementation. */

#define RBD_FEATURES_SUPPORTED	(RBD_FEATURES_ALL)
/*
 * An RBD device name will be "rbd#", where the "rbd" comes from
 * RBD_DRV_NAME above, and # is a unique integer identifier.
 * MAX_INT_FORMAT_WIDTH is used to ensure DEV_NAME_LEN is big
 * enough to hold all possible device names.
 */
#define DEV_NAME_LEN		32
#define MAX_INT_FORMAT_WIDTH	((5 * sizeof (int)) / 2 + 1)
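
/*
 * Worked example (not part of the original source): each byte of an
 * int contributes at most log10(256) ~= 2.41 decimal digits, so 5/2
 * is a safe over-estimate per byte and the "+ 1" leaves room for a
 * sign.  With a 4-byte int this yields (5 * 4) / 2 + 1 == 11
 * characters, enough for "-2147483648"; "rbd" plus up to 11 digits
 * therefore always fits within DEV_NAME_LEN.
 */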
/*
 * block device image metadata (in-memory version)
 */
struct rbd_image_header {
	/* These four fields never change for a given rbd image */

	/* The remaining fields need to be updated occasionally */
	struct ceph_snap_context *snapc;
/*
 * An rbd image specification.
 *
 * The tuple (pool_id, image_id, snap_id) is sufficient to uniquely
 * identify an image.  Each rbd_dev structure includes a pointer to
 * an rbd_spec structure that encapsulates this identity.
 *
 * Each of the ids in an rbd_spec has an associated name.  For a
 * user-mapped image, the names are supplied and the ids associated
 * with them are looked up.  For a layered image, a parent image is
 * defined by the tuple, and the names are looked up.
 *
 * An rbd_dev structure contains a parent_spec pointer which is
 * non-null if the image it represents is a child in a layered
 * image.  This pointer will refer to the rbd_spec structure used
 * by the parent rbd_dev for its own identity (i.e., the structure
 * is shared between the parent and child).
 *
 * Since these structures are populated once, during the discovery
 * phase of image construction, they are effectively immutable so
 * we make no effort to synchronize access to them.
 *
 * Note that code herein does not assume the image name is known (it
 * could be a null pointer).
 */
	const char	*pool_name;

	const char	*image_id;
	const char	*image_name;

	const char	*snap_name;
/*
 * An instance of the client; multiple devices may share an rbd client.
 */
	struct ceph_client	*client;

	struct list_head	node;
struct rbd_img_request;
typedef void (*rbd_img_callback_t)(struct rbd_img_request *);

#define BAD_WHICH	U32_MAX		/* Good which or bad which, which? */

struct rbd_obj_request;
typedef void (*rbd_obj_callback_t)(struct rbd_obj_request *);

enum obj_request_type {
	OBJ_REQUEST_NODATA, OBJ_REQUEST_BIO, OBJ_REQUEST_PAGES

	OBJ_REQ_DONE,		/* completion flag: not done = 0, done = 1 */
	OBJ_REQ_IMG_DATA,	/* object usage: standalone = 0, image = 1 */
	OBJ_REQ_KNOWN,		/* EXISTS flag valid: no = 0, yes = 1 */
	OBJ_REQ_EXISTS,		/* target exists: no = 0, yes = 1 */
struct rbd_obj_request {
	const char		*object_name;
	u64			offset;		/* object start byte */
	u64			length;		/* bytes from offset */

	/*
	 * An object request associated with an image will have its
	 * img_data flag set; a standalone object request will not.
	 *
	 * A standalone object request will have which == BAD_WHICH
	 * and a null obj_request pointer.
	 *
	 * An object request initiated in support of a layered image
	 * object (to check for its existence before a write) will
	 * have which == BAD_WHICH and a non-null obj_request pointer.
	 *
	 * Finally, an object request for rbd image data will have
	 * which != BAD_WHICH, and will have a non-null img_request
	 * pointer.  The value of which will be in the range
	 * 0..(img_request->obj_request_count-1).
	 */
	struct rbd_obj_request	*obj_request;	/* STAT op */

	struct rbd_img_request	*img_request;

	/* links for img_request->obj_requests list */
	struct list_head	links;

	u32			which;	/* position in image request list */

	enum obj_request_type	type;
	struct bio		*bio_list;
	struct page		**copyup_pages;

	struct ceph_osd_request	*osd_req;

	u64			xferred;	/* bytes transferred */

	rbd_obj_callback_t	callback;
	struct completion	completion;
	IMG_REQ_WRITE,		/* I/O direction: read = 0, write = 1 */
	IMG_REQ_CHILD,		/* initiator: block = 0, child image = 1 */
	IMG_REQ_LAYERED,	/* ENOENT handling: normal = 0, layered = 1 */

struct rbd_img_request {
	struct rbd_device	*rbd_dev;
	u64			offset;	/* starting image byte offset */
	u64			length;	/* byte count from offset */
	u64			snap_id;	/* for reads */
	struct ceph_snap_context *snapc;	/* for writes */
	struct request		*rq;		/* block request */
	struct rbd_obj_request	*obj_request;	/* obj req initiator */

	struct page		**copyup_pages;
	spinlock_t		completion_lock;/* protects next_completion */
	rbd_img_callback_t	callback;
	u64			xferred;/* aggregate bytes transferred */
	int			result;	/* first nonzero obj_request result */

	u32			obj_request_count;
	struct list_head	obj_requests;	/* rbd_obj_request structs */
#define for_each_obj_request(ireq, oreq) \
	list_for_each_entry(oreq, &(ireq)->obj_requests, links)
#define for_each_obj_request_from(ireq, oreq) \
	list_for_each_entry_from(oreq, &(ireq)->obj_requests, links)
#define for_each_obj_request_safe(ireq, oreq, n) \
	list_for_each_entry_safe_reverse(oreq, n, &(ireq)->obj_requests, links)
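
/*
 * Illustrative usage (not part of the original source): these wrappers
 * just pin down the list head and the link member for the common
 * traversals of an image request's object requests, e.g.:
 *
 *	struct rbd_obj_request *obj_req;
 *	u64 total = 0;
 *
 *	for_each_obj_request(img_req, obj_req)
 *		total += obj_req->xferred;
 *
 * The _safe variant iterates in reverse and tolerates removal of the
 * current entry, which is why teardown paths below rely on it.
 */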
	int			dev_id;		/* blkdev unique id */

	int			major;		/* blkdev assigned major */
	struct gendisk		*disk;		/* blkdev's gendisk and rq */

	u32			image_format;	/* Either 1 or 2 */
	struct rbd_client	*rbd_client;

	char			name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */

	spinlock_t		lock;		/* queue, flags, open_count */

	struct rbd_image_header	header;
	unsigned long		flags;		/* possibly lock protected */
	struct rbd_spec		*spec;

	struct ceph_file_layout	layout;

	struct ceph_osd_event	*watch_event;
	struct rbd_obj_request	*watch_request;

	struct rbd_spec		*parent_spec;

	struct rbd_device	*parent;

	/* protects updating the header */
	struct rw_semaphore	header_rwsem;

	struct rbd_mapping	mapping;

	struct list_head	node;

	unsigned long		open_count;	/* protected by lock */

/*
 * Flag bits for rbd_dev->flags.  If atomicity is required,
 * rbd_dev->lock is used to protect access.
 *
 * Currently, only the "removing" flag (which is coupled with the
 * "open_count" field) requires atomic access.
 */
	RBD_DEV_FLAG_EXISTS,	/* mapped snapshot has not been deleted */
	RBD_DEV_FLAG_REMOVING,	/* this mapping is being removed */
static DEFINE_MUTEX(ctl_mutex);	  /* Serialize open/close/setup/teardown */

static LIST_HEAD(rbd_dev_list);    /* devices */
static DEFINE_SPINLOCK(rbd_dev_list_lock);

static LIST_HEAD(rbd_client_list);		/* clients */
static DEFINE_SPINLOCK(rbd_client_list_lock);

/* Slab caches for frequently-allocated structures */

static struct kmem_cache	*rbd_img_request_cache;
static struct kmem_cache	*rbd_obj_request_cache;
static struct kmem_cache	*rbd_segment_name_cache;
static int rbd_img_request_submit(struct rbd_img_request *img_request);

static void rbd_dev_device_release(struct device *dev);

static ssize_t rbd_add(struct bus_type *bus, const char *buf,
		       size_t count);
static ssize_t rbd_remove(struct bus_type *bus, const char *buf,
			  size_t count);
static int rbd_dev_image_probe(struct rbd_device *rbd_dev);

static struct bus_attribute rbd_bus_attrs[] = {
	__ATTR(add, S_IWUSR, NULL, rbd_add),
	__ATTR(remove, S_IWUSR, NULL, rbd_remove),

static struct bus_type rbd_bus_type = {
	.bus_attrs	= rbd_bus_attrs,

static void rbd_root_dev_release(struct device *dev)

static struct device rbd_root_dev = {
	.release	= rbd_root_dev_release,
static __printf(2, 3)
void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
	struct va_format vaf;

		printk(KERN_WARNING "%s: %pV\n", RBD_DRV_NAME, &vaf);
	else if (rbd_dev->disk)
		printk(KERN_WARNING "%s: %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->disk->disk_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_name)
		printk(KERN_WARNING "%s: image %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_id)
		printk(KERN_WARNING "%s: id %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_id, &vaf);
		printk(KERN_WARNING "%s: rbd_dev %p: %pV\n",
			RBD_DRV_NAME, rbd_dev, &vaf);
#define rbd_assert(expr)						\
		if (unlikely(!(expr))) {				\
			printk(KERN_ERR "\nAssertion failure in %s() "	\
						"at line %d:\n\n"	\
					"\trbd_assert(%s);\n\n",	\
					__func__, __LINE__, #expr);	\

#else /* !RBD_DEBUG */
#  define rbd_assert(expr)	((void) 0)
#endif /* !RBD_DEBUG */
static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request);
static void rbd_img_parent_read(struct rbd_obj_request *obj_request);
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);

static int rbd_dev_refresh(struct rbd_device *rbd_dev);
static int rbd_dev_v2_refresh(struct rbd_device *rbd_dev);
static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id);
static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
		u8 *order, u64 *snap_size);
static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
		u64 *snap_features);
static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name);
static int rbd_open(struct block_device *bdev, fmode_t mode)
	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
	bool removing = false;

	if ((mode & FMODE_WRITE) && rbd_dev->mapping.read_only)

	spin_lock_irq(&rbd_dev->lock);
	if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))

		rbd_dev->open_count++;
	spin_unlock_irq(&rbd_dev->lock);

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
	(void) get_device(&rbd_dev->dev);
	set_device_ro(bdev, rbd_dev->mapping.read_only);
	mutex_unlock(&ctl_mutex);
static int rbd_release(struct gendisk *disk, fmode_t mode)
	struct rbd_device *rbd_dev = disk->private_data;
	unsigned long open_count_before;

	spin_lock_irq(&rbd_dev->lock);
	open_count_before = rbd_dev->open_count--;
	spin_unlock_irq(&rbd_dev->lock);
	rbd_assert(open_count_before > 0);

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
	put_device(&rbd_dev->dev);
	mutex_unlock(&ctl_mutex);

static const struct block_device_operations rbd_bd_ops = {
	.owner			= THIS_MODULE,
	.release		= rbd_release,
/*
 * Initialize an rbd client instance.
 */
static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
	struct rbd_client *rbdc;

	dout("%s:\n", __func__);
	rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL);

	kref_init(&rbdc->kref);
	INIT_LIST_HEAD(&rbdc->node);

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);

	rbdc->client = ceph_create_client(ceph_opts, rbdc, 0, 0);
	if (IS_ERR(rbdc->client))

	ceph_opts = NULL;	/* Now rbdc->client is responsible for ceph_opts */

	ret = ceph_open_session(rbdc->client);

	spin_lock(&rbd_client_list_lock);
	list_add_tail(&rbdc->node, &rbd_client_list);
	spin_unlock(&rbd_client_list_lock);

	mutex_unlock(&ctl_mutex);
	dout("%s: rbdc %p\n", __func__, rbdc);

	ceph_destroy_client(rbdc->client);

	mutex_unlock(&ctl_mutex);

	ceph_destroy_options(ceph_opts);
	dout("%s: error %d\n", __func__, ret);
static struct rbd_client *__rbd_get_client(struct rbd_client *rbdc)
	kref_get(&rbdc->kref);

/*
 * Find a ceph client with specific addr and configuration.  If
 * found, bump its reference count.
 */
static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
	struct rbd_client *client_node;

	if (ceph_opts->flags & CEPH_OPT_NOSHARE)

	spin_lock(&rbd_client_list_lock);
	list_for_each_entry(client_node, &rbd_client_list, node) {
		if (!ceph_compare_options(ceph_opts, client_node->client)) {
			__rbd_get_client(client_node);

	spin_unlock(&rbd_client_list_lock);

	return found ? client_node : NULL;
	/* string args above */

	/* Boolean args above */

static match_table_t rbd_opts_tokens = {
	/* string args above */
	{Opt_read_only, "read_only"},
	{Opt_read_only, "ro"},		/* Alternate spelling */
	{Opt_read_write, "read_write"},
	{Opt_read_write, "rw"},		/* Alternate spelling */
	/* Boolean args above */

#define RBD_READ_ONLY_DEFAULT	false
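
/*
 * Illustrative note (not part of the original source): these tokens
 * apply to the option field of the string written to the sysfs "add"
 * file described in Documentation/ABI/testing/sysfs-bus-rbd.  With
 * made-up addresses and names, something like
 *
 *	echo "1.2.3.4:6789 name=admin,ro rbd myimage -" > /sys/bus/rbd/add
 *
 * would match the Opt_read_only case below via the "ro" spelling and
 * leave rbd_opts->read_only set to true.
 */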
static int parse_rbd_opts_token(char *c, void *private)
	struct rbd_options *rbd_opts = private;
	substring_t argstr[MAX_OPT_ARGS];
	int token, intval, ret;

	token = match_token(c, rbd_opts_tokens, argstr);

	if (token < Opt_last_int) {
		ret = match_int(&argstr[0], &intval);
			pr_err("bad mount option arg (not int) "

		dout("got int token %d val %d\n", token, intval);
	} else if (token > Opt_last_int && token < Opt_last_string) {
		dout("got string token %d val %s\n", token,
			argstr[0].from);
	} else if (token > Opt_last_string && token < Opt_last_bool) {
		dout("got Boolean token %d\n", token);
		dout("got token %d\n", token);

		rbd_opts->read_only = true;

		rbd_opts->read_only = false;
/*
 * Get a ceph client with specific addr and configuration; if one does
 * not exist, create it.
 */
static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
	struct rbd_client *rbdc;

	rbdc = rbd_client_find(ceph_opts);
	if (rbdc)	/* using an existing client */
		ceph_destroy_options(ceph_opts);

		rbdc = rbd_client_create(ceph_opts);
/*
 * Destroy ceph client
 *
 * Takes rbd_client_list_lock itself to remove the client from the list.
 */
static void rbd_client_release(struct kref *kref)
	struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);

	dout("%s: rbdc %p\n", __func__, rbdc);
	spin_lock(&rbd_client_list_lock);
	list_del(&rbdc->node);
	spin_unlock(&rbd_client_list_lock);

	ceph_destroy_client(rbdc->client);

/*
 * Drop reference to ceph client node.  If it's not referenced anymore,
 * release it.
 */
static void rbd_put_client(struct rbd_client *rbdc)
		kref_put(&rbdc->kref, rbd_client_release);

static bool rbd_image_format_valid(u32 image_format)
	return image_format == 1 || image_format == 2;
static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)

	/* The header has to start with the magic rbd header text */
	if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)))

	/* The bio layer requires at least sector-sized I/O */

	if (ondisk->options.order < SECTOR_SHIFT)

	/* If we use u64 in a few spots we may be able to loosen this */

	if (ondisk->options.order > 8 * sizeof (int) - 1)

	/*
	 * The size of a snapshot header has to fit in a size_t, and
	 * that limits the number of snapshots.
	 */
	snap_count = le32_to_cpu(ondisk->snap_count);
	size = SIZE_MAX - sizeof (struct ceph_snap_context);
	if (snap_count > size / sizeof (__le64))

	/*
	 * Not only that, but the size of the entire snapshot
	 * header must also be representable in a size_t.
	 */
	size -= snap_count * sizeof (__le64);
	if ((u64) size < le64_to_cpu(ondisk->snap_names_len))
/*
 * Create a new header structure, translate header format from the on-disk
 * header.
 */
static int rbd_header_from_disk(struct rbd_image_header *header,
				struct rbd_image_header_ondisk *ondisk)

	memset(header, 0, sizeof (*header));

	snap_count = le32_to_cpu(ondisk->snap_count);

	len = strnlen(ondisk->object_prefix, sizeof (ondisk->object_prefix));
	header->object_prefix = kmalloc(len + 1, GFP_KERNEL);
	if (!header->object_prefix)

	memcpy(header->object_prefix, ondisk->object_prefix, len);
	header->object_prefix[len] = '\0';

		u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);

		/* Save a copy of the snapshot names */

		if (snap_names_len > (u64) SIZE_MAX)

		header->snap_names = kmalloc(snap_names_len, GFP_KERNEL);
		if (!header->snap_names)

		/*
		 * Note that rbd_dev_v1_header_read() guarantees
		 * the ondisk buffer we're working with has
		 * snap_names_len bytes beyond the end of the
		 * snapshot id array, so this memcpy() is safe.
		 */
		memcpy(header->snap_names, &ondisk->snaps[snap_count],
			snap_names_len);

		/* Record each snapshot's size */

		size = snap_count * sizeof (*header->snap_sizes);
		header->snap_sizes = kmalloc(size, GFP_KERNEL);
		if (!header->snap_sizes)

		for (i = 0; i < snap_count; i++)
			header->snap_sizes[i] =
				le64_to_cpu(ondisk->snaps[i].image_size);

		header->snap_names = NULL;
		header->snap_sizes = NULL;

	header->features = 0;	/* No features support in v1 images */
	header->obj_order = ondisk->options.order;
	header->crypt_type = ondisk->options.crypt_type;
	header->comp_type = ondisk->options.comp_type;

	/* Allocate and fill in the snapshot context */

	header->image_size = le64_to_cpu(ondisk->image_size);

	header->snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);

	header->snapc->seq = le64_to_cpu(ondisk->snap_seq);
	for (i = 0; i < snap_count; i++)
		header->snapc->snaps[i] = le64_to_cpu(ondisk->snaps[i].id);

	kfree(header->snap_sizes);
	header->snap_sizes = NULL;
	kfree(header->snap_names);
	header->snap_names = NULL;
	kfree(header->object_prefix);
	header->object_prefix = NULL;
static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which)
	const char *snap_name;

	rbd_assert(which < rbd_dev->header.snapc->num_snaps);

	/* Skip over names until we find the one we are looking for */

	snap_name = rbd_dev->header.snap_names;
		snap_name += strlen(snap_name) + 1;

	return kstrdup(snap_name, GFP_KERNEL);
/*
 * Snapshot id comparison function for use with qsort()/bsearch().
 * Note that result is for snapshots in *descending* order.
 */
static int snapid_compare_reverse(const void *s1, const void *s2)
	u64 snap_id1 = *(u64 *)s1;
	u64 snap_id2 = *(u64 *)s2;

	if (snap_id1 < snap_id2)

	return snap_id1 == snap_id2 ? 0 : -1;
/*
 * Search a snapshot context to see if the given snapshot id is
 * present.
 *
 * Returns the position of the snapshot id in the array if it's found,
 * or BAD_SNAP_INDEX otherwise.
 *
 * Note: The snapshot array is kept sorted (by the osd) in
 * reverse order, highest snapshot id first.
 */
static u32 rbd_dev_snap_index(struct rbd_device *rbd_dev, u64 snap_id)
	struct ceph_snap_context *snapc = rbd_dev->header.snapc;

	found = bsearch(&snap_id, &snapc->snaps, snapc->num_snaps,
				sizeof (snap_id), snapid_compare_reverse);

	return found ? (u32)(found - &snapc->snaps[0]) : BAD_SNAP_INDEX;
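
/*
 * Worked example (not part of the original source): because the osd
 * keeps the array sorted highest-id-first, a context with
 * snaps[] = { 12, 7, 3 } makes rbd_dev_snap_index(rbd_dev, 7)
 * return 1, while an id absent from the array, such as 5, falls
 * between entries and yields BAD_SNAP_INDEX.
 */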
static const char *rbd_dev_v1_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id)

	which = rbd_dev_snap_index(rbd_dev, snap_id);
	if (which == BAD_SNAP_INDEX)

	return _rbd_dev_v1_snap_name(rbd_dev, which);

static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
	if (snap_id == CEPH_NOSNAP)
		return RBD_SNAP_HEAD_NAME;

	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (rbd_dev->image_format == 1)
		return rbd_dev_v1_snap_name(rbd_dev, snap_id);

	return rbd_dev_v2_snap_name(rbd_dev, snap_id);
static int rbd_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
				u64 *snap_size)
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (snap_id == CEPH_NOSNAP) {
		*snap_size = rbd_dev->header.image_size;
	} else if (rbd_dev->image_format == 1) {

		which = rbd_dev_snap_index(rbd_dev, snap_id);
		if (which == BAD_SNAP_INDEX)

		*snap_size = rbd_dev->header.snap_sizes[which];

		ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, NULL, &size);

static int rbd_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
				u64 *snap_features)
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (snap_id == CEPH_NOSNAP) {
		*snap_features = rbd_dev->header.features;
	} else if (rbd_dev->image_format == 1) {
		*snap_features = 0;	/* No features for format 1 */

		ret = _rbd_dev_v2_snap_features(rbd_dev, snap_id, &features);

		*snap_features = features;
static int rbd_dev_mapping_set(struct rbd_device *rbd_dev)
	const char *snap_name = rbd_dev->spec->snap_name;

	if (strcmp(snap_name, RBD_SNAP_HEAD_NAME)) {
		snap_id = rbd_snap_id_by_name(rbd_dev, snap_name);
		if (snap_id == CEPH_NOSNAP)

		snap_id = CEPH_NOSNAP;

	ret = rbd_snap_size(rbd_dev, snap_id, &size);

	ret = rbd_snap_features(rbd_dev, snap_id, &features);

	rbd_dev->mapping.size = size;
	rbd_dev->mapping.features = features;

	/* If we are mapping a snapshot it must be marked read-only */

	if (snap_id != CEPH_NOSNAP)
		rbd_dev->mapping.read_only = true;

static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev)
	rbd_dev->mapping.size = 0;
	rbd_dev->mapping.features = 0;
	rbd_dev->mapping.read_only = true;

static void rbd_dev_clear_mapping(struct rbd_device *rbd_dev)
	rbd_dev->mapping.size = 0;
	rbd_dev->mapping.features = 0;
	rbd_dev->mapping.read_only = true;
static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)

	name = kmem_cache_alloc(rbd_segment_name_cache, GFP_NOIO);

	segment = offset >> rbd_dev->header.obj_order;
	ret = snprintf(name, MAX_OBJ_NAME_SIZE + 1, "%s.%012llx",
			rbd_dev->header.object_prefix, segment);
	if (ret < 0 || ret > MAX_OBJ_NAME_SIZE) {
		pr_err("error formatting segment name for #%llu (%d)\n",
			segment, ret);

static void rbd_segment_name_free(const char *name)
	/* The explicit cast here is needed to drop the const qualifier */

	kmem_cache_free(rbd_segment_name_cache, (void *)name);
static u64 rbd_segment_offset(struct rbd_device *rbd_dev, u64 offset)
	u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;

	return offset & (segment_size - 1);

static u64 rbd_segment_length(struct rbd_device *rbd_dev,
				u64 offset, u64 length)
	u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;

	offset &= segment_size - 1;

	rbd_assert(length <= U64_MAX - offset);
	if (offset + length > segment_size)
		length = segment_size - offset;
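
/*
 * Worked example (not part of the original source): with the common
 * obj_order of 22, an image is striped over 4 MiB objects.  An image
 * byte offset of 0x00500000 (5 MiB) then lands in segment 1
 * (offset >> 22), at rbd_segment_offset() == 0x00100000 (1 MiB), and
 * a 4 MiB request starting there is clipped by rbd_segment_length()
 * to the 3 MiB remaining in that object; the tail becomes a second
 * object request (see rbd_img_request_fill() below).
 */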
/*
 * returns the size of an object in the image
 */
static u64 rbd_obj_bytes(struct rbd_image_header *header)
	return 1 << header->obj_order;

static void bio_chain_put(struct bio *chain)

		chain = chain->bi_next;
/*
 * zeros a bio chain, starting at a specific offset
 */
static void zero_bio_chain(struct bio *chain, int start_ofs)

	unsigned long flags;

		bio_for_each_segment(bv, chain, i) {
			if (pos + bv->bv_len > start_ofs) {
				int remainder = max(start_ofs - pos, 0);
				buf = bvec_kmap_irq(bv, &flags);
				memset(buf + remainder, 0,
					bv->bv_len - remainder);
				bvec_kunmap_irq(buf, &flags);

		chain = chain->bi_next;
/*
 * similar to zero_bio_chain(), zeros data defined by a page array,
 * starting at the given byte offset from the start of the array and
 * continuing up to the given end offset.  The pages array is
 * assumed to be big enough to hold all bytes up to the end.
 */
static void zero_pages(struct page **pages, u64 offset, u64 end)
	struct page **page = &pages[offset >> PAGE_SHIFT];

	rbd_assert(end > offset);
	rbd_assert(end - offset <= (u64)SIZE_MAX);
	while (offset < end) {

		unsigned long flags;

		page_offset = (size_t)(offset & ~PAGE_MASK);
		length = min(PAGE_SIZE - page_offset, (size_t)(end - offset));
		local_irq_save(flags);
		kaddr = kmap_atomic(*page);
		memset(kaddr + page_offset, 0, length);
		kunmap_atomic(kaddr);
		local_irq_restore(flags);
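
/*
 * Worked example (not part of the original source): with 4 KiB pages,
 * zero_pages(pages, 0x1800, 0x2800) starts in pages[1]
 * (0x1800 >> PAGE_SHIFT), at page_offset 0x800, zeroes the
 * min(0x1000 - 0x800, 0x1000) == 0x800 bytes left in that page, then
 * advances to zero the first 0x800 bytes of pages[2].
 */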
/*
 * Clone a portion of a bio, starting at the given byte offset
 * and continuing for the number of bytes indicated.
 */
static struct bio *bio_clone_range(struct bio *bio_src,
					unsigned int offset,
					unsigned int len,
					gfp_t gfpmask)
	unsigned short end_idx;
	unsigned short vcnt;

	/* Handle the easy case for the caller */

	if (!offset && len == bio_src->bi_size)
		return bio_clone(bio_src, gfpmask);

	if (WARN_ON_ONCE(!len))

	if (WARN_ON_ONCE(len > bio_src->bi_size))

	if (WARN_ON_ONCE(offset > bio_src->bi_size - len))

	/* Find first affected segment... */

	__bio_for_each_segment(bv, bio_src, idx, 0) {
		if (resid < bv->bv_len)

		resid -= bv->bv_len;

	/* ...and the last affected segment */

	__bio_for_each_segment(bv, bio_src, end_idx, idx) {
		if (resid <= bv->bv_len)

		resid -= bv->bv_len;

	vcnt = end_idx - idx + 1;

	/* Build the clone */

	bio = bio_alloc(gfpmask, (unsigned int) vcnt);
		return NULL;	/* ENOMEM */

	bio->bi_bdev = bio_src->bi_bdev;
	bio->bi_sector = bio_src->bi_sector + (offset >> SECTOR_SHIFT);
	bio->bi_rw = bio_src->bi_rw;
	bio->bi_flags |= 1 << BIO_CLONED;

	/*
	 * Copy over our part of the bio_vec, then update the first
	 * and last (or only) entries.
	 */
	memcpy(&bio->bi_io_vec[0], &bio_src->bi_io_vec[idx],
		vcnt * sizeof (struct bio_vec));
	bio->bi_io_vec[0].bv_offset += voff;

		bio->bi_io_vec[0].bv_len -= voff;
		bio->bi_io_vec[vcnt - 1].bv_len = resid;

		bio->bi_io_vec[0].bv_len = len;

	bio->bi_vcnt = vcnt;
/*
 * Clone a portion of a bio chain, starting at the given byte offset
 * into the first bio in the source chain and continuing for the
 * number of bytes indicated.  The result is another bio chain of
 * exactly the given length, or a null pointer on error.
 *
 * The bio_src and offset parameters are both in-out.  On entry they
 * refer to the first source bio and the offset into that bio where
 * the start of data to be cloned is located.
 *
 * On return, bio_src is updated to refer to the bio in the source
 * chain that contains the first un-cloned byte, and *offset will
 * contain the offset of that byte within that bio.
 */
static struct bio *bio_chain_clone_range(struct bio **bio_src,
					unsigned int *offset,
					unsigned int len,
					gfp_t gfpmask)
	struct bio *bi = *bio_src;
	unsigned int off = *offset;
	struct bio *chain = NULL;

	/* Build up a chain of clone bios up to the limit */

	if (!bi || off >= bi->bi_size || !len)
		return NULL;		/* Nothing to clone */

		unsigned int bi_size;

			rbd_warn(NULL, "bio_chain exhausted with %u left", len);
			goto out_err;	/* EINVAL; ran out of bio's */

		bi_size = min_t(unsigned int, bi->bi_size - off, len);
		bio = bio_clone_range(bi, off, bi_size, gfpmask);
			goto out_err;	/* ENOMEM */

		end = &bio->bi_next;

		if (off == bi->bi_size) {

	bio_chain_put(chain);
/*
 * The default/initial value for all object request flags is 0.  For
 * each flag, once its value is set to 1 it is never reset to 0
 * again.
 */
static void obj_request_img_data_set(struct rbd_obj_request *obj_request)
	if (test_and_set_bit(OBJ_REQ_IMG_DATA, &obj_request->flags)) {
		struct rbd_device *rbd_dev;

		rbd_dev = obj_request->img_request->rbd_dev;
		rbd_warn(rbd_dev, "obj_request %p already marked img_data\n",

static bool obj_request_img_data_test(struct rbd_obj_request *obj_request)
	return test_bit(OBJ_REQ_IMG_DATA, &obj_request->flags) != 0;

static void obj_request_done_set(struct rbd_obj_request *obj_request)
	if (test_and_set_bit(OBJ_REQ_DONE, &obj_request->flags)) {
		struct rbd_device *rbd_dev = NULL;

		if (obj_request_img_data_test(obj_request))
			rbd_dev = obj_request->img_request->rbd_dev;
		rbd_warn(rbd_dev, "obj_request %p already marked done\n",

static bool obj_request_done_test(struct rbd_obj_request *obj_request)
	return test_bit(OBJ_REQ_DONE, &obj_request->flags) != 0;

/*
 * This sets the KNOWN flag after (possibly) setting the EXISTS
 * flag.  The latter is set based on the "exists" value provided.
 *
 * Note that for our purposes once an object exists it never goes
 * away again.  It's possible that the responses from two existence
 * checks are separated by the creation of the target object, and
 * the first ("doesn't exist") response arrives *after* the second
 * ("does exist").  In that case we ignore the second one.
 */
static void obj_request_existence_set(struct rbd_obj_request *obj_request,
					bool exists)
		set_bit(OBJ_REQ_EXISTS, &obj_request->flags);
	set_bit(OBJ_REQ_KNOWN, &obj_request->flags);

static bool obj_request_known_test(struct rbd_obj_request *obj_request)
	return test_bit(OBJ_REQ_KNOWN, &obj_request->flags) != 0;

static bool obj_request_exists_test(struct rbd_obj_request *obj_request)
	return test_bit(OBJ_REQ_EXISTS, &obj_request->flags) != 0;
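
/*
 * Illustrative summary (not part of the original source) of the
 * KNOWN/EXISTS pairing used above:
 *
 *	KNOWN == 0               no existence check has completed yet
 *	KNOWN == 1, EXISTS == 1  a check (or creation) saw the object
 *	KNOWN == 1, EXISTS == 0  a check came back -ENOENT
 *
 * Because flags are only ever set, a late "doesn't exist" reply can
 * never clear EXISTS once it has been recorded.
 */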
static void rbd_obj_request_get(struct rbd_obj_request *obj_request)
	dout("%s: obj %p (was %d)\n", __func__, obj_request,
		atomic_read(&obj_request->kref.refcount));
	kref_get(&obj_request->kref);

static void rbd_obj_request_destroy(struct kref *kref);
static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
	rbd_assert(obj_request != NULL);
	dout("%s: obj %p (was %d)\n", __func__, obj_request,
		atomic_read(&obj_request->kref.refcount));
	kref_put(&obj_request->kref, rbd_obj_request_destroy);

static void rbd_img_request_get(struct rbd_img_request *img_request)
	dout("%s: img %p (was %d)\n", __func__, img_request,
		atomic_read(&img_request->kref.refcount));
	kref_get(&img_request->kref);

static void rbd_img_request_destroy(struct kref *kref);
static void rbd_img_request_put(struct rbd_img_request *img_request)
	rbd_assert(img_request != NULL);
	dout("%s: img %p (was %d)\n", __func__, img_request,
		atomic_read(&img_request->kref.refcount));
	kref_put(&img_request->kref, rbd_img_request_destroy);
static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
	rbd_assert(obj_request->img_request == NULL);

	/* Image request now owns object's original reference */
	obj_request->img_request = img_request;
	obj_request->which = img_request->obj_request_count;
	rbd_assert(!obj_request_img_data_test(obj_request));
	obj_request_img_data_set(obj_request);
	rbd_assert(obj_request->which != BAD_WHICH);
	img_request->obj_request_count++;
	list_add_tail(&obj_request->links, &img_request->obj_requests);
	dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
		obj_request->which);

static inline void rbd_img_obj_request_del(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
	rbd_assert(obj_request->which != BAD_WHICH);

	dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
		obj_request->which);
	list_del(&obj_request->links);
	rbd_assert(img_request->obj_request_count > 0);
	img_request->obj_request_count--;
	rbd_assert(obj_request->which == img_request->obj_request_count);
	obj_request->which = BAD_WHICH;
	rbd_assert(obj_request_img_data_test(obj_request));
	rbd_assert(obj_request->img_request == img_request);
	obj_request->img_request = NULL;
	obj_request->callback = NULL;
	rbd_obj_request_put(obj_request);
static bool obj_request_type_valid(enum obj_request_type type)
	case OBJ_REQUEST_NODATA:
	case OBJ_REQUEST_BIO:
	case OBJ_REQUEST_PAGES:

static int rbd_obj_request_submit(struct ceph_osd_client *osdc,
				struct rbd_obj_request *obj_request)
	dout("%s: osdc %p obj %p\n", __func__, osdc, obj_request);

	return ceph_osdc_start_request(osdc, obj_request->osd_req, false);
static void rbd_img_request_complete(struct rbd_img_request *img_request)

	dout("%s: img %p\n", __func__, img_request);

	/*
	 * If no error occurred, compute the aggregate transfer
	 * count for the image request.  We could instead use
	 * atomic64_cmpxchg() to update it as each object request
	 * completes; not clear which way is better offhand.
	 */
	if (!img_request->result) {
		struct rbd_obj_request *obj_request;

		for_each_obj_request(img_request, obj_request)
			xferred += obj_request->xferred;
		img_request->xferred = xferred;

	if (img_request->callback)
		img_request->callback(img_request);

		rbd_img_request_put(img_request);

/* Caller is responsible for rbd_obj_request_destroy(obj_request) */

static int rbd_obj_request_wait(struct rbd_obj_request *obj_request)
	dout("%s: obj %p\n", __func__, obj_request);

	return wait_for_completion_interruptible(&obj_request->completion);
/*
 * The default/initial value for all image request flags is 0.  Each
 * is conditionally set to 1 at image request initialization time
 * and currently never changes thereafter.
 */
static void img_request_write_set(struct rbd_img_request *img_request)
	set_bit(IMG_REQ_WRITE, &img_request->flags);

static bool img_request_write_test(struct rbd_img_request *img_request)
	return test_bit(IMG_REQ_WRITE, &img_request->flags) != 0;

static void img_request_child_set(struct rbd_img_request *img_request)
	set_bit(IMG_REQ_CHILD, &img_request->flags);

static bool img_request_child_test(struct rbd_img_request *img_request)
	return test_bit(IMG_REQ_CHILD, &img_request->flags) != 0;

static void img_request_layered_set(struct rbd_img_request *img_request)
	set_bit(IMG_REQ_LAYERED, &img_request->flags);

static bool img_request_layered_test(struct rbd_img_request *img_request)
	return test_bit(IMG_REQ_LAYERED, &img_request->flags) != 0;
rbd_img_obj_request_read_callback(struct rbd_obj_request *obj_request)
	u64 xferred = obj_request->xferred;
	u64 length = obj_request->length;

	dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
		obj_request, obj_request->img_request, obj_request->result,

	/*
	 * ENOENT means a hole in the image.  We zero-fill the
	 * entire length of the request.  A short read also implies
	 * zero-fill to the end of the request.  Either way we
	 * update the xferred count to indicate the whole request
	 * was satisfied.
	 */
	rbd_assert(obj_request->type != OBJ_REQUEST_NODATA);
	if (obj_request->result == -ENOENT) {
		if (obj_request->type == OBJ_REQUEST_BIO)
			zero_bio_chain(obj_request->bio_list, 0);

			zero_pages(obj_request->pages, 0, length);
		obj_request->result = 0;
		obj_request->xferred = length;
	} else if (xferred < length && !obj_request->result) {
		if (obj_request->type == OBJ_REQUEST_BIO)
			zero_bio_chain(obj_request->bio_list, xferred);

			zero_pages(obj_request->pages, xferred, length);
		obj_request->xferred = length;

	obj_request_done_set(obj_request);
static void rbd_obj_request_complete(struct rbd_obj_request *obj_request)
	dout("%s: obj %p cb %p\n", __func__, obj_request,
		obj_request->callback);
	if (obj_request->callback)
		obj_request->callback(obj_request);

		complete_all(&obj_request->completion);

static void rbd_osd_trivial_callback(struct rbd_obj_request *obj_request)
	dout("%s: obj %p\n", __func__, obj_request);
	obj_request_done_set(obj_request);
static void rbd_osd_read_callback(struct rbd_obj_request *obj_request)
	struct rbd_img_request *img_request = NULL;
	struct rbd_device *rbd_dev = NULL;
	bool layered = false;

	if (obj_request_img_data_test(obj_request)) {
		img_request = obj_request->img_request;
		layered = img_request && img_request_layered_test(img_request);
		rbd_dev = img_request->rbd_dev;

	dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
		obj_request, img_request, obj_request->result,
		obj_request->xferred, obj_request->length);
	if (layered && obj_request->result == -ENOENT &&
			obj_request->img_offset < rbd_dev->parent_overlap)
		rbd_img_parent_read(obj_request);
	else if (img_request)
		rbd_img_obj_request_read_callback(obj_request);

		obj_request_done_set(obj_request);
static void rbd_osd_write_callback(struct rbd_obj_request *obj_request)
	dout("%s: obj %p result %d %llu\n", __func__, obj_request,
		obj_request->result, obj_request->length);
	/*
	 * There is no such thing as a successful short write.  Set
	 * it to our originally-requested length.
	 */
	obj_request->xferred = obj_request->length;
	obj_request_done_set(obj_request);

/*
 * For a simple stat call there's nothing to do.  We'll do more if
 * this is part of a write sequence for a layered image.
 */
static void rbd_osd_stat_callback(struct rbd_obj_request *obj_request)
	dout("%s: obj %p\n", __func__, obj_request);
	obj_request_done_set(obj_request);
static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
				struct ceph_msg *msg)
	struct rbd_obj_request *obj_request = osd_req->r_priv;

	dout("%s: osd_req %p msg %p\n", __func__, osd_req, msg);
	rbd_assert(osd_req == obj_request->osd_req);
	if (obj_request_img_data_test(obj_request)) {
		rbd_assert(obj_request->img_request);
		rbd_assert(obj_request->which != BAD_WHICH);

		rbd_assert(obj_request->which == BAD_WHICH);

	if (osd_req->r_result < 0)
		obj_request->result = osd_req->r_result;

	BUG_ON(osd_req->r_num_ops > 2);

	/*
	 * We support a 64-bit length, but ultimately it has to be
	 * passed to blk_end_request(), which takes an unsigned int.
	 */
	obj_request->xferred = osd_req->r_reply_op_len[0];
	rbd_assert(obj_request->xferred < (u64)UINT_MAX);
	opcode = osd_req->r_ops[0].op;
	case CEPH_OSD_OP_READ:
		rbd_osd_read_callback(obj_request);

	case CEPH_OSD_OP_WRITE:
		rbd_osd_write_callback(obj_request);

	case CEPH_OSD_OP_STAT:
		rbd_osd_stat_callback(obj_request);

	case CEPH_OSD_OP_CALL:
	case CEPH_OSD_OP_NOTIFY_ACK:
	case CEPH_OSD_OP_WATCH:
		rbd_osd_trivial_callback(obj_request);

		rbd_warn(NULL, "%s: unsupported op %hu\n",
			obj_request->object_name, (unsigned short) opcode);

	if (obj_request_done_test(obj_request))
		rbd_obj_request_complete(obj_request);
static void rbd_osd_req_format_read(struct rbd_obj_request *obj_request)
	struct rbd_img_request *img_request = obj_request->img_request;
	struct ceph_osd_request *osd_req = obj_request->osd_req;

	rbd_assert(osd_req != NULL);

	snap_id = img_request ? img_request->snap_id : CEPH_NOSNAP;
	ceph_osdc_build_request(osd_req, obj_request->offset,
			NULL, snap_id, NULL);

static void rbd_osd_req_format_write(struct rbd_obj_request *obj_request)
	struct rbd_img_request *img_request = obj_request->img_request;
	struct ceph_osd_request *osd_req = obj_request->osd_req;
	struct ceph_snap_context *snapc;
	struct timespec mtime = CURRENT_TIME;

	rbd_assert(osd_req != NULL);

	snapc = img_request ? img_request->snapc : NULL;
	ceph_osdc_build_request(osd_req, obj_request->offset,
			snapc, CEPH_NOSNAP, &mtime);
static struct ceph_osd_request *rbd_osd_req_create(
					struct rbd_device *rbd_dev,
					bool write_request,
					struct rbd_obj_request *obj_request)
	struct ceph_snap_context *snapc = NULL;
	struct ceph_osd_client *osdc;
	struct ceph_osd_request *osd_req;

	if (obj_request_img_data_test(obj_request)) {
		struct rbd_img_request *img_request = obj_request->img_request;

		rbd_assert(write_request ==
				img_request_write_test(img_request));

			snapc = img_request->snapc;

	/* Allocate and initialize the request, for the single op */

	osdc = &rbd_dev->rbd_client->client->osdc;
	osd_req = ceph_osdc_alloc_request(osdc, snapc, 1, false, GFP_ATOMIC);
		return NULL;	/* ENOMEM */

		osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;

		osd_req->r_flags = CEPH_OSD_FLAG_READ;

	osd_req->r_callback = rbd_osd_req_callback;
	osd_req->r_priv = obj_request;

	osd_req->r_oid_len = strlen(obj_request->object_name);
	rbd_assert(osd_req->r_oid_len < sizeof (osd_req->r_oid));
	memcpy(osd_req->r_oid, obj_request->object_name, osd_req->r_oid_len);

	osd_req->r_file_layout = rbd_dev->layout;	/* struct */
/*
 * Create a copyup osd request based on the information in the
 * object request supplied.  A copyup request has two osd ops:
 * a copyup method call and a "normal" write request.
 */
static struct ceph_osd_request *
rbd_osd_req_create_copyup(struct rbd_obj_request *obj_request)
	struct rbd_img_request *img_request;
	struct ceph_snap_context *snapc;
	struct rbd_device *rbd_dev;
	struct ceph_osd_client *osdc;
	struct ceph_osd_request *osd_req;

	rbd_assert(obj_request_img_data_test(obj_request));
	img_request = obj_request->img_request;
	rbd_assert(img_request);
	rbd_assert(img_request_write_test(img_request));

	/* Allocate and initialize the request, for the two ops */

	snapc = img_request->snapc;
	rbd_dev = img_request->rbd_dev;
	osdc = &rbd_dev->rbd_client->client->osdc;
	osd_req = ceph_osdc_alloc_request(osdc, snapc, 2, false, GFP_ATOMIC);
		return NULL;	/* ENOMEM */

	osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
	osd_req->r_callback = rbd_osd_req_callback;
	osd_req->r_priv = obj_request;

	osd_req->r_oid_len = strlen(obj_request->object_name);
	rbd_assert(osd_req->r_oid_len < sizeof (osd_req->r_oid));
	memcpy(osd_req->r_oid, obj_request->object_name, osd_req->r_oid_len);

	osd_req->r_file_layout = rbd_dev->layout;	/* struct */

static void rbd_osd_req_destroy(struct ceph_osd_request *osd_req)
	ceph_osdc_put_request(osd_req);
/* object_name is assumed to be a non-null pointer and NUL-terminated */

static struct rbd_obj_request *rbd_obj_request_create(const char *object_name,
						u64 offset, u64 length,
						enum obj_request_type type)
	struct rbd_obj_request *obj_request;

	rbd_assert(obj_request_type_valid(type));

	size = strlen(object_name) + 1;
	name = kmalloc(size, GFP_KERNEL);

	obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_KERNEL);

	obj_request->object_name = memcpy(name, object_name, size);
	obj_request->offset = offset;
	obj_request->length = length;
	obj_request->flags = 0;
	obj_request->which = BAD_WHICH;
	obj_request->type = type;
	INIT_LIST_HEAD(&obj_request->links);
	init_completion(&obj_request->completion);
	kref_init(&obj_request->kref);

	dout("%s: \"%s\" %llu/%llu %d -> obj %p\n", __func__, object_name,
		offset, length, (int)type, obj_request);
static void rbd_obj_request_destroy(struct kref *kref)
	struct rbd_obj_request *obj_request;

	obj_request = container_of(kref, struct rbd_obj_request, kref);

	dout("%s: obj %p\n", __func__, obj_request);

	rbd_assert(obj_request->img_request == NULL);
	rbd_assert(obj_request->which == BAD_WHICH);

	if (obj_request->osd_req)
		rbd_osd_req_destroy(obj_request->osd_req);

	rbd_assert(obj_request_type_valid(obj_request->type));
	switch (obj_request->type) {
	case OBJ_REQUEST_NODATA:
		break;		/* Nothing to do */
	case OBJ_REQUEST_BIO:
		if (obj_request->bio_list)
			bio_chain_put(obj_request->bio_list);

	case OBJ_REQUEST_PAGES:
		if (obj_request->pages)
			ceph_release_page_vector(obj_request->pages,
						obj_request->page_count);

	kfree(obj_request->object_name);
	obj_request->object_name = NULL;
	kmem_cache_free(rbd_obj_request_cache, obj_request);
/*
 * Caller is responsible for filling in the list of object requests
 * that comprises the image request, and the Linux request pointer
 * (if there is one).
 */
static struct rbd_img_request *rbd_img_request_create(
					struct rbd_device *rbd_dev,
					u64 offset, u64 length,
					bool write_request,
					bool child_request)
	struct rbd_img_request *img_request;

	img_request = kmem_cache_alloc(rbd_img_request_cache, GFP_ATOMIC);

	if (write_request) {
		down_read(&rbd_dev->header_rwsem);
		ceph_get_snap_context(rbd_dev->header.snapc);
		up_read(&rbd_dev->header_rwsem);

	img_request->rq = NULL;
	img_request->rbd_dev = rbd_dev;
	img_request->offset = offset;
	img_request->length = length;
	img_request->flags = 0;
	if (write_request) {
		img_request_write_set(img_request);
		img_request->snapc = rbd_dev->header.snapc;

		img_request->snap_id = rbd_dev->spec->snap_id;

		img_request_child_set(img_request);
	if (rbd_dev->parent_spec)
		img_request_layered_set(img_request);
	spin_lock_init(&img_request->completion_lock);
	img_request->next_completion = 0;
	img_request->callback = NULL;
	img_request->result = 0;
	img_request->obj_request_count = 0;
	INIT_LIST_HEAD(&img_request->obj_requests);
	kref_init(&img_request->kref);

	rbd_img_request_get(img_request);	/* Avoid a warning */
	rbd_img_request_put(img_request);	/* TEMPORARY */

	dout("%s: rbd_dev %p %s %llu/%llu -> img %p\n", __func__, rbd_dev,
		write_request ? "write" : "read", offset, length,
		img_request);
static void rbd_img_request_destroy(struct kref *kref)
	struct rbd_img_request *img_request;
	struct rbd_obj_request *obj_request;
	struct rbd_obj_request *next_obj_request;

	img_request = container_of(kref, struct rbd_img_request, kref);

	dout("%s: img %p\n", __func__, img_request);

	for_each_obj_request_safe(img_request, obj_request, next_obj_request)
		rbd_img_obj_request_del(img_request, obj_request);
	rbd_assert(img_request->obj_request_count == 0);

	if (img_request_write_test(img_request))
		ceph_put_snap_context(img_request->snapc);

	if (img_request_child_test(img_request))
		rbd_obj_request_put(img_request->obj_request);

	kmem_cache_free(rbd_img_request_cache, img_request);
static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request)
	struct rbd_img_request *img_request;
	unsigned int xferred;

	rbd_assert(obj_request_img_data_test(obj_request));
	img_request = obj_request->img_request;

	rbd_assert(obj_request->xferred <= (u64)UINT_MAX);
	xferred = (unsigned int)obj_request->xferred;
	result = obj_request->result;

		struct rbd_device *rbd_dev = img_request->rbd_dev;

		rbd_warn(rbd_dev, "%s %llx at %llx (%llx)\n",
			img_request_write_test(img_request) ? "write" : "read",
			obj_request->length, obj_request->img_offset,
			obj_request->offset);
		rbd_warn(rbd_dev, "  result %d xferred %x\n",
			result, xferred);
		if (!img_request->result)
			img_request->result = result;

	/* Image object requests don't own their page array */

	if (obj_request->type == OBJ_REQUEST_PAGES) {
		obj_request->pages = NULL;
		obj_request->page_count = 0;

	if (img_request_child_test(img_request)) {
		rbd_assert(img_request->obj_request != NULL);
		more = obj_request->which < img_request->obj_request_count - 1;

		rbd_assert(img_request->rq != NULL);
		more = blk_end_request(img_request->rq, result, xferred);
static void rbd_img_obj_callback(struct rbd_obj_request *obj_request)
	struct rbd_img_request *img_request;
	u32 which = obj_request->which;

	rbd_assert(obj_request_img_data_test(obj_request));
	img_request = obj_request->img_request;

	dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
	rbd_assert(img_request != NULL);
	rbd_assert(img_request->obj_request_count > 0);
	rbd_assert(which != BAD_WHICH);
	rbd_assert(which < img_request->obj_request_count);
	rbd_assert(which >= img_request->next_completion);

	spin_lock_irq(&img_request->completion_lock);
	if (which != img_request->next_completion)

	for_each_obj_request_from(img_request, obj_request) {

		rbd_assert(which < img_request->obj_request_count);

		if (!obj_request_done_test(obj_request))

		more = rbd_img_obj_end_request(obj_request);

	rbd_assert(more ^ (which == img_request->obj_request_count));
	img_request->next_completion = which;

	spin_unlock_irq(&img_request->completion_lock);

		rbd_img_request_complete(img_request);
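
/*
 * Illustrative scenario (not part of the original source): if an image
 * request has object requests 0, 1 and 2 and request 1 completes
 * first, which != next_completion (1 != 0), so the callback records
 * the completion and returns.  When request 0 later completes, the
 * loop above ends both 0 and 1 in index order before stopping at the
 * still-outstanding request 2.  This is what keeps blk_end_request()
 * fed with strictly in-order completions.
 */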
/*
 * Split up an image request into one or more object requests, each
 * to a different object.  The "type" parameter indicates whether
 * "data_desc" is the pointer to the head of a list of bio
 * structures, or the base of a page array.  In either case this
 * function assumes data_desc describes memory sufficient to hold
 * all data described by the image request.
 */
static int rbd_img_request_fill(struct rbd_img_request *img_request,
					enum obj_request_type type,
					void *data_desc)
	struct rbd_device *rbd_dev = img_request->rbd_dev;
	struct rbd_obj_request *obj_request = NULL;
	struct rbd_obj_request *next_obj_request;
	bool write_request = img_request_write_test(img_request);
	struct bio *bio_list;
	unsigned int bio_offset = 0;
	struct page **pages;

	dout("%s: img %p type %d data_desc %p\n", __func__, img_request,
		(int)type, data_desc);

	opcode = write_request ? CEPH_OSD_OP_WRITE : CEPH_OSD_OP_READ;
	img_offset = img_request->offset;
	resid = img_request->length;
	rbd_assert(resid > 0);

	if (type == OBJ_REQUEST_BIO) {
		bio_list = data_desc;
		rbd_assert(img_offset == bio_list->bi_sector << SECTOR_SHIFT);

		rbd_assert(type == OBJ_REQUEST_PAGES);

		struct ceph_osd_request *osd_req;
		const char *object_name;

		object_name = rbd_segment_name(rbd_dev, img_offset);

		offset = rbd_segment_offset(rbd_dev, img_offset);
		length = rbd_segment_length(rbd_dev, img_offset, resid);
		obj_request = rbd_obj_request_create(object_name,
						offset, length, type);
		/* object request has its own copy of the object name */
		rbd_segment_name_free(object_name);

		if (type == OBJ_REQUEST_BIO) {
			unsigned int clone_size;

			rbd_assert(length <= (u64)UINT_MAX);
			clone_size = (unsigned int)length;
			obj_request->bio_list =
					bio_chain_clone_range(&bio_list,
						&bio_offset, clone_size,
						GFP_ATOMIC);
			if (!obj_request->bio_list)

			unsigned int page_count;

			obj_request->pages = pages;
			page_count = (u32)calc_pages_for(offset, length);
			obj_request->page_count = page_count;
			if ((offset + length) & ~PAGE_MASK)
				page_count--;	/* more on last page */
			pages += page_count;

		osd_req = rbd_osd_req_create(rbd_dev, write_request,
						obj_request);

		obj_request->osd_req = osd_req;
		obj_request->callback = rbd_img_obj_callback;

		osd_req_op_extent_init(osd_req, 0, opcode, offset, length,
						0, 0);
		if (type == OBJ_REQUEST_BIO)
			osd_req_op_extent_osd_data_bio(osd_req, 0,
					obj_request->bio_list, length);

			osd_req_op_extent_osd_data_pages(osd_req, 0,
					obj_request->pages, length,
					offset & ~PAGE_MASK, false, false);

			rbd_osd_req_format_write(obj_request);

			rbd_osd_req_format_read(obj_request);

		obj_request->img_offset = img_offset;
		rbd_img_obj_request_add(img_request, obj_request);

		img_offset += length;

	rbd_obj_request_put(obj_request);

	for_each_obj_request_safe(img_request, obj_request, next_obj_request)
		rbd_obj_request_put(obj_request);
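
/*
 * Worked example (not part of the original source): with 4 MiB
 * objects, a 6 MiB request starting at image offset 3 MiB is split
 * into three object requests: 1 MiB at the tail of object 0, all
 * 4 MiB of object 1, and the first 1 MiB of object 2.  Each gets its
 * own osd request and completes independently through
 * rbd_img_obj_callback().
 */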
rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request)
	struct rbd_img_request *img_request;
	struct rbd_device *rbd_dev;

	rbd_assert(obj_request->type == OBJ_REQUEST_BIO);
	rbd_assert(obj_request_img_data_test(obj_request));
	img_request = obj_request->img_request;
	rbd_assert(img_request);

	rbd_dev = img_request->rbd_dev;
	rbd_assert(rbd_dev);
	length = (u64)1 << rbd_dev->header.obj_order;
	page_count = (u32)calc_pages_for(0, length);

	rbd_assert(obj_request->copyup_pages);
	ceph_release_page_vector(obj_request->copyup_pages, page_count);
	obj_request->copyup_pages = NULL;

	/*
	 * We want the transfer count to reflect the size of the
	 * original write request.  There is no such thing as a
	 * successful short write, so if the request was successful
	 * we can just set it to the originally-requested length.
	 */
	if (!obj_request->result)
		obj_request->xferred = obj_request->length;

	/* Finish up with the normal image object callback */

	rbd_img_obj_callback(obj_request);
rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)
	struct rbd_obj_request *orig_request;
	struct ceph_osd_request *osd_req;
	struct ceph_osd_client *osdc;
	struct rbd_device *rbd_dev;
	struct page **pages;

	rbd_assert(img_request_child_test(img_request));

	/* First get what we need from the image request */

	pages = img_request->copyup_pages;
	rbd_assert(pages != NULL);
	img_request->copyup_pages = NULL;

	orig_request = img_request->obj_request;
	rbd_assert(orig_request != NULL);
	rbd_assert(orig_request->type == OBJ_REQUEST_BIO);
	result = img_request->result;
	obj_size = img_request->length;
	xferred = img_request->xferred;

	rbd_dev = img_request->rbd_dev;
	rbd_assert(rbd_dev);
	rbd_assert(obj_size == (u64)1 << rbd_dev->header.obj_order);

	rbd_img_request_put(img_request);

	/* Allocate the new copyup osd request for the original request */

	rbd_assert(!orig_request->osd_req);
	osd_req = rbd_osd_req_create_copyup(orig_request);

	orig_request->osd_req = osd_req;
	orig_request->copyup_pages = pages;

	/* Initialize the copyup op */

	osd_req_op_cls_init(osd_req, 0, CEPH_OSD_OP_CALL, "rbd", "copyup");
	osd_req_op_cls_request_data_pages(osd_req, 0, pages, obj_size, 0,
						false, false);

	/* Then the original write request op */

	osd_req_op_extent_init(osd_req, 1, CEPH_OSD_OP_WRITE,
					orig_request->offset,
					orig_request->length, 0, 0);
	osd_req_op_extent_osd_data_bio(osd_req, 1, orig_request->bio_list,
					orig_request->length);

	rbd_osd_req_format_write(orig_request);

	/* All set, send it off. */

	orig_request->callback = rbd_img_obj_copyup_callback;
	osdc = &rbd_dev->rbd_client->client->osdc;
	result = rbd_obj_request_submit(osdc, orig_request);

	/* Record the error code and complete the request */

	orig_request->result = result;
	orig_request->xferred = 0;
	obj_request_done_set(orig_request);
	rbd_obj_request_complete(orig_request);
2229 * Read from the parent image the range of data that covers the
2230 * entire target of the given object request. This is used for
2231 * satisfying a layered image write request when the target of an
2232 * object request from the image request does not exist.
2234 * A page array big enough to hold the returned data is allocated
2235 * and supplied to rbd_img_request_fill() as the "data descriptor."
2236 * When the read completes, this page array will be transferred to
2237 * the original object request for the copyup operation.
2239 * If an error occurs, record it as the result of the original
2240 * object request and mark it done so it gets completed.
2242 static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request)
2244 struct rbd_img_request *img_request = NULL;
2245 struct rbd_img_request *parent_request = NULL;
2246 struct rbd_device *rbd_dev;
2249 struct page **pages = NULL;
2253 rbd_assert(obj_request_img_data_test(obj_request));
2254 rbd_assert(obj_request->type == OBJ_REQUEST_BIO);
2256 img_request = obj_request->img_request;
2257 rbd_assert(img_request != NULL);
2258 rbd_dev = img_request->rbd_dev;
2259 rbd_assert(rbd_dev->parent != NULL);
2262 * First things first. The original osd request is of no
2263 * use to us any more; we'll need a new one that can hold
2264 * the two ops in a copyup request. We'll get that later,
2265 * but for now we can release the old one.
2267 rbd_osd_req_destroy(obj_request->osd_req);
2268 obj_request->osd_req = NULL;
2271 * Determine the byte range covered by the object in the
2272 * child image to which the original request was to be sent.
2274 img_offset = obj_request->img_offset - obj_request->offset;
2275 length = (u64)1 << rbd_dev->header.obj_order;
2278 * There is no defined parent data beyond the parent
2279 * overlap, so limit what we read at that boundary if necessary.
2282 if (img_offset + length > rbd_dev->parent_overlap) {
2283 rbd_assert(img_offset < rbd_dev->parent_overlap);
2284 length = rbd_dev->parent_overlap - img_offset;
2288 * Allocate a page array big enough to receive the data read
2291 page_count = (u32)calc_pages_for(0, length);
2292 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2293 if (IS_ERR(pages)) {
2294 result = PTR_ERR(pages);
2300 parent_request = rbd_img_request_create(rbd_dev->parent,
2303 if (!parent_request)
2305 rbd_obj_request_get(obj_request);
2306 parent_request->obj_request = obj_request;
2308 result = rbd_img_request_fill(parent_request, OBJ_REQUEST_PAGES, pages);
2311 parent_request->copyup_pages = pages;
2313 parent_request->callback = rbd_img_obj_parent_read_full_callback;
2314 result = rbd_img_request_submit(parent_request);
2318 parent_request->copyup_pages = NULL;
2319 parent_request->obj_request = NULL;
2320 rbd_obj_request_put(obj_request);
2323 ceph_release_page_vector(pages, page_count);
2325 rbd_img_request_put(parent_request);
2326 obj_request->result = result;
2327 obj_request->xferred = 0;
2328 obj_request_done_set(obj_request);
2333 static void rbd_img_obj_exists_callback(struct rbd_obj_request *obj_request)
2335 struct rbd_obj_request *orig_request;
2338 rbd_assert(!obj_request_img_data_test(obj_request));
2341 * All we need from the object request is the original
2342 * request and the result of the STAT op. Grab those, then
2343 * we're done with the request.
2345 orig_request = obj_request->obj_request;
2346 obj_request->obj_request = NULL;
2347 rbd_assert(orig_request);
2348 rbd_assert(orig_request->img_request);
2350 result = obj_request->result;
2351 obj_request->result = 0;
2353 dout("%s: obj %p for obj %p result %d %llu/%llu\n", __func__,
2354 obj_request, orig_request, result,
2355 obj_request->xferred, obj_request->length);
2356 rbd_obj_request_put(obj_request);
2358 rbd_assert(orig_request);
2359 rbd_assert(orig_request->img_request);
2362 * Our only purpose here is to determine whether the object
2363 * exists, and we don't want to treat the non-existence as
2364 * an error. If something else comes back, transfer the
2365 * error to the original request and complete it now.
2368 obj_request_existence_set(orig_request, true);
2369 } else if (result == -ENOENT) {
2370 obj_request_existence_set(orig_request, false);
2371 } else if (result) {
2372 orig_request->result = result;
2377 * Resubmit the original request now that we have recorded
2378 * whether the target object exists.
2380 orig_request->result = rbd_img_obj_request_submit(orig_request);
2382 if (orig_request->result)
2383 rbd_obj_request_complete(orig_request);
2384 rbd_obj_request_put(orig_request);
2387 static int rbd_img_obj_exists_submit(struct rbd_obj_request *obj_request)
2389 struct rbd_obj_request *stat_request;
2390 struct rbd_device *rbd_dev;
2391 struct ceph_osd_client *osdc;
2392 struct page **pages = NULL;
2398 * The response data for a STAT call consists of:
 *     le64 length;
 *     struct {
 *         le32 tv_sec;
 *         le32 tv_nsec;
 *     } mtime;
2405 size = sizeof (__le64) + sizeof (__le32) + sizeof (__le32);
2406 page_count = (u32)calc_pages_for(0, size);
2407 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2409 return PTR_ERR(pages);
2412 stat_request = rbd_obj_request_create(obj_request->object_name, 0, 0,
2417 rbd_obj_request_get(obj_request);
2418 stat_request->obj_request = obj_request;
2419 stat_request->pages = pages;
2420 stat_request->page_count = page_count;
2422 rbd_assert(obj_request->img_request);
2423 rbd_dev = obj_request->img_request->rbd_dev;
2424 stat_request->osd_req = rbd_osd_req_create(rbd_dev, false,
2426 if (!stat_request->osd_req)
2428 stat_request->callback = rbd_img_obj_exists_callback;
2430 osd_req_op_init(stat_request->osd_req, 0, CEPH_OSD_OP_STAT);
2431 osd_req_op_raw_data_in_pages(stat_request->osd_req, 0, pages, size, 0,
2433 rbd_osd_req_format_read(stat_request);
2435 osdc = &rbd_dev->rbd_client->client->osdc;
2436 ret = rbd_obj_request_submit(osdc, stat_request);
2439 rbd_obj_request_put(obj_request);
2444 static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request)
2446 struct rbd_img_request *img_request;
2447 struct rbd_device *rbd_dev;
2450 rbd_assert(obj_request_img_data_test(obj_request));
2452 img_request = obj_request->img_request;
2453 rbd_assert(img_request);
2454 rbd_dev = img_request->rbd_dev;
2457 * Only writes to layered images need special handling.
2458 * Reads and non-layered writes are simple object requests.
2459 * Layered writes that start beyond the end of the overlap
2460 * with the parent have no parent data, so they too are
2461 * simple object requests. Finally, if the target object is
2462 * known to already exist, its parent data has already been
2463 * copied, so a write to the object can also be handled as a
2464 * simple object request.
2466 if (!img_request_write_test(img_request) ||
2467 !img_request_layered_test(img_request) ||
2468 rbd_dev->parent_overlap <= obj_request->img_offset ||
2469 ((known = obj_request_known_test(obj_request)) &&
2470 obj_request_exists_test(obj_request))) {
2472 struct rbd_device *rbd_dev;
2473 struct ceph_osd_client *osdc;
2475 rbd_dev = obj_request->img_request->rbd_dev;
2476 osdc = &rbd_dev->rbd_client->client->osdc;
2478 return rbd_obj_request_submit(osdc, obj_request);
2482 * It's a layered write. The target object might exist but
2483 * we may not know that yet. If we know it doesn't exist,
2484 * start by reading the data for the full target object from
2485 * the parent so we can use it for a copyup to the target.
2488 return rbd_img_obj_parent_read_full(obj_request);
2490 /* We don't know whether the target exists. Go find out. */
2492 return rbd_img_obj_exists_submit(obj_request);
2495 static int rbd_img_request_submit(struct rbd_img_request *img_request)
2497 struct rbd_obj_request *obj_request;
2498 struct rbd_obj_request *next_obj_request;
2500 dout("%s: img %p\n", __func__, img_request);
2501 for_each_obj_request_safe(img_request, obj_request, next_obj_request) {
2504 ret = rbd_img_obj_request_submit(obj_request);
2512 static void rbd_img_parent_read_callback(struct rbd_img_request *img_request)
2514 struct rbd_obj_request *obj_request;
2515 struct rbd_device *rbd_dev;
2518 rbd_assert(img_request_child_test(img_request));
2520 obj_request = img_request->obj_request;
2521 rbd_assert(obj_request);
2522 rbd_assert(obj_request->img_request);
2524 obj_request->result = img_request->result;
2525 if (obj_request->result)
2529 * We need to zero anything beyond the parent overlap
2530 * boundary. Since rbd_img_obj_request_read_callback()
2531 * will zero anything beyond the end of a short read, an
2532 * easy way to do this is to pretend the data from the
2533 * parent came up short--ending at the overlap boundary.
2535 rbd_assert(obj_request->img_offset < U64_MAX - obj_request->length);
2536 obj_end = obj_request->img_offset + obj_request->length;
2537 rbd_dev = obj_request->img_request->rbd_dev;
2538 if (obj_end > rbd_dev->parent_overlap) {
2541 if (obj_request->img_offset < rbd_dev->parent_overlap)
2542 xferred = rbd_dev->parent_overlap -
2543 obj_request->img_offset;
2545 obj_request->xferred = min(img_request->xferred, xferred);
2547 obj_request->xferred = img_request->xferred;
2550 rbd_img_obj_request_read_callback(obj_request);
2551 rbd_obj_request_complete(obj_request);
2554 static void rbd_img_parent_read(struct rbd_obj_request *obj_request)
2556 struct rbd_device *rbd_dev;
2557 struct rbd_img_request *img_request;
2560 rbd_assert(obj_request_img_data_test(obj_request));
2561 rbd_assert(obj_request->img_request != NULL);
2562 rbd_assert(obj_request->result == (s32) -ENOENT);
2563 rbd_assert(obj_request->type == OBJ_REQUEST_BIO);
2565 rbd_dev = obj_request->img_request->rbd_dev;
2566 rbd_assert(rbd_dev->parent != NULL);
2567 /* rbd_read_finish(obj_request, obj_request->length); */
2568 img_request = rbd_img_request_create(rbd_dev->parent,
2569 obj_request->img_offset,
2570 obj_request->length,
2576 rbd_obj_request_get(obj_request);
2577 img_request->obj_request = obj_request;
2579 result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
2580 obj_request->bio_list);
2584 img_request->callback = rbd_img_parent_read_callback;
2585 result = rbd_img_request_submit(img_request);
2592 rbd_img_request_put(img_request);
2593 obj_request->result = result;
2594 obj_request->xferred = 0;
2595 obj_request_done_set(obj_request);
2598 static int rbd_obj_notify_ack(struct rbd_device *rbd_dev, u64 notify_id)
2600 struct rbd_obj_request *obj_request;
2601 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2604 obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
2605 OBJ_REQUEST_NODATA);
2610 obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, obj_request);
2611 if (!obj_request->osd_req)
2613 obj_request->callback = rbd_obj_request_put;
2615 osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_NOTIFY_ACK,
2617 rbd_osd_req_format_read(obj_request);
2619 ret = rbd_obj_request_submit(osdc, obj_request);
2622 rbd_obj_request_put(obj_request);
2627 static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
2629 struct rbd_device *rbd_dev = (struct rbd_device *)data;
2634 dout("%s: \"%s\" notify_id %llu opcode %u\n", __func__,
2635 rbd_dev->header_name, (unsigned long long)notify_id,
2636 (unsigned int)opcode);
2637 (void)rbd_dev_refresh(rbd_dev);
2639 rbd_obj_notify_ack(rbd_dev, notify_id);
2643 * Request sync osd watch/unwatch. The value of "start" determines
2644 * whether a watch request is being initiated or torn down.
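/*
 * Lifecycle sketch: a call with start == 1 registers a lingering
 * watch on the header object.  Header-change notifications then
 * arrive via rbd_watch_cb(), which refreshes the device and
 * acknowledges the notification with rbd_obj_notify_ack().  A later
 * call with start == 0 tears the watch back down.
 */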
2646 static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev, int start)
2648 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2649 struct rbd_obj_request *obj_request;
2652 rbd_assert(start ^ !!rbd_dev->watch_event);
2653 rbd_assert(start ^ !!rbd_dev->watch_request);
2656 ret = ceph_osdc_create_event(osdc, rbd_watch_cb, rbd_dev,
2657 &rbd_dev->watch_event);
2660 rbd_assert(rbd_dev->watch_event != NULL);
2664 obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
2665 OBJ_REQUEST_NODATA);
2669 obj_request->osd_req = rbd_osd_req_create(rbd_dev, true, obj_request);
2670 if (!obj_request->osd_req)
2674 ceph_osdc_set_request_linger(osdc, obj_request->osd_req);
2676 ceph_osdc_unregister_linger_request(osdc,
2677 rbd_dev->watch_request->osd_req);
2679 osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_WATCH,
2680 rbd_dev->watch_event->cookie, 0, start);
2681 rbd_osd_req_format_write(obj_request);
2683 ret = rbd_obj_request_submit(osdc, obj_request);
2686 ret = rbd_obj_request_wait(obj_request);
2689 ret = obj_request->result;
2694 * A watch request is set to linger, so the underlying osd
2695 * request won't go away until we unregister it. We retain
2696 * a pointer to the object request during that time (in
2697 * rbd_dev->watch_request), so we'll keep a reference to
2698 * it. We'll drop that reference (below) after we've unregistered it.
2702 rbd_dev->watch_request = obj_request;
2707 /* We have successfully torn down the watch request */
2709 rbd_obj_request_put(rbd_dev->watch_request);
2710 rbd_dev->watch_request = NULL;
2712 /* Cancel the event if we're tearing down, or on error */
2713 ceph_osdc_cancel_event(rbd_dev->watch_event);
2714 rbd_dev->watch_event = NULL;
2716 rbd_obj_request_put(obj_request);
2722 * Synchronous osd object method call. Returns the number of bytes
2723 * returned in the inbound buffer, or a negative error code.
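/*
 * For example (sketch only; this mirrors the get_object_prefix
 * caller further below), fetching a format 2 image's object prefix
 * amounts to:
 *
 *	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
 *				  "rbd", "get_object_prefix", NULL, 0,
 *				  reply_buf, RBD_OBJ_PREFIX_LEN_MAX);
 */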
2725 static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
2726 const char *object_name,
2727 const char *class_name,
2728 const char *method_name,
2729 const void *outbound,
2730 size_t outbound_size,
2732 size_t inbound_size)
2734 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2735 struct rbd_obj_request *obj_request;
2736 struct page **pages;
2741 * Method calls are ultimately read operations. The result
2742 * should be placed into the inbound buffer provided. They
2743 * also supply outbound data--parameters for the object
2744 * method. Currently if this is present it will be a snapshot id.
2747 page_count = (u32)calc_pages_for(0, inbound_size);
2748 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2750 return PTR_ERR(pages);
2753 obj_request = rbd_obj_request_create(object_name, 0, inbound_size,
2758 obj_request->pages = pages;
2759 obj_request->page_count = page_count;
2761 obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, obj_request);
2762 if (!obj_request->osd_req)
2765 osd_req_op_cls_init(obj_request->osd_req, 0, CEPH_OSD_OP_CALL,
2766 class_name, method_name);
2767 if (outbound_size) {
2768 struct ceph_pagelist *pagelist;
2770 pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS);
2774 ceph_pagelist_init(pagelist);
2775 ceph_pagelist_append(pagelist, outbound, outbound_size);
2776 osd_req_op_cls_request_data_pagelist(obj_request->osd_req, 0,
2779 osd_req_op_cls_response_data_pages(obj_request->osd_req, 0,
2780 obj_request->pages, inbound_size,
2782 rbd_osd_req_format_read(obj_request);
2784 ret = rbd_obj_request_submit(osdc, obj_request);
2787 ret = rbd_obj_request_wait(obj_request);
2791 ret = obj_request->result;
2795 rbd_assert(obj_request->xferred < (u64)INT_MAX);
2796 ret = (int)obj_request->xferred;
2797 ceph_copy_from_page_vector(pages, inbound, 0, obj_request->xferred);
2800 rbd_obj_request_put(obj_request);
2802 ceph_release_page_vector(pages, page_count);
2807 static void rbd_request_fn(struct request_queue *q)
2808 __releases(q->queue_lock) __acquires(q->queue_lock)
2810 struct rbd_device *rbd_dev = q->queuedata;
2811 bool read_only = rbd_dev->mapping.read_only;
2815 while ((rq = blk_fetch_request(q))) {
2816 bool write_request = rq_data_dir(rq) == WRITE;
2817 struct rbd_img_request *img_request;
2821 /* Ignore any non-FS requests that filter through. */
2823 if (rq->cmd_type != REQ_TYPE_FS) {
2824 dout("%s: non-fs request type %d\n", __func__,
2825 (int) rq->cmd_type);
2826 __blk_end_request_all(rq, 0);
2830 /* Ignore/skip any zero-length requests */
2832 offset = (u64) blk_rq_pos(rq) << SECTOR_SHIFT;
2833 length = (u64) blk_rq_bytes(rq);
2836 dout("%s: zero-length request\n", __func__);
2837 __blk_end_request_all(rq, 0);
2841 spin_unlock_irq(q->queue_lock);
2843 /* Disallow writes to a read-only device */
2845 if (write_request) {
2849 rbd_assert(rbd_dev->spec->snap_id == CEPH_NOSNAP);
2853 * Quit early if the mapped snapshot no longer
2854 * exists. It's still possible the snapshot will
2855 * have disappeared by the time our request arrives
2856 * at the osd, but there's no sense in sending it if we know it doesn't exist.
2859 if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags)) {
2860 dout("request for non-existent snapshot");
2861 rbd_assert(rbd_dev->spec->snap_id != CEPH_NOSNAP);
2867 if (offset && length > U64_MAX - offset + 1) {
2868 rbd_warn(rbd_dev, "bad request range (%llu~%llu)\n",
2870 goto end_request; /* Shouldn't happen */
2874 img_request = rbd_img_request_create(rbd_dev, offset, length,
2875 write_request, false);
2879 img_request->rq = rq;
2881 result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
2884 result = rbd_img_request_submit(img_request);
2886 rbd_img_request_put(img_request);
2888 spin_lock_irq(q->queue_lock);
2890 rbd_warn(rbd_dev, "%s %llx at %llx result %d\n",
2891 write_request ? "write" : "read",
2892 length, offset, result);
2894 __blk_end_request_all(rq, result);
2900 * a queue callback. Makes sure that we don't create a bio that spans across
2901 * multiple osd objects. One exception would be single-page bios,
2902 * which we handle later at bio_chain_clone_range()
2904 static int rbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bmd,
2905 struct bio_vec *bvec)
2907 struct rbd_device *rbd_dev = q->queuedata;
2908 sector_t sector_offset;
2909 sector_t sectors_per_obj;
2910 sector_t obj_sector_offset;
2914 * Find how far into its rbd object the partition-relative
2915 * bio start sector is offset, relative to the enclosing device.
2918 sector_offset = get_start_sect(bmd->bi_bdev) + bmd->bi_sector;
2919 sectors_per_obj = 1 << (rbd_dev->header.obj_order - SECTOR_SHIFT);
2920 obj_sector_offset = sector_offset & (sectors_per_obj - 1);
2923 * Compute the number of bytes from that offset to the end
2924 * of the object. Account for what's already used by the bio.
2926 ret = (int) (sectors_per_obj - obj_sector_offset) << SECTOR_SHIFT;
2927 if (ret > bmd->bi_size)
2928 ret -= bmd->bi_size;
2933 * Don't send back more than was asked for. And if the bio
2934 * was empty, let the whole thing through because: "Note
2935 * that a block device *must* allow a single page to be
2936 * added to an empty bio."
2938 rbd_assert(bvec->bv_len <= PAGE_SIZE);
2939 if (ret > (int) bvec->bv_len || !bmd->bi_size)
2940 ret = (int) bvec->bv_len;
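/*
 * Worked example (illustrative numbers only): with the common 4 MB
 * objects (obj_order 22), sectors_per_obj = 1 << (22 - 9) = 8192.
 * A bio starting 8000 sectors into its object, with nothing yet in
 * the bio (bi_size == 0), may grow by at most
 * (8192 - 8000) << 9 = 98304 bytes before crossing into the next
 * object.
 */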
2945 static void rbd_free_disk(struct rbd_device *rbd_dev)
2947 struct gendisk *disk = rbd_dev->disk;
2952 rbd_dev->disk = NULL;
2953 if (disk->flags & GENHD_FL_UP) {
2956 blk_cleanup_queue(disk->queue);
2961 static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
2962 const char *object_name,
2963 u64 offset, u64 length, void *buf)
2966 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2967 struct rbd_obj_request *obj_request;
2968 struct page **pages = NULL;
2973 page_count = (u32) calc_pages_for(offset, length);
2974 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2976 ret = PTR_ERR(pages);
2979 obj_request = rbd_obj_request_create(object_name, offset, length,
2984 obj_request->pages = pages;
2985 obj_request->page_count = page_count;
2987 obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, obj_request);
2988 if (!obj_request->osd_req)
2991 osd_req_op_extent_init(obj_request->osd_req, 0, CEPH_OSD_OP_READ,
2992 offset, length, 0, 0);
2993 osd_req_op_extent_osd_data_pages(obj_request->osd_req, 0,
2995 obj_request->length,
2996 obj_request->offset & ~PAGE_MASK,
2998 rbd_osd_req_format_read(obj_request);
3000 ret = rbd_obj_request_submit(osdc, obj_request);
3003 ret = rbd_obj_request_wait(obj_request);
3007 ret = obj_request->result;
3011 rbd_assert(obj_request->xferred <= (u64) SIZE_MAX);
3012 size = (size_t) obj_request->xferred;
3013 ceph_copy_from_page_vector(pages, buf, 0, size);
3014 rbd_assert(size <= (size_t)INT_MAX);
3018 rbd_obj_request_put(obj_request);
3020 ceph_release_page_vector(pages, page_count);
3026 * Read the complete header for the given rbd device.
3028 * Returns a pointer to a dynamically-allocated buffer containing
3029 * the complete and validated header.
3033 * Returns a pointer-coded errno if a failure occurs.
3035 static struct rbd_image_header_ondisk *
3036 rbd_dev_v1_header_read(struct rbd_device *rbd_dev)
3038 struct rbd_image_header_ondisk *ondisk = NULL;
3045 * The complete header will include an array of its 64-bit
3046 * snapshot ids, followed by the names of those snapshots as
3047 * a contiguous block of NUL-terminated strings. Note that
3048 * the number of snapshots could change by the time we read
3049 * it in, in which case we re-read it.
3056 size = sizeof (*ondisk);
3057 size += snap_count * sizeof (struct rbd_image_snap_ondisk);
3059 ondisk = kmalloc(size, GFP_KERNEL);
3061 return ERR_PTR(-ENOMEM);
3063 ret = rbd_obj_read_sync(rbd_dev, rbd_dev->header_name,
3067 if ((size_t)ret < size) {
3069 rbd_warn(rbd_dev, "short header read (want %zd got %d)",
3073 if (!rbd_dev_ondisk_valid(ondisk)) {
3075 rbd_warn(rbd_dev, "invalid header");
3079 names_size = le64_to_cpu(ondisk->snap_names_len);
3080 want_count = snap_count;
3081 snap_count = le32_to_cpu(ondisk->snap_count);
3082 } while (snap_count != want_count);
3089 return ERR_PTR(ret);
3093 * reload the ondisk header
3095 static int rbd_read_header(struct rbd_device *rbd_dev,
3096 struct rbd_image_header *header)
3098 struct rbd_image_header_ondisk *ondisk;
3101 ondisk = rbd_dev_v1_header_read(rbd_dev);
3103 return PTR_ERR(ondisk);
3104 ret = rbd_header_from_disk(header, ondisk);
3110 static void rbd_update_mapping_size(struct rbd_device *rbd_dev)
3112 if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
3115 if (rbd_dev->mapping.size != rbd_dev->header.image_size) {
3118 rbd_dev->mapping.size = rbd_dev->header.image_size;
3119 size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
3120 dout("setting size to %llu sectors", (unsigned long long)size);
3121 set_capacity(rbd_dev->disk, size);
3126 * only read the first part of the ondisk header, without the snaps info
3128 static int rbd_dev_v1_refresh(struct rbd_device *rbd_dev)
3131 struct rbd_image_header h;
3133 ret = rbd_read_header(rbd_dev, &h);
3137 down_write(&rbd_dev->header_rwsem);
3139 /* Update image size, and check for resize of mapped image */
3140 rbd_dev->header.image_size = h.image_size;
3141 rbd_update_mapping_size(rbd_dev);
3143 /* rbd_dev->header.object_prefix shouldn't change */
3144 kfree(rbd_dev->header.snap_sizes);
3145 kfree(rbd_dev->header.snap_names);
3146 /* osd requests may still refer to snapc */
3147 ceph_put_snap_context(rbd_dev->header.snapc);
3149 rbd_dev->header.image_size = h.image_size;
3150 rbd_dev->header.snapc = h.snapc;
3151 rbd_dev->header.snap_names = h.snap_names;
3152 rbd_dev->header.snap_sizes = h.snap_sizes;
3153 /* Free the extra copy of the object prefix */
3154 if (strcmp(rbd_dev->header.object_prefix, h.object_prefix))
3155 rbd_warn(rbd_dev, "object prefix changed (ignoring)");
3156 kfree(h.object_prefix);
3158 up_write(&rbd_dev->header_rwsem);
3164 * Clear the rbd device's EXISTS flag if the snapshot it's mapped to
3165 * has disappeared from the (just updated) snapshot context.
3167 static void rbd_exists_validate(struct rbd_device *rbd_dev)
3171 if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags))
3174 snap_id = rbd_dev->spec->snap_id;
3175 if (snap_id == CEPH_NOSNAP)
3178 if (rbd_dev_snap_index(rbd_dev, snap_id) == BAD_SNAP_INDEX)
3179 clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
3182 static int rbd_dev_refresh(struct rbd_device *rbd_dev)
3187 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
3188 image_size = rbd_dev->header.image_size;
3189 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
3190 if (rbd_dev->image_format == 1)
3191 ret = rbd_dev_v1_refresh(rbd_dev);
3193 ret = rbd_dev_v2_refresh(rbd_dev);
3195 /* If it's a mapped snapshot, validate its EXISTS flag */
3197 rbd_exists_validate(rbd_dev);
3198 mutex_unlock(&ctl_mutex);
3200 rbd_warn(rbd_dev, "got notification but failed to"
3201 " update snaps: %d\n", ret);
3202 if (image_size != rbd_dev->header.image_size)
3203 revalidate_disk(rbd_dev->disk);
3208 static int rbd_init_disk(struct rbd_device *rbd_dev)
3210 struct gendisk *disk;
3211 struct request_queue *q;
3214 /* create gendisk info */
3215 disk = alloc_disk(RBD_MINORS_PER_MAJOR);
3219 snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
3221 disk->major = rbd_dev->major;
3222 disk->first_minor = 0;
3223 disk->fops = &rbd_bd_ops;
3224 disk->private_data = rbd_dev;
3226 q = blk_init_queue(rbd_request_fn, &rbd_dev->lock);
3230 /* We use the default size, but let's be explicit about it. */
3231 blk_queue_physical_block_size(q, SECTOR_SIZE);
3233 /* set io sizes to object size */
3234 segment_size = rbd_obj_bytes(&rbd_dev->header);
3235 blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE);
3236 blk_queue_max_segment_size(q, segment_size);
3237 blk_queue_io_min(q, segment_size);
3238 blk_queue_io_opt(q, segment_size);
3240 blk_queue_merge_bvec(q, rbd_merge_bvec);
3243 q->queuedata = rbd_dev;
3245 rbd_dev->disk = disk;
3258 static struct rbd_device *dev_to_rbd_dev(struct device *dev)
3260 return container_of(dev, struct rbd_device, dev);
3263 static ssize_t rbd_size_show(struct device *dev,
3264 struct device_attribute *attr, char *buf)
3266 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3268 return sprintf(buf, "%llu\n",
3269 (unsigned long long)rbd_dev->mapping.size);
3273 * Note this shows the features for whatever's mapped, which is not
3274 * necessarily the base image.
3276 static ssize_t rbd_features_show(struct device *dev,
3277 struct device_attribute *attr, char *buf)
3279 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3281 return sprintf(buf, "0x%016llx\n",
3282 (unsigned long long)rbd_dev->mapping.features);
3285 static ssize_t rbd_major_show(struct device *dev,
3286 struct device_attribute *attr, char *buf)
3288 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3291 return sprintf(buf, "%d\n", rbd_dev->major);
3293 return sprintf(buf, "(none)\n");
3297 static ssize_t rbd_client_id_show(struct device *dev,
3298 struct device_attribute *attr, char *buf)
3300 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3302 return sprintf(buf, "client%lld\n",
3303 ceph_client_id(rbd_dev->rbd_client->client));
3306 static ssize_t rbd_pool_show(struct device *dev,
3307 struct device_attribute *attr, char *buf)
3309 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3311 return sprintf(buf, "%s\n", rbd_dev->spec->pool_name);
3314 static ssize_t rbd_pool_id_show(struct device *dev,
3315 struct device_attribute *attr, char *buf)
3317 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3319 return sprintf(buf, "%llu\n",
3320 (unsigned long long) rbd_dev->spec->pool_id);
3323 static ssize_t rbd_name_show(struct device *dev,
3324 struct device_attribute *attr, char *buf)
3326 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3328 if (rbd_dev->spec->image_name)
3329 return sprintf(buf, "%s\n", rbd_dev->spec->image_name);
3331 return sprintf(buf, "(unknown)\n");
3334 static ssize_t rbd_image_id_show(struct device *dev,
3335 struct device_attribute *attr, char *buf)
3337 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3339 return sprintf(buf, "%s\n", rbd_dev->spec->image_id);
3343 * Shows the name of the currently-mapped snapshot (or
3344 * RBD_SNAP_HEAD_NAME for the base image).
3346 static ssize_t rbd_snap_show(struct device *dev,
3347 struct device_attribute *attr,
3350 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3352 return sprintf(buf, "%s\n", rbd_dev->spec->snap_name);
3356 * For an rbd v2 image, shows the pool id, image id, and snapshot id
3357 * for the parent image. If there is no parent, simply shows
3358 * "(no parent image)".
3360 static ssize_t rbd_parent_show(struct device *dev,
3361 struct device_attribute *attr,
3364 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3365 struct rbd_spec *spec = rbd_dev->parent_spec;
3370 return sprintf(buf, "(no parent image)\n");
3372 count = sprintf(bufp, "pool_id %llu\npool_name %s\n",
3373 (unsigned long long) spec->pool_id, spec->pool_name);
3378 count = sprintf(bufp, "image_id %s\nimage_name %s\n", spec->image_id,
3379 spec->image_name ? spec->image_name : "(unknown)");
3384 count = sprintf(bufp, "snap_id %llu\nsnap_name %s\n",
3385 (unsigned long long) spec->snap_id, spec->snap_name);
3390 count = sprintf(bufp, "overlap %llu\n", rbd_dev->parent_overlap);
3395 return (ssize_t) (bufp - buf);
3398 static ssize_t rbd_image_refresh(struct device *dev,
3399 struct device_attribute *attr,
3403 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3406 ret = rbd_dev_refresh(rbd_dev);
3408 return ret < 0 ? ret : size;
3411 static DEVICE_ATTR(size, S_IRUGO, rbd_size_show, NULL);
3412 static DEVICE_ATTR(features, S_IRUGO, rbd_features_show, NULL);
3413 static DEVICE_ATTR(major, S_IRUGO, rbd_major_show, NULL);
3414 static DEVICE_ATTR(client_id, S_IRUGO, rbd_client_id_show, NULL);
3415 static DEVICE_ATTR(pool, S_IRUGO, rbd_pool_show, NULL);
3416 static DEVICE_ATTR(pool_id, S_IRUGO, rbd_pool_id_show, NULL);
3417 static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL);
3418 static DEVICE_ATTR(image_id, S_IRUGO, rbd_image_id_show, NULL);
3419 static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh);
3420 static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL);
3421 static DEVICE_ATTR(parent, S_IRUGO, rbd_parent_show, NULL);
3423 static struct attribute *rbd_attrs[] = {
3424 &dev_attr_size.attr,
3425 &dev_attr_features.attr,
3426 &dev_attr_major.attr,
3427 &dev_attr_client_id.attr,
3428 &dev_attr_pool.attr,
3429 &dev_attr_pool_id.attr,
3430 &dev_attr_name.attr,
3431 &dev_attr_image_id.attr,
3432 &dev_attr_current_snap.attr,
3433 &dev_attr_parent.attr,
3434 &dev_attr_refresh.attr,
3438 static struct attribute_group rbd_attr_group = {
3442 static const struct attribute_group *rbd_attr_groups[] = {
3447 static void rbd_sysfs_dev_release(struct device *dev)
3451 static struct device_type rbd_device_type = {
3453 .groups = rbd_attr_groups,
3454 .release = rbd_sysfs_dev_release,
3457 static struct rbd_spec *rbd_spec_get(struct rbd_spec *spec)
3459 kref_get(&spec->kref);
3464 static void rbd_spec_free(struct kref *kref);
3465 static void rbd_spec_put(struct rbd_spec *spec)
3468 kref_put(&spec->kref, rbd_spec_free);
3471 static struct rbd_spec *rbd_spec_alloc(void)
3473 struct rbd_spec *spec;
3475 spec = kzalloc(sizeof (*spec), GFP_KERNEL);
3478 kref_init(&spec->kref);
3483 static void rbd_spec_free(struct kref *kref)
3485 struct rbd_spec *spec = container_of(kref, struct rbd_spec, kref);
3487 kfree(spec->pool_name);
3488 kfree(spec->image_id);
3489 kfree(spec->image_name);
3490 kfree(spec->snap_name);
3494 static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
3495 struct rbd_spec *spec)
3497 struct rbd_device *rbd_dev;
3499 rbd_dev = kzalloc(sizeof (*rbd_dev), GFP_KERNEL);
3503 spin_lock_init(&rbd_dev->lock);
3505 INIT_LIST_HEAD(&rbd_dev->node);
3506 init_rwsem(&rbd_dev->header_rwsem);
3508 rbd_dev->spec = spec;
3509 rbd_dev->rbd_client = rbdc;
3511 /* Initialize the layout used for all rbd requests */
3513 rbd_dev->layout.fl_stripe_unit = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
3514 rbd_dev->layout.fl_stripe_count = cpu_to_le32(1);
3515 rbd_dev->layout.fl_object_size = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
3516 rbd_dev->layout.fl_pg_pool = cpu_to_le32((u32) spec->pool_id);
3521 static void rbd_dev_destroy(struct rbd_device *rbd_dev)
3523 rbd_put_client(rbd_dev->rbd_client);
3524 rbd_spec_put(rbd_dev->spec);
3529 * Get the size and object order for an image snapshot, or if
3530 * snap_id is CEPH_NOSNAP, gets this information for the base image.
3533 static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
3534 u8 *order, u64 *snap_size)
3536 __le64 snapid = cpu_to_le64(snap_id);
3541 } __attribute__ ((packed)) size_buf = { 0 };
3543 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3545 &snapid, sizeof (snapid),
3546 &size_buf, sizeof (size_buf));
3547 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3550 if (ret < sizeof (size_buf))
3554 *order = size_buf.order;
3555 *snap_size = le64_to_cpu(size_buf.size);
3557 dout(" snap_id 0x%016llx order = %u, snap_size = %llu\n",
3558 (unsigned long long)snap_id, (unsigned int)*order,
3559 (unsigned long long)*snap_size);
3564 static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev)
3566 return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
3567 &rbd_dev->header.obj_order,
3568 &rbd_dev->header.image_size);
3571 static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
3577 reply_buf = kzalloc(RBD_OBJ_PREFIX_LEN_MAX, GFP_KERNEL);
3581 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3582 "rbd", "get_object_prefix", NULL, 0,
3583 reply_buf, RBD_OBJ_PREFIX_LEN_MAX);
3584 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3589 rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
3590 p + ret, NULL, GFP_NOIO);
3593 if (IS_ERR(rbd_dev->header.object_prefix)) {
3594 ret = PTR_ERR(rbd_dev->header.object_prefix);
3595 rbd_dev->header.object_prefix = NULL;
3597 dout(" object_prefix = %s\n", rbd_dev->header.object_prefix);
3605 static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
3608 __le64 snapid = cpu_to_le64(snap_id);
3612 } __attribute__ ((packed)) features_buf = { 0 };
3616 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3617 "rbd", "get_features",
3618 &snapid, sizeof (snapid),
3619 &features_buf, sizeof (features_buf));
3620 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3623 if (ret < sizeof (features_buf))
3626 incompat = le64_to_cpu(features_buf.incompat);
3627 if (incompat & ~RBD_FEATURES_SUPPORTED)
3630 *snap_features = le64_to_cpu(features_buf.features);
3632 dout(" snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n",
3633 (unsigned long long)snap_id,
3634 (unsigned long long)*snap_features,
3635 (unsigned long long)le64_to_cpu(features_buf.incompat));
3640 static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
3642 return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
3643 &rbd_dev->header.features);
3646 static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
3648 struct rbd_spec *parent_spec;
3650 void *reply_buf = NULL;
3658 parent_spec = rbd_spec_alloc();
3662 size = sizeof (__le64) + /* pool_id */
3663 sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX + /* image_id */
3664 sizeof (__le64) + /* snap_id */
3665 sizeof (__le64); /* overlap */
3666 reply_buf = kmalloc(size, GFP_KERNEL);
3672 snapid = cpu_to_le64(CEPH_NOSNAP);
3673 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3674 "rbd", "get_parent",
3675 &snapid, sizeof (snapid),
3677 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3682 end = reply_buf + ret;
3684 ceph_decode_64_safe(&p, end, parent_spec->pool_id, out_err);
3685 if (parent_spec->pool_id == CEPH_NOPOOL)
3686 goto out; /* No parent? No problem. */
3688 /* The ceph file layout needs to fit pool id in 32 bits */
3691 if (parent_spec->pool_id > (u64)U32_MAX) {
3692 rbd_warn(NULL, "parent pool id too large (%llu > %u)\n",
3693 (unsigned long long)parent_spec->pool_id, U32_MAX);
3697 image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
3698 if (IS_ERR(image_id)) {
3699 ret = PTR_ERR(image_id);
3702 parent_spec->image_id = image_id;
3703 ceph_decode_64_safe(&p, end, parent_spec->snap_id, out_err);
3704 ceph_decode_64_safe(&p, end, overlap, out_err);
3706 rbd_dev->parent_overlap = overlap;
3707 rbd_dev->parent_spec = parent_spec;
3708 parent_spec = NULL; /* rbd_dev now owns this */
3713 rbd_spec_put(parent_spec);
3718 static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
3722 __le64 stripe_count;
3723 } __attribute__ ((packed)) striping_info_buf = { 0 };
3724 size_t size = sizeof (striping_info_buf);
3731 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3732 "rbd", "get_stripe_unit_count", NULL, 0,
3733 (char *)&striping_info_buf, size);
3734 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3741 * We don't actually support the "fancy striping" feature
3742 * (STRIPINGV2) yet, but if the striping sizes are the
3743 * defaults the behavior is the same as before. So find
3744 * out, and only fail if the image has non-default values.
3747 obj_size = (u64)1 << rbd_dev->header.obj_order;
3748 p = &striping_info_buf;
3749 stripe_unit = ceph_decode_64(&p);
3750 if (stripe_unit != obj_size) {
3751 rbd_warn(rbd_dev, "unsupported stripe unit "
3752 "(got %llu want %llu)",
3753 stripe_unit, obj_size);
3756 stripe_count = ceph_decode_64(&p);
3757 if (stripe_count != 1) {
3758 rbd_warn(rbd_dev, "unsupported stripe count "
3759 "(got %llu want 1)", stripe_count);
3762 rbd_dev->header.stripe_unit = stripe_unit;
3763 rbd_dev->header.stripe_count = stripe_count;
3768 static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
3770 size_t image_id_size;
3775 void *reply_buf = NULL;
3777 char *image_name = NULL;
3780 rbd_assert(!rbd_dev->spec->image_name);
3782 len = strlen(rbd_dev->spec->image_id);
3783 image_id_size = sizeof (__le32) + len;
3784 image_id = kmalloc(image_id_size, GFP_KERNEL);
3789 end = image_id + image_id_size;
3790 ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32)len);
3792 size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX;
3793 reply_buf = kmalloc(size, GFP_KERNEL);
3797 ret = rbd_obj_method_sync(rbd_dev, RBD_DIRECTORY,
3798 "rbd", "dir_get_name",
3799 image_id, image_id_size,
3804 end = reply_buf + ret;
3806 image_name = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
3807 if (IS_ERR(image_name))
3810 dout("%s: name is %s len is %zd\n", __func__, image_name, len);
3818 static u64 rbd_v1_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
3820 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
3821 const char *snap_name;
3824 /* Skip over names until we find the one we are looking for */
3826 snap_name = rbd_dev->header.snap_names;
3827 while (which < snapc->num_snaps) {
3828 if (!strcmp(name, snap_name))
3829 return snapc->snaps[which];
3830 snap_name += strlen(snap_name) + 1;
3836 static u64 rbd_v2_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
3838 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
3843 for (which = 0; !found && which < snapc->num_snaps; which++) {
3844 const char *snap_name;
3846 snap_id = snapc->snaps[which];
3847 snap_name = rbd_dev_v2_snap_name(rbd_dev, snap_id);
3848 if (IS_ERR(snap_name))
3850 found = !strcmp(name, snap_name);
3853 return found ? snap_id : CEPH_NOSNAP;
3857 * Assumes name is never RBD_SNAP_HEAD_NAME; returns CEPH_NOSNAP if
3858 * no snapshot by that name is found, or if an error occurs.
3860 static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
3862 if (rbd_dev->image_format == 1)
3863 return rbd_v1_snap_id_by_name(rbd_dev, name);
3865 return rbd_v2_snap_id_by_name(rbd_dev, name);
3869 * When an rbd image has a parent image, it is identified by the
3870 * pool, image, and snapshot ids (not names). This function fills
3871 * in the names for those ids. (It's OK if we can't figure out the
3872 * name for an image id, but the pool and snapshot ids should always
3873 * exist and have names.) All names in an rbd spec are dynamically allocated.
3876 * When an image being mapped (not a parent) is probed, we have the
3877 * pool name and pool id, image name and image id, and the snapshot
3878 * name. The only thing we're missing is the snapshot id.
3880 static int rbd_dev_spec_update(struct rbd_device *rbd_dev)
3882 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3883 struct rbd_spec *spec = rbd_dev->spec;
3884 const char *pool_name;
3885 const char *image_name;
3886 const char *snap_name;
3890 * An image being mapped will have the pool name (etc.), but
3891 * we need to look up the snapshot id.
3893 if (spec->pool_name) {
3894 if (strcmp(spec->snap_name, RBD_SNAP_HEAD_NAME)) {
3897 snap_id = rbd_snap_id_by_name(rbd_dev, spec->snap_name);
3898 if (snap_id == CEPH_NOSNAP)
3900 spec->snap_id = snap_id;
3902 spec->snap_id = CEPH_NOSNAP;
3908 /* Get the pool name; we have to make our own copy of this */
3910 pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, spec->pool_id);
3912 rbd_warn(rbd_dev, "no pool with id %llu", spec->pool_id);
3915 pool_name = kstrdup(pool_name, GFP_KERNEL);
3919 /* Fetch the image name; tolerate failure here */
3921 image_name = rbd_dev_image_name(rbd_dev);
3923 rbd_warn(rbd_dev, "unable to get image name");
3925 /* Look up the snapshot name, and make a copy */
3927 snap_name = rbd_snap_name(rbd_dev, spec->snap_id);
3933 spec->pool_name = pool_name;
3934 spec->image_name = image_name;
3935 spec->snap_name = snap_name;
3945 static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
3954 struct ceph_snap_context *snapc;
3958 * We'll need room for the seq value (maximum snapshot id),
3959 * snapshot count, and array of that many snapshot ids.
3960 * For now we have a fixed upper limit on the number we're
3961 * prepared to receive.
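/*
 * The encoded reply is expected to look like this (a sketch,
 * matching the decode sequence below):
 *
 *	le64 seq;
 *	le32 snap_count;
 *	le64 snaps[snap_count];
 */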
3963 size = sizeof (__le64) + sizeof (__le32) +
3964 RBD_MAX_SNAP_COUNT * sizeof (__le64);
3965 reply_buf = kzalloc(size, GFP_KERNEL);
3969 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3970 "rbd", "get_snapcontext", NULL, 0,
3972 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3977 end = reply_buf + ret;
3979 ceph_decode_64_safe(&p, end, seq, out);
3980 ceph_decode_32_safe(&p, end, snap_count, out);
3983 * Make sure the reported number of snapshot ids wouldn't go
3984 * beyond the end of our buffer. But before checking that,
3985 * make sure the computed size of the snapshot context we
3986 * allocate is representable in a size_t.
3988 if (snap_count > (SIZE_MAX - sizeof (struct ceph_snap_context))
3993 if (!ceph_has_room(&p, end, snap_count * sizeof (__le64)))
3997 snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
4003 for (i = 0; i < snap_count; i++)
4004 snapc->snaps[i] = ceph_decode_64(&p);
4006 rbd_dev->header.snapc = snapc;
4008 dout(" snap context seq = %llu, snap_count = %u\n",
4009 (unsigned long long)seq, (unsigned int)snap_count);
4016 static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
4027 size = sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN;
4028 reply_buf = kmalloc(size, GFP_KERNEL);
4030 return ERR_PTR(-ENOMEM);
4032 snapid = cpu_to_le64(snap_id);
4033 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
4034 "rbd", "get_snapshot_name",
4035 &snapid, sizeof (snapid),
4037 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4039 snap_name = ERR_PTR(ret);
4044 end = reply_buf + ret;
4045 snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
4046 if (IS_ERR(snap_name))
4049 dout(" snap_id 0x%016llx snap_name = %s\n",
4050 (unsigned long long)snap_id, snap_name);
4057 static int rbd_dev_v2_refresh(struct rbd_device *rbd_dev)
4061 down_write(&rbd_dev->header_rwsem);
4063 ret = rbd_dev_v2_image_size(rbd_dev);
4066 rbd_update_mapping_size(rbd_dev);
4068 ret = rbd_dev_v2_snap_context(rbd_dev);
4069 dout("rbd_dev_v2_snap_context returned %d\n", ret);
4073 up_write(&rbd_dev->header_rwsem);
4078 static int rbd_bus_add_dev(struct rbd_device *rbd_dev)
4083 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
4085 dev = &rbd_dev->dev;
4086 dev->bus = &rbd_bus_type;
4087 dev->type = &rbd_device_type;
4088 dev->parent = &rbd_root_dev;
4089 dev->release = rbd_dev_device_release;
4090 dev_set_name(dev, "%d", rbd_dev->dev_id);
4091 ret = device_register(dev);
4093 mutex_unlock(&ctl_mutex);
4098 static void rbd_bus_del_dev(struct rbd_device *rbd_dev)
4100 device_unregister(&rbd_dev->dev);
4103 static atomic64_t rbd_dev_id_max = ATOMIC64_INIT(0);
4106 * Get a unique rbd identifier for the given new rbd_dev, and add
4107 * the rbd_dev to the global list. The minimum rbd id is 1.
4109 static void rbd_dev_id_get(struct rbd_device *rbd_dev)
4111 rbd_dev->dev_id = atomic64_inc_return(&rbd_dev_id_max);
4113 spin_lock(&rbd_dev_list_lock);
4114 list_add_tail(&rbd_dev->node, &rbd_dev_list);
4115 spin_unlock(&rbd_dev_list_lock);
4116 dout("rbd_dev %p given dev id %llu\n", rbd_dev,
4117 (unsigned long long) rbd_dev->dev_id);
4121 * Remove an rbd_dev from the global list, and record that its
4122 * identifier is no longer in use.
4124 static void rbd_dev_id_put(struct rbd_device *rbd_dev)
4126 struct list_head *tmp;
4127 int rbd_id = rbd_dev->dev_id;
4130 rbd_assert(rbd_id > 0);
4132 dout("rbd_dev %p released dev id %llu\n", rbd_dev,
4133 (unsigned long long) rbd_dev->dev_id);
4134 spin_lock(&rbd_dev_list_lock);
4135 list_del_init(&rbd_dev->node);
4138 * If the id being "put" is not the current maximum, there
4139 * is nothing special we need to do.
4141 if (rbd_id != atomic64_read(&rbd_dev_id_max)) {
4142 spin_unlock(&rbd_dev_list_lock);
4147 * We need to update the current maximum id. Search the
4148 * list to find out what it is. We're more likely to find
4149 * the maximum at the end, so search the list backward.
4152 list_for_each_prev(tmp, &rbd_dev_list) {
4153 struct rbd_device *rbd_dev;
4155 rbd_dev = list_entry(tmp, struct rbd_device, node);
4156 if (rbd_dev->dev_id > max_id)
4157 max_id = rbd_dev->dev_id;
4159 spin_unlock(&rbd_dev_list_lock);
4162 * The max id could have been updated by rbd_dev_id_get(), in
4163 * which case it now accurately reflects the new maximum.
4164 * Be careful not to overwrite the maximum value in that case.
4167 atomic64_cmpxchg(&rbd_dev_id_max, rbd_id, max_id);
4168 dout(" max dev id has been reset\n");
4172 * Skips over white space at *buf, and updates *buf to point to the
4173 * first found non-space character (if any). Returns the length of
4174 * the token (string of non-white space characters) found. Note
4175 * that *buf must be terminated with '\0'.
4177 static inline size_t next_token(const char **buf)
4180 * These are the characters that produce nonzero for
4181 * isspace() in the "C" and "POSIX" locales.
4183 const char *spaces = " \f\n\r\t\v";
4185 *buf += strspn(*buf, spaces); /* Find start of token */
4187 return strcspn(*buf, spaces); /* Return token length */
4191 * Finds the next token in *buf, and if the provided token buffer is
4192 * big enough, copies the found token into it. The result, if
4193 * copied, is guaranteed to be terminated with '\0'. Note that *buf
4194 * must be terminated with '\0' on entry.
4196 * Returns the length of the token found (not including the '\0').
4197 * Return value will be 0 if no token is found, and it will be >=
4198 * token_size if the token would not fit.
4200 * The *buf pointer will be updated to point beyond the end of the
4201 * found token. Note that this occurs even if the token buffer is
4202 * too small to hold it.
4204 static inline size_t copy_token(const char **buf,
4210 len = next_token(buf);
4211 if (len < token_size) {
4212 memcpy(token, *buf, len);
4213 *(token + len) = '\0';
4221 * Finds the next token in *buf, dynamically allocates a buffer big
4222 * enough to hold a copy of it, and copies the token into the new
4223 * buffer. The copy is guaranteed to be terminated with '\0'. Note
4224 * that a duplicate buffer is created even for a zero-length token.
4226 * Returns a pointer to the newly-allocated duplicate, or a null
4227 * pointer if memory for the duplicate was not available. If
4228 * the lenp argument is a non-null pointer, the length of the token
4229 * (not including the '\0') is returned in *lenp.
4231 * If successful, the *buf pointer will be updated to point beyond
4232 * the end of the found token.
4234 * Note: uses GFP_KERNEL for allocation.
4236 static inline char *dup_token(const char **buf, size_t *lenp)
4241 len = next_token(buf);
4242 dup = kmemdup(*buf, len + 1, GFP_KERNEL);
4245 *(dup + len) = '\0';
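/*
 * For example (illustrative values only): with *buf pointing at
 * "  rbd myimage", next_token(&buf) advances *buf past the leading
 * spaces to "rbd myimage" and returns 3; a subsequent
 * dup_token(&buf, &len) returns a kmalloc'd copy of "rbd" with
 * len == 3 and leaves *buf pointing at " myimage".
 */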
4255 * Parse the options provided for an "rbd add" (i.e., rbd image
4256 * mapping) request. These arrive via a write to /sys/bus/rbd/add,
4257 * and the data written is passed here via a NUL-terminated buffer.
4258 * Returns 0 if successful or an error code otherwise.
4260 * The information extracted from these options is recorded in
4261 * the other parameters which return dynamically-allocated structures:
4264 * The address of a pointer that will refer to a ceph options
4265 * structure. Caller must release the returned pointer using
4266 * ceph_destroy_options() when it is no longer needed.
4268 * Address of an rbd options pointer. Fully initialized by
4269 * this function; caller must release with kfree().
4271 * Address of an rbd image specification pointer. Fully
4272 * initialized by this function based on parsed options.
4273 * Caller must release with rbd_spec_put().
4275 * The options passed take this form:
4276 * <mon_addrs> <options> <pool_name> <image_name> [<snap_id>]
4279 * A comma-separated list of one or more monitor addresses.
4280 * A monitor address is an ip address, optionally followed
4281 * by a port number (separated by a colon).
4282 * I.e.: ip1[:port1][,ip2[:port2]...]
4284 * A comma-separated list of ceph and/or rbd options.
4286 * The name of the rados pool containing the rbd image.
4288 * The name of the image in that pool to map.
4290 * An optional snapshot id. If provided, the mapping will
4291 * present data from the image at the time that snapshot was
4292 * created. The image head is used if no snapshot id is
4293 * provided. Snapshot mappings are always read-only.
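/*
 * For example, a map request might look like this (the monitor
 * address, option, pool, image, and snapshot names here are
 * illustrative only):
 *
 *	echo "1.2.3.4:6789 name=admin rbd myimage mysnap" \
 *		> /sys/bus/rbd/add
 */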
4295 static int rbd_add_parse_args(const char *buf,
4296 struct ceph_options **ceph_opts,
4297 struct rbd_options **opts,
4298 struct rbd_spec **rbd_spec)
4302 const char *mon_addrs;
4304 size_t mon_addrs_size;
4305 struct rbd_spec *spec = NULL;
4306 struct rbd_options *rbd_opts = NULL;
4307 struct ceph_options *copts;
4310 /* The first four tokens are required */
4312 len = next_token(&buf);
4314 rbd_warn(NULL, "no monitor address(es) provided");
4318 mon_addrs_size = len + 1;
4322 options = dup_token(&buf, NULL);
4326 rbd_warn(NULL, "no options provided");
4330 spec = rbd_spec_alloc();
4334 spec->pool_name = dup_token(&buf, NULL);
4335 if (!spec->pool_name)
4337 if (!*spec->pool_name) {
4338 rbd_warn(NULL, "no pool name provided");
4342 spec->image_name = dup_token(&buf, NULL);
4343 if (!spec->image_name)
4345 if (!*spec->image_name) {
4346 rbd_warn(NULL, "no image name provided");
4351 * Snapshot name is optional; default is to use "-"
4352 * (indicating the head/no snapshot).
4354 len = next_token(&buf);
4356 buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */
4357 len = sizeof (RBD_SNAP_HEAD_NAME) - 1;
4358 } else if (len > RBD_MAX_SNAP_NAME_LEN) {
4359 ret = -ENAMETOOLONG;
4362 snap_name = kmemdup(buf, len + 1, GFP_KERNEL);
4365 *(snap_name + len) = '\0';
4366 spec->snap_name = snap_name;
4368 /* Initialize all rbd options to the defaults */
4370 rbd_opts = kzalloc(sizeof (*rbd_opts), GFP_KERNEL);
4374 rbd_opts->read_only = RBD_READ_ONLY_DEFAULT;
4376 copts = ceph_parse_options(options, mon_addrs,
4377 mon_addrs + mon_addrs_size - 1,
4378 parse_rbd_opts_token, rbd_opts);
4379 if (IS_ERR(copts)) {
4380 ret = PTR_ERR(copts);
4401 * An rbd format 2 image has a unique identifier, distinct from the
4402 * name given to it by the user. Internally, that identifier is
4403 * what's used to specify the names of objects related to the image.
4405 * A special "rbd id" object is used to map an rbd image name to its
4406 * id. If that object doesn't exist, then there is no v2 rbd image
4407 * with the supplied name.
4409 * This function will record the given rbd_dev's image_id field if
4410 * it can be determined, and in that case will return 0. If any
4411 * errors occur a negative errno will be returned and the rbd_dev's
4412 * image_id field will be unchanged (and should be NULL).
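/*
 * For example (assuming the usual "rbd_id." value of RBD_ID_PREFIX
 * from rbd_types.h), an image named "foo" has its id stored in an
 * object named "rbd_id.foo", built by the sprintf() below.
 */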
4414 static int rbd_dev_image_id(struct rbd_device *rbd_dev)
4423 * When probing a parent image, the image id is already
4424 * known (and the image name likely is not). There's no
4425 * need to fetch the image id again in this case. We
4426 * do still need to set the image format though.
4428 if (rbd_dev->spec->image_id) {
4429 rbd_dev->image_format = *rbd_dev->spec->image_id ? 2 : 1;
4435 * First, see if the format 2 image id file exists, and if
4436 * so, get the image's persistent id from it.
4438 size = sizeof (RBD_ID_PREFIX) + strlen(rbd_dev->spec->image_name);
4439 object_name = kmalloc(size, GFP_NOIO);
4442 sprintf(object_name, "%s%s", RBD_ID_PREFIX, rbd_dev->spec->image_name);
4443 dout("rbd id object name is %s\n", object_name);
4445 /* Response will be an encoded string, which includes a length */
4447 size = sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX;
4448 response = kzalloc(size, GFP_NOIO);
4454 /* If it doesn't exist we'll assume it's a format 1 image */
4456 ret = rbd_obj_method_sync(rbd_dev, object_name,
4457 "rbd", "get_id", NULL, 0,
4458 response, RBD_IMAGE_ID_LEN_MAX);
4459 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4460 if (ret == -ENOENT) {
4461 image_id = kstrdup("", GFP_KERNEL);
4462 ret = image_id ? 0 : -ENOMEM;
4464 rbd_dev->image_format = 1;
4465 } else if (ret > sizeof (__le32)) {
4468 image_id = ceph_extract_encoded_string(&p, p + ret,
4470 ret = IS_ERR(image_id) ? PTR_ERR(image_id) : 0;
4472 rbd_dev->image_format = 2;
4478 rbd_dev->spec->image_id = image_id;
4479 dout("image_id is %s\n", image_id);
4488 /* Undo whatever state changes are made by v1 or v2 image probe */
4490 static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
4492 struct rbd_image_header *header;
4494 rbd_dev_remove_parent(rbd_dev);
4495 rbd_spec_put(rbd_dev->parent_spec);
4496 rbd_dev->parent_spec = NULL;
4497 rbd_dev->parent_overlap = 0;
4499 /* Free dynamic fields from the header, then zero it out */
4501 header = &rbd_dev->header;
4502 ceph_put_snap_context(header->snapc);
4503 kfree(header->snap_sizes);
4504 kfree(header->snap_names);
4505 kfree(header->object_prefix);
4506 memset(header, 0, sizeof (*header));
4509 static int rbd_dev_v1_probe(struct rbd_device *rbd_dev)
4513 /* Populate rbd image metadata */
4515 ret = rbd_read_header(rbd_dev, &rbd_dev->header);
4519 /* Version 1 images have no parent (no layering) */
4521 rbd_dev->parent_spec = NULL;
4522 rbd_dev->parent_overlap = 0;
4524 dout("discovered version 1 image, header name is %s\n",
4525 rbd_dev->header_name);
4530 kfree(rbd_dev->header_name);
4531 rbd_dev->header_name = NULL;
4532 kfree(rbd_dev->spec->image_id);
4533 rbd_dev->spec->image_id = NULL;
4538 static int rbd_dev_v2_probe(struct rbd_device *rbd_dev)
4542 ret = rbd_dev_v2_image_size(rbd_dev);
4546 /* Get the object prefix (a.k.a. block_name) for the image */
4548 ret = rbd_dev_v2_object_prefix(rbd_dev);
4552 /* Get and check the features for the image */
4554 ret = rbd_dev_v2_features(rbd_dev);
4558 /* If the image supports layering, get the parent info */
4560 if (rbd_dev->header.features & RBD_FEATURE_LAYERING) {
4561 ret = rbd_dev_v2_parent_info(rbd_dev);
4566 * Don't print a warning for parent images. We can
4567 * tell it's a parent at this point because we won't know its pool
4568 * name yet (just its pool id).
4570 if (rbd_dev->spec->pool_name)
4571 rbd_warn(rbd_dev, "WARNING: kernel layering "
4572 "is EXPERIMENTAL!");
4575 /* If the image supports fancy striping, get its parameters */
4577 if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) {
4578 ret = rbd_dev_v2_striping_info(rbd_dev);
4583 /* crypto and compression type aren't (yet) supported for v2 images */
4585 rbd_dev->header.crypt_type = 0;
4586 rbd_dev->header.comp_type = 0;
4588 /* Get the snapshot context */
4590 ret = rbd_dev_v2_snap_context(rbd_dev);
4594 dout("discovered version 2 image, header name is %s\n",
4595 rbd_dev->header_name);
4599 rbd_dev->parent_overlap = 0;
4600 rbd_spec_put(rbd_dev->parent_spec);
4601 rbd_dev->parent_spec = NULL;
4602 kfree(rbd_dev->header_name);
4603 rbd_dev->header_name = NULL;
4604 kfree(rbd_dev->header.object_prefix);
4605 rbd_dev->header.object_prefix = NULL;
4610 static int rbd_dev_probe_parent(struct rbd_device *rbd_dev)
4612 struct rbd_device *parent = NULL;
4613 struct rbd_spec *parent_spec;
4614 struct rbd_client *rbdc;
4617 if (!rbd_dev->parent_spec)
4620 * We need to pass a reference to the client and the parent
4621 * spec when creating the parent rbd_dev. Images related by
4622 * parent/child relationships always share both.
4624 parent_spec = rbd_spec_get(rbd_dev->parent_spec);
4625 rbdc = __rbd_get_client(rbd_dev->rbd_client);
4628 parent = rbd_dev_create(rbdc, parent_spec);
4632 ret = rbd_dev_image_probe(parent);
4635 rbd_dev->parent = parent;
4640 rbd_spec_put(rbd_dev->parent_spec);
4641 kfree(rbd_dev->header_name);
4642 rbd_dev_destroy(parent);
4644 rbd_put_client(rbdc);
4645 rbd_spec_put(parent_spec);
static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
{
	int ret;

	ret = rbd_dev_mapping_set(rbd_dev);
	if (ret)
		return ret;

	/* generate unique id: find highest unique id, add one */
	rbd_dev_id_get(rbd_dev);

	/* Fill in the device name, now that we have its id. */
	BUILD_BUG_ON(DEV_NAME_LEN
			< sizeof (RBD_DRV_NAME) + MAX_INT_FORMAT_WIDTH);
	sprintf(rbd_dev->name, "%s%d", RBD_DRV_NAME, rbd_dev->dev_id);

	/* Get our block major device number. */
	ret = register_blkdev(0, rbd_dev->name);
	if (ret < 0)
		goto err_out_id;
	rbd_dev->major = ret;

	/* Set up the blkdev mapping. */
	ret = rbd_init_disk(rbd_dev);
	if (ret)
		goto err_out_blkdev;

	ret = rbd_bus_add_dev(rbd_dev);
	if (ret)
		goto err_out_disk;

	/* Everything's ready.  Announce the disk to the world. */
	set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
	set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
	add_disk(rbd_dev->disk);

	pr_info("%s: added with size 0x%llx\n", rbd_dev->disk->disk_name,
		(unsigned long long) rbd_dev->mapping.size);
	return ret;

err_out_disk:
	rbd_free_disk(rbd_dev);
err_out_blkdev:
	unregister_blkdev(rbd_dev->major, rbd_dev->name);
err_out_id:
	rbd_dev_id_put(rbd_dev);
	rbd_dev_mapping_clear(rbd_dev);
	return ret;
}
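
/*
 * For example, the first image mapped gets dev_id 0, so its name is
 * "rbd0" and its disk appears as /dev/rbd0.  A 1 GiB mapping would be
 * announced as (the size is printed in hex):
 *
 *	rbd0: added with size 0x40000000
 */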
static int rbd_dev_header_name(struct rbd_device *rbd_dev)
{
	struct rbd_spec *spec = rbd_dev->spec;
	size_t size;

	/* Record the header object name for this rbd image. */
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (rbd_dev->image_format == 1)
		size = strlen(spec->image_name) + sizeof (RBD_SUFFIX);
	else
		size = sizeof (RBD_HEADER_PREFIX) + strlen(spec->image_id);

	rbd_dev->header_name = kmalloc(size, GFP_KERNEL);
	if (!rbd_dev->header_name)
		return -ENOMEM;

	if (rbd_dev->image_format == 1)
		sprintf(rbd_dev->header_name, "%s%s",
			spec->image_name, RBD_SUFFIX);
	else
		sprintf(rbd_dev->header_name, "%s%s",
			RBD_HEADER_PREFIX, spec->image_id);
	return 0;
}
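
/*
 * For example, assuming RBD_SUFFIX is ".rbd" and RBD_HEADER_PREFIX is
 * "rbd_header." (see rbd_types.h): a format 1 image named "foo" uses
 * header object "foo.rbd", while a format 2 image whose image id is
 * "10146b8b4567" uses header object "rbd_header.10146b8b4567".
 */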
static void rbd_dev_image_release(struct rbd_device *rbd_dev)
{
	int ret;

	rbd_dev_unprobe(rbd_dev);
	ret = rbd_dev_header_watch_sync(rbd_dev, 0);
	if (ret)
		rbd_warn(rbd_dev, "failed to cancel watch event (%d)\n", ret);
	kfree(rbd_dev->header_name);
	rbd_dev->header_name = NULL;
	rbd_dev->image_format = 0;
	kfree(rbd_dev->spec->image_id);
	rbd_dev->spec->image_id = NULL;

	rbd_dev_destroy(rbd_dev);
}
/*
 * Probe for the existence of the header object for the given rbd
 * device.  For format 2 images this includes determining the image
 * id.
 */
static int rbd_dev_image_probe(struct rbd_device *rbd_dev)
{
	int ret;
	int tmp;

	/*
	 * Get the id from the image id object.  If it's not a
	 * format 2 image, we'll get ENOENT back, and we'll assume
	 * it's a format 1 image.
	 */
	ret = rbd_dev_image_id(rbd_dev);
	if (ret)
		return ret;
	rbd_assert(rbd_dev->spec->image_id);
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));

	ret = rbd_dev_header_name(rbd_dev);
	if (ret)
		goto err_out_format;
	ret = rbd_dev_header_watch_sync(rbd_dev, 1);
	if (ret)
		goto out_header_name;

	if (rbd_dev->image_format == 1)
		ret = rbd_dev_v1_probe(rbd_dev);
	else
		ret = rbd_dev_v2_probe(rbd_dev);
	if (ret)
		goto err_out_watch;
	ret = rbd_dev_spec_update(rbd_dev);
	if (ret)
		goto err_out_probe;
	ret = rbd_dev_probe_parent(rbd_dev);
	if (!ret)
		return 0;

err_out_probe:
	rbd_dev_unprobe(rbd_dev);
err_out_watch:
	tmp = rbd_dev_header_watch_sync(rbd_dev, 0);
	if (tmp)
		rbd_warn(rbd_dev, "unable to tear down watch request\n");
out_header_name:
	kfree(rbd_dev->header_name);
	rbd_dev->header_name = NULL;
err_out_format:
	rbd_dev->image_format = 0;
	kfree(rbd_dev->spec->image_id);
	rbd_dev->spec->image_id = NULL;

	dout("probe failed, returning %d\n", ret);
	return ret;
}
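
/*
 * The error labels in rbd_dev_image_probe() unwind in the reverse of
 * setup order: probe state first (rbd_dev_unprobe()), then the header
 * watch, then the header object name, and finally the image format
 * and image id, leaving rbd_dev as it was before the probe began.
 */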
static ssize_t rbd_add(struct bus_type *bus,
		       const char *buf,
		       size_t count)
{
	struct rbd_device *rbd_dev = NULL;
	struct ceph_options *ceph_opts = NULL;
	struct rbd_options *rbd_opts = NULL;
	struct rbd_spec *spec = NULL;
	struct rbd_client *rbdc;
	struct ceph_osd_client *osdc;
	ssize_t rc;

	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	/* parse add command */
	rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
	if (rc < 0)
		goto err_out_module;

	rbdc = rbd_get_client(ceph_opts);
	if (IS_ERR(rbdc)) {
		rc = PTR_ERR(rbdc);
		goto err_out_args;
	}
	ceph_opts = NULL;	/* rbd_dev client now owns this */

	/* pick the pool */
	osdc = &rbdc->client->osdc;
	rc = ceph_pg_poolid_by_name(osdc->osdmap, spec->pool_name);
	if (rc < 0)
		goto err_out_client;
	spec->pool_id = (u64)rc;

	/* The ceph file layout needs to fit pool id in 32 bits */
	if (spec->pool_id > (u64)U32_MAX) {
		rbd_warn(NULL, "pool id too large (%llu > %u)\n",
			(unsigned long long)spec->pool_id, U32_MAX);
		rc = -EIO;
		goto err_out_client;
	}

	rbd_dev = rbd_dev_create(rbdc, spec);
	if (!rbd_dev) {
		rc = -ENOMEM;	/* don't return the (positive) pool id */
		goto err_out_client;
	}
	rbdc = NULL;		/* rbd_dev now owns this */
	spec = NULL;		/* rbd_dev now owns this */

	rbd_dev->mapping.read_only = rbd_opts->read_only;
	kfree(rbd_opts);
	rbd_opts = NULL;	/* done with this */

	rc = rbd_dev_image_probe(rbd_dev);
	if (rc < 0)
		goto err_out_rbd_dev;

	rc = rbd_dev_device_setup(rbd_dev);
	if (!rc)
		return count;
	/*
	 * rbd_dev_image_release() ends by destroying rbd_dev, so don't
	 * fall through to err_out_rbd_dev, which would destroy it again.
	 */
	rbd_dev_image_release(rbd_dev);
	goto err_out_module;

err_out_rbd_dev:
	rbd_dev_destroy(rbd_dev);
err_out_client:
	rbd_put_client(rbdc);
err_out_args:
	if (ceph_opts)
		ceph_destroy_options(ceph_opts);
	kfree(rbd_opts);
	rbd_spec_put(spec);
err_out_module:
	module_put(THIS_MODULE);

	dout("Error adding device %s\n", buf);
	return (ssize_t)rc;
}
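
/*
 * Usage sketch for the "add" interface (see
 * Documentation/ABI/testing/sysfs-bus-rbd for the authoritative
 * format; the monitor address, credentials, pool, and image name
 * below are illustrative only):
 *
 *	$ echo "192.168.0.1:6789 name=admin,secret=<key> rbd foo" \
 *		> /sys/bus/rbd/add
 */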
static struct rbd_device *__rbd_get_dev(unsigned long dev_id)
{
	struct list_head *tmp;
	struct rbd_device *rbd_dev;

	spin_lock(&rbd_dev_list_lock);
	list_for_each(tmp, &rbd_dev_list) {
		rbd_dev = list_entry(tmp, struct rbd_device, node);
		if (rbd_dev->dev_id == dev_id) {
			spin_unlock(&rbd_dev_list_lock);
			return rbd_dev;
		}
	}
	spin_unlock(&rbd_dev_list_lock);
	return NULL;
}
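
/*
 * Note the device returned by __rbd_get_dev() is not reference
 * counted; it remains valid after rbd_dev_list_lock is dropped only
 * because its caller holds ctl_mutex, which serializes removal.
 */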
static void rbd_dev_device_release(struct device *dev)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	rbd_free_disk(rbd_dev);
	clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
	rbd_dev_clear_mapping(rbd_dev);
	unregister_blkdev(rbd_dev->major, rbd_dev->name);
	rbd_dev->major = 0;
	rbd_dev_id_put(rbd_dev);
	rbd_dev_mapping_clear(rbd_dev);
}
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev)
{
	while (rbd_dev->parent) {
		struct rbd_device *first = rbd_dev;
		struct rbd_device *second = first->parent;
		struct rbd_device *third;

		/*
		 * Follow to the parent with no grandparent and
		 * remove it.
		 */
		while (second && (third = second->parent)) {
			first = second;
			second = third;
		}
		rbd_assert(second);
		rbd_dev_image_release(second);
		first->parent = NULL;
		first->parent_overlap = 0;
		rbd_assert(first->parent_spec);
		rbd_spec_put(first->parent_spec);
		first->parent_spec = NULL;
	}
}
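
/*
 * For a chain rbd_dev -> b -> c (where c is the base image), the loop
 * above releases c first, then b, always tearing the chain down from
 * the far end inward so no image is released while it still has a
 * parent.
 */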
static ssize_t rbd_remove(struct bus_type *bus,
			  const char *buf,
			  size_t count)
{
	struct rbd_device *rbd_dev = NULL;
	int target_id;
	unsigned long ul;
	int ret;

	ret = strict_strtoul(buf, 10, &ul);
	if (ret)
		return ret;

	/* convert to int; abort if we lost anything in the conversion */
	target_id = (int)ul;
	if (target_id != ul)
		return -EINVAL;

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);

	ret = count;
	rbd_dev = __rbd_get_dev(target_id);
	if (!rbd_dev) {
		ret = -ENOENT;
		goto done;
	}

	spin_lock_irq(&rbd_dev->lock);
	if (rbd_dev->open_count)
		ret = -EBUSY;
	else
		set_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags);
	spin_unlock_irq(&rbd_dev->lock);
	if (ret < 0)
		goto done;

	rbd_dev_remove_parent(rbd_dev);
	rbd_bus_del_dev(rbd_dev);
	rbd_dev_image_release(rbd_dev);
	module_put(THIS_MODULE);
done:
	mutex_unlock(&ctl_mutex);
	return ret;
}
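
/*
 * Usage sketch: writing a device id (the N in /dev/rbdN) to the
 * "remove" control file unmaps that device, e.g.:
 *
 *	$ echo 0 > /sys/bus/rbd/remove
 *
 * The write fails with -EBUSY while the device is still held open.
 */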
/*
 * create control files in sysfs
 */
static int rbd_sysfs_init(void)
{
	int ret;

	ret = device_register(&rbd_root_dev);
	if (ret < 0)
		return ret;
	ret = bus_register(&rbd_bus_type);
	if (ret < 0)
		device_unregister(&rbd_root_dev);
	return ret;
}

static void rbd_sysfs_cleanup(void)
{
	bus_unregister(&rbd_bus_type);
	device_unregister(&rbd_root_dev);
}
static int rbd_slab_init(void)
{
	rbd_assert(!rbd_img_request_cache);
	rbd_img_request_cache = kmem_cache_create("rbd_img_request",
					sizeof (struct rbd_img_request),
					__alignof__(struct rbd_img_request),
					0, NULL);
	if (!rbd_img_request_cache)
		return -ENOMEM;

	rbd_assert(!rbd_obj_request_cache);
	rbd_obj_request_cache = kmem_cache_create("rbd_obj_request",
					sizeof (struct rbd_obj_request),
					__alignof__(struct rbd_obj_request),
					0, NULL);
	if (!rbd_obj_request_cache)
		goto out_err;

	rbd_assert(!rbd_segment_name_cache);
	rbd_segment_name_cache = kmem_cache_create("rbd_segment_name",
					MAX_OBJ_NAME_SIZE + 1, 1, 0, NULL);
	if (rbd_segment_name_cache)
		return 0;
out_err:
	if (rbd_obj_request_cache) {
		kmem_cache_destroy(rbd_obj_request_cache);
		rbd_obj_request_cache = NULL;
	}
	kmem_cache_destroy(rbd_img_request_cache);
	rbd_img_request_cache = NULL;
	return -ENOMEM;
}
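
/*
 * Illustrative use of one of these caches (the real call sites appear
 * earlier in this file; the allocation flags here are an example only):
 *
 *	struct rbd_img_request *img_request;
 *
 *	img_request = kmem_cache_alloc(rbd_img_request_cache, GFP_ATOMIC);
 *	...
 *	kmem_cache_free(rbd_img_request_cache, img_request);
 */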
static void rbd_slab_exit(void)
{
	rbd_assert(rbd_segment_name_cache);
	kmem_cache_destroy(rbd_segment_name_cache);
	rbd_segment_name_cache = NULL;

	rbd_assert(rbd_obj_request_cache);
	kmem_cache_destroy(rbd_obj_request_cache);
	rbd_obj_request_cache = NULL;

	rbd_assert(rbd_img_request_cache);
	kmem_cache_destroy(rbd_img_request_cache);
	rbd_img_request_cache = NULL;
}
static int __init rbd_init(void)
{
	int rc;

	if (!libceph_compatible(NULL)) {
		rbd_warn(NULL, "libceph incompatibility (quitting)");
		return -EINVAL;
	}
	rc = rbd_slab_init();
	if (rc)
		return rc;
	rc = rbd_sysfs_init();
	if (rc)
		rbd_slab_exit();
	else
		pr_info("loaded " RBD_DRV_NAME_LONG "\n");
	return rc;
}

static void __exit rbd_exit(void)
{
	rbd_sysfs_cleanup();
	rbd_slab_exit();
}

module_init(rbd_init);
module_exit(rbd_exit);
MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
MODULE_DESCRIPTION("rados block device");

/* following authorship retained from original osdblk.c */
MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>");

MODULE_LICENSE("GPL");