/*
   rbd.c -- Export ceph rados objects as a Linux block device

   based on drivers/block/osdblk.c:

   Copyright 2009 Red Hat, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

   For usage instructions, please refer to:

		 Documentation/ABI/testing/sysfs-bus-rbd

 */
#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/mon_client.h>
#include <linux/ceph/decode.h>
#include <linux/parser.h>
#include <linux/bsearch.h>

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/module.h>

#include <linux/blkdev.h>
#include <linux/slab.h>

#include "rbd_types.h"
#define RBD_DEBUG	/* Activate rbd_assert() calls */

/*
 * The basic unit of block I/O is a sector.  It is interpreted in a
 * number of contexts in Linux (blk, bio, genhd), but the default is
 * universally 512 bytes.  These symbols are just slightly more
 * meaningful than the bare numbers they represent.
 */
#define	SECTOR_SHIFT	9
#define	SECTOR_SIZE	(1ULL << SECTOR_SHIFT)
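/*
 * Illustration (not part of the driver): converting a byte offset to a
 * sector number with these symbols is a pure shift; e.g. byte 4096
 * lands in sector 8.  A hypothetical helper would be:
 *
 *	static inline u64 rbd_bytes_to_sectors(u64 bytes)
 *	{
 *		return bytes >> SECTOR_SHIFT;
 *	}
 */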
/*
 * Increment the given counter and return its updated value.
 * If the counter is already 0 it will not be incremented.
 * If the counter is already at its maximum value, -EINVAL is
 * returned without updating it.
 */
static int atomic_inc_return_safe(atomic_t *v)
{
	unsigned int counter;

	counter = (unsigned int)__atomic_add_unless(v, 1, 0);
	if (counter <= (unsigned int)INT_MAX)
		return (int)counter;

	atomic_dec(v);

	return -EINVAL;
}

/* Decrement the counter.  Return the resulting value, or -EINVAL */
static int atomic_dec_return_safe(atomic_t *v)
{
	int counter;

	counter = atomic_dec_return(v);
	if (counter >= 0)
		return counter;

	atomic_inc(v);

	return -EINVAL;
}
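/*
 * Usage sketch (illustrative only): together these helpers implement a
 * saturating reference count that pins itself instead of wrapping:
 *
 *	if (atomic_inc_return_safe(&refs) < 0)
 *		return -EINVAL;		-- counter was stuck at its limit
 *	...
 *	if (atomic_dec_return_safe(&refs) == 0)
 *		do_cleanup();		-- last reference just dropped
 *
 * rbd_dev_parent_get()/rbd_dev_parent_put() below use this pattern for
 * the parent image reference count.
 */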
#define RBD_DRV_NAME "rbd"
#define RBD_DRV_NAME_LONG "rbd (rados block device)"

#define RBD_MINORS_PER_MAJOR	256		/* max minors per blkdev */

#define RBD_SNAP_DEV_NAME_PREFIX	"snap_"
#define RBD_MAX_SNAP_NAME_LEN	\
			(NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))
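/*
 * With NAME_MAX at 255 and the "snap_" prefix occupying 5 bytes, this
 * works out to a 250-byte limit on a mapped snapshot's name.
 */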
#define RBD_MAX_SNAP_COUNT	510	/* allows max snapc to fit in 4KB */

#define RBD_SNAP_HEAD_NAME	"-"

#define	BAD_SNAP_INDEX	U32_MAX		/* invalid index into snap array */

/* This allows a single page to hold an image name sent by OSD */
#define RBD_IMAGE_NAME_LEN_MAX	(PAGE_SIZE - sizeof (__le32) - 1)
#define RBD_IMAGE_ID_LEN_MAX	64

#define RBD_OBJ_PREFIX_LEN_MAX	64
#define RBD_FEATURE_LAYERING	(1<<0)
#define RBD_FEATURE_STRIPINGV2	(1<<1)
#define RBD_FEATURES_ALL \
	    (RBD_FEATURE_LAYERING | RBD_FEATURE_STRIPINGV2)

/* Features supported by this (client software) implementation. */

#define RBD_FEATURES_SUPPORTED	(RBD_FEATURES_ALL)
/*
 * An RBD device name will be "rbd#", where the "rbd" comes from
 * RBD_DRV_NAME above, and # is a unique integer identifier.
 * MAX_INT_FORMAT_WIDTH is used in ensuring DEV_NAME_LEN is big
 * enough to hold all possible device names.
 */
#define DEV_NAME_LEN		32
#define MAX_INT_FORMAT_WIDTH	((5 * sizeof (int)) / 2 + 1)
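/*
 * Worked example: for a 4-byte int this gives (5 * 4) / 2 + 1 = 11
 * characters, enough for the longest decimal rendering "-2147483648"
 * (10 digits plus a sign); 5/2 slightly overestimates log10(256).
 */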
/*
 * block device image metadata (in-memory version)
 */
struct rbd_image_header {
	/* These six fields never change for a given rbd image */
	char *object_prefix;
	__u8 obj_order;
	__u8 crypt_type;
	__u8 comp_type;
	u64 stripe_unit;
	u64 stripe_count;
	u64 features;		/* Might be changeable someday? */

	/* The remaining fields need to be updated occasionally */
	u64 image_size;
	struct ceph_snap_context *snapc;
	char *snap_names;	/* format 1 only */
	u64 *snap_sizes;	/* format 1 only */
};
/*
 * An rbd image specification.
 *
 * The tuple (pool_id, image_id, snap_id) is sufficient to uniquely
 * identify an image.  Each rbd_dev structure includes a pointer to
 * an rbd_spec structure that encapsulates this identity.
 *
 * Each of the id's in an rbd_spec has an associated name.  For a
 * user-mapped image, the names are supplied and the id's associated
 * with them are looked up.  For a layered image, a parent image is
 * defined by the tuple, and the names are looked up.
 *
 * An rbd_dev structure contains a parent_spec pointer which is
 * non-null if the image it represents is a child in a layered
 * image.  This pointer will refer to the rbd_spec structure used
 * by the parent rbd_dev for its own identity (i.e., the structure
 * is shared between the parent and child).
 *
 * Since these structures are populated once, during the discovery
 * phase of image construction, they are effectively immutable so
 * we make no effort to synchronize access to them.
 *
 * Note that code herein does not assume the image name is known (it
 * could be a null pointer).
 */
struct rbd_spec {
	u64		pool_id;
	const char	*pool_name;

	const char	*image_id;
	const char	*image_name;

	u64		snap_id;
	const char	*snap_name;

	struct kref	kref;
};
/*
 * An instance of the client.  Multiple devices may share an rbd client.
 */
struct rbd_client {
	struct ceph_client	*client;
	struct kref		kref;
	struct list_head	node;
};
struct rbd_img_request;
typedef void (*rbd_img_callback_t)(struct rbd_img_request *);

#define	BAD_WHICH	U32_MAX		/* Good which or bad which, which? */

struct rbd_obj_request;
typedef void (*rbd_obj_callback_t)(struct rbd_obj_request *);

enum obj_request_type {
	OBJ_REQUEST_NODATA, OBJ_REQUEST_BIO, OBJ_REQUEST_PAGES
};

enum obj_req_flags {
	OBJ_REQ_DONE,		/* completion flag: not done = 0, done = 1 */
	OBJ_REQ_IMG_DATA,	/* object usage: standalone = 0, image = 1 */
	OBJ_REQ_KNOWN,		/* EXISTS flag valid: no = 0, yes = 1 */
	OBJ_REQ_EXISTS,		/* target exists: no = 0, yes = 1 */
};
struct rbd_obj_request {
	const char		*object_name;
	u64			offset;		/* object start byte */
	u64			length;		/* bytes from offset */
	unsigned long		flags;

	/*
	 * An object request associated with an image will have its
	 * img_data flag set; a standalone object request will not.
	 *
	 * A standalone object request will have which == BAD_WHICH
	 * and a null obj_request pointer.
	 *
	 * An object request initiated in support of a layered image
	 * object (to check for its existence before a write) will
	 * have which == BAD_WHICH and a non-null obj_request pointer.
	 *
	 * Finally, an object request for rbd image data will have
	 * which != BAD_WHICH, and will have a non-null img_request
	 * pointer.  The value of which will be in the range
	 * 0..(img_request->obj_request_count-1).
	 */
	union {
		struct rbd_obj_request	*obj_request;	/* STAT op */
		struct {
			struct rbd_img_request	*img_request;
			u64			img_offset;
			/* links for img_request->obj_requests list */
			struct list_head	links;
		};
	};
	u32			which;		/* posn in image request list */

	enum obj_request_type	type;
	union {
		struct bio	*bio_list;
		struct {
			struct page	**pages;
			u32		page_count;
		};
	};
	struct page		**copyup_pages;
	u32			copyup_page_count;

	struct ceph_osd_request	*osd_req;

	u64			xferred;	/* bytes transferred */
	int			result;

	rbd_obj_callback_t	callback;
	struct completion	completion;

	struct kref		kref;
};
enum img_req_flags {
	IMG_REQ_WRITE,		/* I/O direction: read = 0, write = 1 */
	IMG_REQ_CHILD,		/* initiator: block = 0, child image = 1 */
	IMG_REQ_LAYERED,	/* ENOENT handling: normal = 0, layered = 1 */
};
struct rbd_img_request {
	struct rbd_device	*rbd_dev;
	u64			offset;	/* starting image byte offset */
	u64			length;	/* byte count from offset */
	unsigned long		flags;
	union {
		u64			snap_id;	/* for reads */
		struct ceph_snap_context *snapc;	/* for writes */
	};
	union {
		struct request		*rq;		/* block request */
		struct rbd_obj_request	*obj_request;	/* obj req initiator */
	};
	struct page		**copyup_pages;
	u32			copyup_page_count;
	spinlock_t		completion_lock; /* protects next_completion */
	u32			next_completion;
	rbd_img_callback_t	callback;
	u64			xferred;	/* aggregate bytes transferred */
	int			result;	/* first nonzero obj_request result */

	u32			obj_request_count;
	struct list_head	obj_requests;	/* rbd_obj_request structs */

	struct kref		kref;
};
#define for_each_obj_request(ireq, oreq) \
	list_for_each_entry(oreq, &(ireq)->obj_requests, links)
#define for_each_obj_request_from(ireq, oreq) \
	list_for_each_entry_from(oreq, &(ireq)->obj_requests, links)
#define for_each_obj_request_safe(ireq, oreq, n) \
	list_for_each_entry_safe_reverse(oreq, n, &(ireq)->obj_requests, links)
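/*
 * Illustrative only (hypothetical helper, not in the driver): walking
 * an image request's object requests in submission order with the
 * iterators above:
 *
 *	static u32 count_done(struct rbd_img_request *img_request)
 *	{
 *		struct rbd_obj_request *obj_request;
 *		u32 done = 0;
 *
 *		for_each_obj_request(img_request, obj_request)
 *			if (obj_request_done_test(obj_request))
 *				done++;
 *
 *		return done;
 *	}
 */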
struct rbd_mapping {
	u64			size;
	u64			features;
	bool			read_only;
};

/*
 * a single device
 */
struct rbd_device {
	int			dev_id;		/* blkdev unique id */

	int			major;		/* blkdev assigned major */
	struct gendisk		*disk;		/* blkdev's gendisk and rq */

	u32			image_format;	/* Either 1 or 2 */
	struct rbd_client	*rbd_client;

	char			name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */

	spinlock_t		lock;		/* queue, flags, open_count */

	struct rbd_image_header	header;
	unsigned long		flags;		/* possibly lock protected */
	struct rbd_spec		*spec;

	char			*header_name;

	struct ceph_file_layout	layout;

	struct ceph_osd_event	*watch_event;
	struct rbd_obj_request	*watch_request;

	struct rbd_spec		*parent_spec;
	u64			parent_overlap;
	atomic_t		parent_ref;
	struct rbd_device	*parent;

	/* protects updating the header */
	struct rw_semaphore	header_rwsem;

	struct rbd_mapping	mapping;

	struct list_head	node;

	/* sysfs related */
	struct device		dev;
	unsigned long		open_count;	/* protected by lock */
};
/*
 * Flag bits for rbd_dev->flags.  If atomicity is required,
 * rbd_dev->lock is used to protect access.
 *
 * Currently, only the "removing" flag (which is coupled with the
 * "open_count" field) requires atomic access.
 */
enum rbd_dev_flags {
	RBD_DEV_FLAG_EXISTS,	/* mapped snapshot has not been deleted */
	RBD_DEV_FLAG_REMOVING,	/* this mapping is being removed */
};
static DEFINE_MUTEX(ctl_mutex);	  /* Serialize open/close/setup/teardown */

static LIST_HEAD(rbd_dev_list);    /* devices */
static DEFINE_SPINLOCK(rbd_dev_list_lock);

static LIST_HEAD(rbd_client_list);		/* clients */
static DEFINE_SPINLOCK(rbd_client_list_lock);

/* Slab caches for frequently-allocated structures */

static struct kmem_cache	*rbd_img_request_cache;
static struct kmem_cache	*rbd_obj_request_cache;
static struct kmem_cache	*rbd_segment_name_cache;
static int rbd_img_request_submit(struct rbd_img_request *img_request);

static void rbd_dev_device_release(struct device *dev);

static ssize_t rbd_add(struct bus_type *bus, const char *buf,
		       size_t count);
static ssize_t rbd_remove(struct bus_type *bus, const char *buf,
			  size_t count);
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping);
static void rbd_spec_put(struct rbd_spec *spec);

static struct bus_attribute rbd_bus_attrs[] = {
	__ATTR(add, S_IWUSR, NULL, rbd_add),
	__ATTR(remove, S_IWUSR, NULL, rbd_remove),
	__ATTR_NULL
};

static struct bus_type rbd_bus_type = {
	.name		= "rbd",
	.bus_attrs	= rbd_bus_attrs,
};

static void rbd_root_dev_release(struct device *dev)
{
}

static struct device rbd_root_dev = {
	.init_name =	"rbd",
	.release =	rbd_root_dev_release,
};
static __printf(2, 3)
void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;

	if (!rbd_dev)
		printk(KERN_WARNING "%s: %pV\n", RBD_DRV_NAME, &vaf);
	else if (rbd_dev->disk)
		printk(KERN_WARNING "%s: %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->disk->disk_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_name)
		printk(KERN_WARNING "%s: image %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_id)
		printk(KERN_WARNING "%s: id %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_id, &vaf);
	else
		printk(KERN_WARNING "%s: rbd_dev %p: %pV\n",
			RBD_DRV_NAME, rbd_dev, &vaf);
	va_end(args);
}
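/*
 * Example call (illustrative): rbd_warn() takes printf-style arguments
 * and prefixes the message with the most specific identity available:
 *
 *	rbd_warn(rbd_dev, "capacity changed to %llu",
 *		(unsigned long long)size);
 *
 * logs e.g. "rbd: rbd3: capacity changed to 1073741824" once the disk
 * exists, falling back to the image name, the image id, or the bare
 * rbd_dev pointer before that.
 */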
#ifdef RBD_DEBUG
#define rbd_assert(expr)						\
		if (unlikely(!(expr))) {				\
			printk(KERN_ERR "\nAssertion failure in %s() "	\
						"at line %d:\n\n"	\
					"\trbd_assert(%s);\n\n",	\
					__func__, __LINE__, #expr);	\
			BUG();						\
		}
#else /* !RBD_DEBUG */
#  define rbd_assert(expr)	((void) 0)
#endif /* !RBD_DEBUG */
static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request);
static void rbd_img_parent_read(struct rbd_obj_request *obj_request);
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);

static int rbd_dev_refresh(struct rbd_device *rbd_dev);
static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev);
static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev);
static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id);
static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
		u8 *order, u64 *snap_size);
static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
		u64 *snap_features);
static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name);
static int rbd_open(struct block_device *bdev, fmode_t mode)
{
	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
	bool removing = false;

	if ((mode & FMODE_WRITE) && rbd_dev->mapping.read_only)
		return -EROFS;

	spin_lock_irq(&rbd_dev->lock);
	if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
		removing = true;
	else
		rbd_dev->open_count++;
	spin_unlock_irq(&rbd_dev->lock);
	if (removing)
		return -ENOENT;

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
	(void) get_device(&rbd_dev->dev);
	set_device_ro(bdev, rbd_dev->mapping.read_only);
	mutex_unlock(&ctl_mutex);

	return 0;
}
static void rbd_release(struct gendisk *disk, fmode_t mode)
{
	struct rbd_device *rbd_dev = disk->private_data;
	unsigned long open_count_before;

	spin_lock_irq(&rbd_dev->lock);
	open_count_before = rbd_dev->open_count--;
	spin_unlock_irq(&rbd_dev->lock);
	rbd_assert(open_count_before > 0);

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
	put_device(&rbd_dev->dev);
	mutex_unlock(&ctl_mutex);
}

static const struct block_device_operations rbd_bd_ops = {
	.owner			= THIS_MODULE,
	.open			= rbd_open,
	.release		= rbd_release,
};
/*
 * Initialize an rbd client instance.
 * We own *ceph_opts.
 */
static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;
	int ret = -ENOMEM;

	dout("%s:\n", __func__);
	rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL);
	if (!rbdc)
		goto out_opt;

	kref_init(&rbdc->kref);
	INIT_LIST_HEAD(&rbdc->node);

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);

	rbdc->client = ceph_create_client(ceph_opts, rbdc, 0, 0);
	if (IS_ERR(rbdc->client))
		goto out_mutex;
	ceph_opts = NULL;	/* Now rbdc->client is responsible for ceph_opts */

	ret = ceph_open_session(rbdc->client);
	if (ret < 0)
		goto out_err;

	spin_lock(&rbd_client_list_lock);
	list_add_tail(&rbdc->node, &rbd_client_list);
	spin_unlock(&rbd_client_list_lock);

	mutex_unlock(&ctl_mutex);
	dout("%s: rbdc %p\n", __func__, rbdc);

	return rbdc;

out_err:
	ceph_destroy_client(rbdc->client);
out_mutex:
	mutex_unlock(&ctl_mutex);
	kfree(rbdc);
out_opt:
	if (ceph_opts)
		ceph_destroy_options(ceph_opts);
	dout("%s: error %d\n", __func__, ret);

	return ERR_PTR(ret);
}
static struct rbd_client *__rbd_get_client(struct rbd_client *rbdc)
{
	kref_get(&rbdc->kref);

	return rbdc;
}

/*
 * Find a ceph client with specific addr and configuration.  If
 * found, bump its reference count.
 */
static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
{
	struct rbd_client *client_node;
	bool found = false;

	if (ceph_opts->flags & CEPH_OPT_NOSHARE)
		return NULL;

	spin_lock(&rbd_client_list_lock);
	list_for_each_entry(client_node, &rbd_client_list, node) {
		if (!ceph_compare_options(ceph_opts, client_node->client)) {
			__rbd_get_client(client_node);

			found = true;
			break;
		}
	}
	spin_unlock(&rbd_client_list_lock);

	return found ? client_node : NULL;
}
/*
 * mount options
 */
enum {
	Opt_last_int,
	/* int args above */
	Opt_last_string,
	/* string args above */
	Opt_read_only,
	Opt_read_write,
	/* Boolean args above */
	Opt_last_bool,
};

static match_table_t rbd_opts_tokens = {
	/* int args above */
	/* string args above */
	{Opt_read_only, "read_only"},
	{Opt_read_only, "ro"},		/* Alternate spelling */
	{Opt_read_write, "read_write"},
	{Opt_read_write, "rw"},		/* Alternate spelling */
	/* Boolean args above */
	{-1, NULL}
};

struct rbd_options {
	bool	read_only;
};

#define RBD_READ_ONLY_DEFAULT	false
static int parse_rbd_opts_token(char *c, void *private)
{
	struct rbd_options *rbd_opts = private;
	substring_t argstr[MAX_OPT_ARGS];
	int token, intval, ret;

	token = match_token(c, rbd_opts_tokens, argstr);
	if (token < 0)
		return -EINVAL;

	if (token < Opt_last_int) {
		ret = match_int(&argstr[0], &intval);
		if (ret < 0) {
			pr_err("bad mount option arg (not int) "
			       "at '%s'\n", c);
			return ret;
		}
		dout("got int token %d val %d\n", token, intval);
	} else if (token > Opt_last_int && token < Opt_last_string) {
		dout("got string token %d val %s\n", token,
		     argstr[0].from);
	} else if (token > Opt_last_string && token < Opt_last_bool) {
		dout("got Boolean token %d\n", token);
	} else {
		dout("got token %d\n", token);
	}

	switch (token) {
	case Opt_read_only:
		rbd_opts->read_only = true;
		break;
	case Opt_read_write:
		rbd_opts->read_only = false;
		break;
	default:
		rbd_assert(false);
		break;
	}
	return 0;
}
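/*
 * Example (illustrative; the exact field order is defined elsewhere by
 * the sysfs "add" parser): tokens arrive one at a time from the option
 * list given when mapping an image, e.g.
 *
 *	$ echo "1.2.3.4:6789 name=admin,ro mypool myimage" > /sys/bus/rbd/add
 *
 * Each comma-separated option that libceph does not itself recognize
 * (here "ro") is handed to parse_rbd_opts_token() through the
 * parse_extra_token callback of ceph_parse_options().
 */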
/*
 * Get a ceph client with specific addr and configuration; if one does
 * not exist, create it.
 */
static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;

	rbdc = rbd_client_find(ceph_opts);
	if (rbdc)	/* using an existing client */
		ceph_destroy_options(ceph_opts);
	else
		rbdc = rbd_client_create(ceph_opts);

	return rbdc;
}
/*
 * Destroy ceph client
 *
 * This takes rbd_client_list_lock itself, so the caller must not
 * already hold it.
 */
static void rbd_client_release(struct kref *kref)
{
	struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);

	dout("%s: rbdc %p\n", __func__, rbdc);
	spin_lock(&rbd_client_list_lock);
	list_del(&rbdc->node);
	spin_unlock(&rbd_client_list_lock);

	ceph_destroy_client(rbdc->client);
	kfree(rbdc);
}

/*
 * Drop reference to ceph client node.  If it's not referenced anymore,
 * release it.
 */
static void rbd_put_client(struct rbd_client *rbdc)
{
	if (rbdc)
		kref_put(&rbdc->kref, rbd_client_release);
}
static bool rbd_image_format_valid(u32 image_format)
{
	return image_format == 1 || image_format == 2;
}

static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
{
	size_t size;
	u32 snap_count;

	/* The header has to start with the magic rbd header text */
	if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)))
		return false;

	/* The bio layer requires at least sector-sized I/O */

	if (ondisk->options.order < SECTOR_SHIFT)
		return false;

	/* If we use u64 in a few spots we may be able to loosen this */

	if (ondisk->options.order > 8 * sizeof (int) - 1)
		return false;

	/*
	 * The size of a snapshot header has to fit in a size_t, and
	 * that limits the number of snapshots.
	 */
	snap_count = le32_to_cpu(ondisk->snap_count);
	size = SIZE_MAX - sizeof (struct ceph_snap_context);
	if (snap_count > size / sizeof (__le64))
		return false;

	/*
	 * Not only that, but the size of the entire snapshot
	 * header must also be representable in a size_t.
	 */
	size -= snap_count * sizeof (__le64);
	if ((u64) size < le64_to_cpu(ondisk->snap_names_len))
		return false;

	return true;
}
/*
 * Fill an rbd image header with information from the given format 1
 * on-disk header.
 */
static int rbd_header_from_disk(struct rbd_device *rbd_dev,
				 struct rbd_image_header_ondisk *ondisk)
{
	struct rbd_image_header *header = &rbd_dev->header;
	bool first_time = header->object_prefix == NULL;
	struct ceph_snap_context *snapc;
	char *object_prefix = NULL;
	char *snap_names = NULL;
	u64 *snap_sizes = NULL;
	u32 snap_count;
	size_t size;
	int ret = -ENOMEM;
	u32 i;

	/* Allocate this now to avoid having to handle failure below */

	if (first_time) {
		size_t len;

		len = strnlen(ondisk->object_prefix,
				sizeof (ondisk->object_prefix));
		object_prefix = kmalloc(len + 1, GFP_KERNEL);
		if (!object_prefix)
			return -ENOMEM;
		memcpy(object_prefix, ondisk->object_prefix, len);
		object_prefix[len] = '\0';
	}

	/* Allocate the snapshot context and fill it in */

	snap_count = le32_to_cpu(ondisk->snap_count);
	snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
	if (!snapc)
		goto out_err;
	snapc->seq = le64_to_cpu(ondisk->snap_seq);
	if (snap_count) {
		struct rbd_image_snap_ondisk *snaps;
		u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);

		/* We'll keep a copy of the snapshot names... */

		if (snap_names_len > (u64)SIZE_MAX)
			goto out_2big;
		snap_names = kmalloc(snap_names_len, GFP_KERNEL);
		if (!snap_names)
			goto out_err;

		/* ...as well as the array of their sizes. */

		size = snap_count * sizeof (*header->snap_sizes);
		snap_sizes = kmalloc(size, GFP_KERNEL);
		if (!snap_sizes)
			goto out_err;

		/*
		 * Copy the names, and fill in each snapshot's id
		 * and size.
		 *
		 * Note that rbd_dev_v1_header_info() guarantees the
		 * ondisk buffer we're working with has
		 * snap_names_len bytes beyond the end of the
		 * snapshot id array, so this memcpy() is safe.
		 */
		memcpy(snap_names, &ondisk->snaps[snap_count], snap_names_len);
		snaps = ondisk->snaps;
		for (i = 0; i < snap_count; i++) {
			snapc->snaps[i] = le64_to_cpu(snaps[i].id);
			snap_sizes[i] = le64_to_cpu(snaps[i].image_size);
		}
	}

	/* We won't fail any more, fill in the header */

	down_write(&rbd_dev->header_rwsem);
	if (first_time) {
		header->object_prefix = object_prefix;
		header->obj_order = ondisk->options.order;
		header->crypt_type = ondisk->options.crypt_type;
		header->comp_type = ondisk->options.comp_type;
		/* The rest aren't used for format 1 images */
		header->stripe_unit = 0;
		header->stripe_count = 0;
		header->features = 0;
	} else {
		ceph_put_snap_context(header->snapc);
		kfree(header->snap_names);
		kfree(header->snap_sizes);
	}

	/* The remaining fields always get updated (when we refresh) */

	header->image_size = le64_to_cpu(ondisk->image_size);
	header->snapc = snapc;
	header->snap_names = snap_names;
	header->snap_sizes = snap_sizes;

	/* Make sure mapping size is consistent with header info */

	if (rbd_dev->spec->snap_id == CEPH_NOSNAP || first_time)
		if (rbd_dev->mapping.size != header->image_size)
			rbd_dev->mapping.size = header->image_size;

	up_write(&rbd_dev->header_rwsem);

	return 0;
out_2big:
	ret = -EIO;
out_err:
	kfree(snap_sizes);
	kfree(snap_names);
	ceph_put_snap_context(snapc);
	kfree(object_prefix);

	return ret;
}
static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which)
{
	const char *snap_name;

	rbd_assert(which < rbd_dev->header.snapc->num_snaps);

	/* Skip over names until we find the one we are looking for */

	snap_name = rbd_dev->header.snap_names;
	while (which--)
		snap_name += strlen(snap_name) + 1;

	return kstrdup(snap_name, GFP_KERNEL);
}
/*
 * Snapshot id comparison function for use with qsort()/bsearch().
 * Note that result is for snapshots in *descending* order.
 */
static int snapid_compare_reverse(const void *s1, const void *s2)
{
	u64 snap_id1 = *(u64 *)s1;
	u64 snap_id2 = *(u64 *)s2;

	if (snap_id1 < snap_id2)
		return 1;
	return snap_id1 == snap_id2 ? 0 : -1;
}
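/*
 * For example, comparing ids 10 and 4 yields -1, so 10 sorts *before*
 * 4: the newest (highest) snapshot id comes first.
 */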
/*
 * Search a snapshot context to see if the given snapshot id is
 * present.
 *
 * Returns the position of the snapshot id in the array if it's found,
 * or BAD_SNAP_INDEX otherwise.
 *
 * Note: The snapshot array is kept sorted (by the osd) in
 * reverse order, highest snapshot id first.
 */
static u32 rbd_dev_snap_index(struct rbd_device *rbd_dev, u64 snap_id)
{
	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
	u64 *found;

	found = bsearch(&snap_id, &snapc->snaps, snapc->num_snaps,
				sizeof (snap_id), snapid_compare_reverse);

	return found ? (u32)(found - &snapc->snaps[0]) : BAD_SNAP_INDEX;
}
static const char *rbd_dev_v1_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id)
{
	u32 which;

	which = rbd_dev_snap_index(rbd_dev, snap_id);
	if (which == BAD_SNAP_INDEX)
		return NULL;

	return _rbd_dev_v1_snap_name(rbd_dev, which);
}

static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
{
	if (snap_id == CEPH_NOSNAP)
		return RBD_SNAP_HEAD_NAME;

	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (rbd_dev->image_format == 1)
		return rbd_dev_v1_snap_name(rbd_dev, snap_id);

	return rbd_dev_v2_snap_name(rbd_dev, snap_id);
}
static int rbd_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
				u64 *snap_size)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (snap_id == CEPH_NOSNAP) {
		*snap_size = rbd_dev->header.image_size;
	} else if (rbd_dev->image_format == 1) {
		u32 which;

		which = rbd_dev_snap_index(rbd_dev, snap_id);
		if (which == BAD_SNAP_INDEX)
			return -ENOENT;

		*snap_size = rbd_dev->header.snap_sizes[which];
	} else {
		u64 size = 0;
		int ret;

		ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, NULL, &size);
		if (ret)
			return ret;

		*snap_size = size;
	}
	return 0;
}

static int rbd_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
			u64 *snap_features)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (snap_id == CEPH_NOSNAP) {
		*snap_features = rbd_dev->header.features;
	} else if (rbd_dev->image_format == 1) {
		*snap_features = 0;	/* No features for format 1 */
	} else {
		u64 features = 0;
		int ret;

		ret = _rbd_dev_v2_snap_features(rbd_dev, snap_id, &features);
		if (ret)
			return ret;

		*snap_features = features;
	}
	return 0;
}
static int rbd_dev_mapping_set(struct rbd_device *rbd_dev)
{
	u64 snap_id = rbd_dev->spec->snap_id;
	u64 size = 0;
	u64 features = 0;
	int ret;

	ret = rbd_snap_size(rbd_dev, snap_id, &size);
	if (ret)
		return ret;
	ret = rbd_snap_features(rbd_dev, snap_id, &features);
	if (ret)
		return ret;

	rbd_dev->mapping.size = size;
	rbd_dev->mapping.features = features;

	return 0;
}

static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev)
{
	rbd_dev->mapping.size = 0;
	rbd_dev->mapping.features = 0;
}
static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
{
	char *name;
	u64 segment;
	int ret;

	name = kmem_cache_alloc(rbd_segment_name_cache, GFP_NOIO);
	if (!name)
		return NULL;
	segment = offset >> rbd_dev->header.obj_order;
	ret = snprintf(name, MAX_OBJ_NAME_SIZE + 1, "%s.%012llx",
			rbd_dev->header.object_prefix, segment);
	if (ret < 0 || ret > MAX_OBJ_NAME_SIZE) {
		pr_err("error formatting segment name for #%llu (%d)\n",
			segment, ret);
		kmem_cache_free(rbd_segment_name_cache, name);
		name = NULL;
	}

	return name;
}

static void rbd_segment_name_free(const char *name)
{
	/* The explicit cast here is needed to drop the const qualifier */

	kmem_cache_free(rbd_segment_name_cache, (void *)name);
}

static u64 rbd_segment_offset(struct rbd_device *rbd_dev, u64 offset)
{
	u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;

	return offset & (segment_size - 1);
}

static u64 rbd_segment_length(struct rbd_device *rbd_dev,
				u64 offset, u64 length)
{
	u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;

	offset &= segment_size - 1;

	rbd_assert(length <= U64_MAX - offset);
	if (offset + length > segment_size)
		length = segment_size - offset;

	return length;
}
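/*
 * Worked example (illustrative): with the default object order of 22
 * (4 MiB objects), image byte offset 0x500000 falls in segment
 * 0x500000 >> 22 = 1 at segment offset 0x100000, and a 6 MiB request
 * starting there is clipped to the 3 MiB remaining in that segment.
 */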
/*
 * returns the size of an object in the image
 */
static u64 rbd_obj_bytes(struct rbd_image_header *header)
{
	return 1 << header->obj_order;
}
/*
 * bio helpers
 */

static void bio_chain_put(struct bio *chain)
{
	struct bio *tmp;

	while (chain) {
		tmp = chain;
		chain = chain->bi_next;
		bio_put(tmp);
	}
}

/*
 * zeros a bio chain, starting at a specific offset
 */
static void zero_bio_chain(struct bio *chain, int start_ofs)
{
	struct bio_vec *bv;
	unsigned long flags;
	void *buf;
	int i;
	int pos = 0;

	while (chain) {
		bio_for_each_segment(bv, chain, i) {
			if (pos + bv->bv_len > start_ofs) {
				int remainder = max(start_ofs - pos, 0);
				buf = bvec_kmap_irq(bv, &flags);
				memset(buf + remainder, 0,
				       bv->bv_len - remainder);
				bvec_kunmap_irq(buf, &flags);
			}
			pos += bv->bv_len;
		}

		chain = chain->bi_next;
	}
}
/*
 * similar to zero_bio_chain(), zeros data defined by a page array,
 * starting at the given byte offset from the start of the array and
 * continuing up to the given end offset.  The pages array is
 * assumed to be big enough to hold all bytes up to the end.
 */
static void zero_pages(struct page **pages, u64 offset, u64 end)
{
	struct page **page = &pages[offset >> PAGE_SHIFT];

	rbd_assert(end > offset);
	rbd_assert(end - offset <= (u64)SIZE_MAX);
	while (offset < end) {
		size_t page_offset;
		size_t length;
		unsigned long flags;
		void *kaddr;

		page_offset = (size_t)(offset & ~PAGE_MASK);
		length = min(PAGE_SIZE - page_offset, (size_t)(end - offset));
		local_irq_save(flags);
		kaddr = kmap_atomic(*page);
		memset(kaddr + page_offset, 0, length);
		kunmap_atomic(kaddr);
		local_irq_restore(flags);

		offset += length;
		page++;
	}
}
/*
 * Clone a portion of a bio, starting at the given byte offset
 * and continuing for the number of bytes indicated.
 */
static struct bio *bio_clone_range(struct bio *bio_src,
					unsigned int offset,
					unsigned int len,
					gfp_t gfpmask)
{
	struct bio_vec *bv;
	unsigned int resid;
	unsigned short idx;
	unsigned int voff;
	unsigned short end_idx;
	unsigned short vcnt;
	struct bio *bio;

	/* Handle the easy case for the caller */

	if (!offset && len == bio_src->bi_size)
		return bio_clone(bio_src, gfpmask);

	if (WARN_ON_ONCE(!len))
		return NULL;
	if (WARN_ON_ONCE(len > bio_src->bi_size))
		return NULL;
	if (WARN_ON_ONCE(offset > bio_src->bi_size - len))
		return NULL;

	/* Find first affected segment... */

	resid = offset;
	bio_for_each_segment(bv, bio_src, idx) {
		if (resid < bv->bv_len)
			break;
		resid -= bv->bv_len;
	}
	voff = resid;

	/* ...and the last affected segment */

	resid += len;
	__bio_for_each_segment(bv, bio_src, end_idx, idx) {
		if (resid <= bv->bv_len)
			break;
		resid -= bv->bv_len;
	}
	vcnt = end_idx - idx + 1;

	/* Build the clone */

	bio = bio_alloc(gfpmask, (unsigned int) vcnt);
	if (!bio)
		return NULL;	/* ENOMEM */

	bio->bi_bdev = bio_src->bi_bdev;
	bio->bi_sector = bio_src->bi_sector + (offset >> SECTOR_SHIFT);
	bio->bi_rw = bio_src->bi_rw;
	bio->bi_flags |= 1 << BIO_CLONED;

	/*
	 * Copy over our part of the bio_vec, then update the first
	 * and last (or only) entries.
	 */
	memcpy(&bio->bi_io_vec[0], &bio_src->bi_io_vec[idx],
			vcnt * sizeof (struct bio_vec));
	bio->bi_io_vec[0].bv_offset += voff;
	if (vcnt > 1) {
		bio->bi_io_vec[0].bv_len -= voff;
		bio->bi_io_vec[vcnt - 1].bv_len = resid;
	} else {
		bio->bi_io_vec[0].bv_len = len;
	}

	bio->bi_vcnt = vcnt;
	bio->bi_size = len;

	return bio;
}
/*
 * Clone a portion of a bio chain, starting at the given byte offset
 * into the first bio in the source chain and continuing for the
 * number of bytes indicated.  The result is another bio chain of
 * exactly the given length, or a null pointer on error.
 *
 * The bio_src and offset parameters are both in-out.  On entry they
 * refer to the first source bio and the offset into that bio where
 * the start of data to be cloned is located.
 *
 * On return, bio_src is updated to refer to the bio in the source
 * chain that contains the first un-cloned byte, and *offset will
 * contain the offset of that byte within that bio.
 */
static struct bio *bio_chain_clone_range(struct bio **bio_src,
					unsigned int *offset,
					unsigned int len,
					gfp_t gfpmask)
{
	struct bio *bi = *bio_src;
	unsigned int off = *offset;
	struct bio *chain = NULL;
	struct bio **end;

	/* Build up a chain of clone bios up to the limit */

	if (!bi || off >= bi->bi_size || !len)
		return NULL;		/* Nothing to clone */

	end = &chain;
	while (len) {
		unsigned int bi_size;
		struct bio *bio;

		if (!bi) {
			rbd_warn(NULL, "bio_chain exhausted with %u left", len);
			goto out_err;	/* EINVAL; ran out of bio's */
		}
		bi_size = min_t(unsigned int, bi->bi_size - off, len);
		bio = bio_clone_range(bi, off, bi_size, gfpmask);
		if (!bio)
			goto out_err;	/* ENOMEM */

		*end = bio;
		end = &bio->bi_next;

		off += bi_size;
		if (off == bi->bi_size) {
			bi = bi->bi_next;
			off = 0;
		}
		len -= bi_size;
	}
	*bio_src = bi;
	*offset = off;

	return chain;
out_err:
	bio_chain_put(chain);

	return NULL;
}
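/*
 * Caller sketch (illustrative): because bio_src and offset are in-out,
 * a caller slicing a request into pieces just calls this in a loop and
 * lets the cursor advance, as rbd_img_request_fill() does below:
 *
 *	while (resid) {
 *		len = min(resid, max_clone_size);
 *		clone = bio_chain_clone_range(&bio, &off, len, GFP_NOIO);
 *		if (!clone)
 *			break;
 *		-- submit clone, then account for it --
 *		resid -= len;
 *	}
 */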
/*
 * The default/initial value for all object request flags is 0.  For
 * each flag, once its value is set to 1 it is never reset to 0
 * again.
 */
static void obj_request_img_data_set(struct rbd_obj_request *obj_request)
{
	if (test_and_set_bit(OBJ_REQ_IMG_DATA, &obj_request->flags)) {
		struct rbd_device *rbd_dev;

		rbd_dev = obj_request->img_request->rbd_dev;
		rbd_warn(rbd_dev, "obj_request %p already marked img_data\n",
			obj_request);
	}
}

static bool obj_request_img_data_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_IMG_DATA, &obj_request->flags) != 0;
}

static void obj_request_done_set(struct rbd_obj_request *obj_request)
{
	if (test_and_set_bit(OBJ_REQ_DONE, &obj_request->flags)) {
		struct rbd_device *rbd_dev = NULL;

		if (obj_request_img_data_test(obj_request))
			rbd_dev = obj_request->img_request->rbd_dev;
		rbd_warn(rbd_dev, "obj_request %p already marked done\n",
			obj_request);
	}
}

static bool obj_request_done_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_DONE, &obj_request->flags) != 0;
}

/*
 * This sets the KNOWN flag after (possibly) setting the EXISTS
 * flag.  The latter is set based on the "exists" value provided.
 *
 * Note that for our purposes once an object exists it never goes
 * away again.  It's possible that the responses from two existence
 * checks are separated by the creation of the target object, and
 * the first ("doesn't exist") response arrives *after* the second
 * ("does exist").  In that case we ignore the second one.
 */
static void obj_request_existence_set(struct rbd_obj_request *obj_request,
				bool exists)
{
	if (exists)
		set_bit(OBJ_REQ_EXISTS, &obj_request->flags);
	set_bit(OBJ_REQ_KNOWN, &obj_request->flags);
	smp_mb();
}

static bool obj_request_known_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_KNOWN, &obj_request->flags) != 0;
}

static bool obj_request_exists_test(struct rbd_obj_request *obj_request)
{
	smp_mb();
	return test_bit(OBJ_REQ_EXISTS, &obj_request->flags) != 0;
}
static void rbd_obj_request_get(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p (was %d)\n", __func__, obj_request,
		atomic_read(&obj_request->kref.refcount));
	kref_get(&obj_request->kref);
}

static void rbd_obj_request_destroy(struct kref *kref);
static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request != NULL);
	dout("%s: obj %p (was %d)\n", __func__, obj_request,
		atomic_read(&obj_request->kref.refcount));
	kref_put(&obj_request->kref, rbd_obj_request_destroy);
}

static bool img_request_child_test(struct rbd_img_request *img_request);
static void rbd_parent_request_destroy(struct kref *kref);
static void rbd_img_request_destroy(struct kref *kref);
static void rbd_img_request_put(struct rbd_img_request *img_request)
{
	rbd_assert(img_request != NULL);
	dout("%s: img %p (was %d)\n", __func__, img_request,
		atomic_read(&img_request->kref.refcount));
	if (img_request_child_test(img_request))
		kref_put(&img_request->kref, rbd_parent_request_destroy);
	else
		kref_put(&img_request->kref, rbd_img_request_destroy);
}
static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request->img_request == NULL);

	/* Image request now owns object's original reference */
	obj_request->img_request = img_request;
	obj_request->which = img_request->obj_request_count;
	rbd_assert(!obj_request_img_data_test(obj_request));
	obj_request_img_data_set(obj_request);
	rbd_assert(obj_request->which != BAD_WHICH);
	img_request->obj_request_count++;
	list_add_tail(&obj_request->links, &img_request->obj_requests);
	dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
		obj_request->which);
}

static inline void rbd_img_obj_request_del(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request->which != BAD_WHICH);

	dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
		obj_request->which);
	list_del(&obj_request->links);
	rbd_assert(img_request->obj_request_count > 0);
	img_request->obj_request_count--;
	rbd_assert(obj_request->which == img_request->obj_request_count);
	obj_request->which = BAD_WHICH;
	rbd_assert(obj_request_img_data_test(obj_request));
	rbd_assert(obj_request->img_request == img_request);
	obj_request->img_request = NULL;
	obj_request->callback = NULL;
	rbd_obj_request_put(obj_request);
}
static bool obj_request_type_valid(enum obj_request_type type)
{
	switch (type) {
	case OBJ_REQUEST_NODATA:
	case OBJ_REQUEST_BIO:
	case OBJ_REQUEST_PAGES:
		return true;
	default:
		return false;
	}
}

static int rbd_obj_request_submit(struct ceph_osd_client *osdc,
				struct rbd_obj_request *obj_request)
{
	dout("%s: osdc %p obj %p\n", __func__, osdc, obj_request);

	return ceph_osdc_start_request(osdc, obj_request->osd_req, false);
}
static void rbd_img_request_complete(struct rbd_img_request *img_request)
{
	dout("%s: img %p\n", __func__, img_request);

	/*
	 * If no error occurred, compute the aggregate transfer
	 * count for the image request.  We could instead use
	 * atomic64_cmpxchg() to update it as each object request
	 * completes; it's not clear offhand which way is better.
	 */
	if (!img_request->result) {
		struct rbd_obj_request *obj_request;
		u64 xferred = 0;

		for_each_obj_request(img_request, obj_request)
			xferred += obj_request->xferred;
		img_request->xferred = xferred;
	}

	if (img_request->callback)
		img_request->callback(img_request);
	else
		rbd_img_request_put(img_request);
}

/* Caller is responsible for rbd_obj_request_destroy(obj_request) */

static int rbd_obj_request_wait(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p\n", __func__, obj_request);

	return wait_for_completion_interruptible(&obj_request->completion);
}
/*
 * The default/initial value for all image request flags is 0.  Each
 * is conditionally set to 1 at image request initialization time
 * and currently never changes thereafter.
 */
static void img_request_write_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_WRITE, &img_request->flags);
	smp_mb();
}

static bool img_request_write_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_WRITE, &img_request->flags) != 0;
}

static void img_request_child_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_CHILD, &img_request->flags);
	smp_mb();
}

static void img_request_child_clear(struct rbd_img_request *img_request)
{
	clear_bit(IMG_REQ_CHILD, &img_request->flags);
	smp_mb();
}

static bool img_request_child_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_CHILD, &img_request->flags) != 0;
}

static void img_request_layered_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_LAYERED, &img_request->flags);
	smp_mb();
}

static void img_request_layered_clear(struct rbd_img_request *img_request)
{
	clear_bit(IMG_REQ_LAYERED, &img_request->flags);
	smp_mb();
}

static bool img_request_layered_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_LAYERED, &img_request->flags) != 0;
}
static void
rbd_img_obj_request_read_callback(struct rbd_obj_request *obj_request)
{
	u64 xferred = obj_request->xferred;
	u64 length = obj_request->length;

	dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
		obj_request, obj_request->img_request, obj_request->result,
		xferred, length);
	/*
	 * ENOENT means a hole in the image.  We zero-fill the
	 * entire length of the request.  A short read also implies
	 * zero-fill to the end of the request.  Either way we
	 * update the xferred count to indicate the whole request
	 * was satisfied.
	 */
	rbd_assert(obj_request->type != OBJ_REQUEST_NODATA);
	if (obj_request->result == -ENOENT) {
		if (obj_request->type == OBJ_REQUEST_BIO)
			zero_bio_chain(obj_request->bio_list, 0);
		else
			zero_pages(obj_request->pages, 0, length);
		obj_request->result = 0;
		obj_request->xferred = length;
	} else if (xferred < length && !obj_request->result) {
		if (obj_request->type == OBJ_REQUEST_BIO)
			zero_bio_chain(obj_request->bio_list, xferred);
		else
			zero_pages(obj_request->pages, xferred, length);
		obj_request->xferred = length;
	}
	obj_request_done_set(obj_request);
}
static void rbd_obj_request_complete(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p cb %p\n", __func__, obj_request,
		obj_request->callback);
	if (obj_request->callback)
		obj_request->callback(obj_request);
	else
		complete_all(&obj_request->completion);
}

static void rbd_osd_trivial_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p\n", __func__, obj_request);
	obj_request_done_set(obj_request);
}
static void rbd_osd_read_callback(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request = NULL;
	struct rbd_device *rbd_dev = NULL;
	bool layered = false;

	if (obj_request_img_data_test(obj_request)) {
		img_request = obj_request->img_request;
		layered = img_request && img_request_layered_test(img_request);
		rbd_dev = img_request->rbd_dev;
	}

	dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
		obj_request, img_request, obj_request->result,
		obj_request->xferred, obj_request->length);
	if (layered && obj_request->result == -ENOENT &&
			obj_request->img_offset < rbd_dev->parent_overlap)
		rbd_img_parent_read(obj_request);
	else if (img_request)
		rbd_img_obj_request_read_callback(obj_request);
	else
		obj_request_done_set(obj_request);
}

static void rbd_osd_write_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p result %d %llu\n", __func__, obj_request,
		obj_request->result, obj_request->length);
	/*
	 * There is no such thing as a successful short write.  Set
	 * it to our originally-requested length.
	 */
	obj_request->xferred = obj_request->length;
	obj_request_done_set(obj_request);
}

/*
 * For a simple stat call there's nothing to do.  We'll do more if
 * this is part of a write sequence for a layered image.
 */
static void rbd_osd_stat_callback(struct rbd_obj_request *obj_request)
{
	dout("%s: obj %p\n", __func__, obj_request);
	obj_request_done_set(obj_request);
}
static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
				struct ceph_msg *msg)
{
	struct rbd_obj_request *obj_request = osd_req->r_priv;
	u16 opcode;

	dout("%s: osd_req %p msg %p\n", __func__, osd_req, msg);
	rbd_assert(osd_req == obj_request->osd_req);
	if (obj_request_img_data_test(obj_request)) {
		rbd_assert(obj_request->img_request);
		rbd_assert(obj_request->which != BAD_WHICH);
	} else {
		rbd_assert(obj_request->which == BAD_WHICH);
	}

	if (osd_req->r_result < 0)
		obj_request->result = osd_req->r_result;

	BUG_ON(osd_req->r_num_ops > 2);

	/*
	 * We support a 64-bit length, but ultimately it has to be
	 * passed to blk_end_request(), which takes an unsigned int.
	 */
	obj_request->xferred = osd_req->r_reply_op_len[0];
	rbd_assert(obj_request->xferred < (u64)UINT_MAX);
	opcode = osd_req->r_ops[0].op;
	switch (opcode) {
	case CEPH_OSD_OP_READ:
		rbd_osd_read_callback(obj_request);
		break;
	case CEPH_OSD_OP_WRITE:
		rbd_osd_write_callback(obj_request);
		break;
	case CEPH_OSD_OP_STAT:
		rbd_osd_stat_callback(obj_request);
		break;
	case CEPH_OSD_OP_CALL:
	case CEPH_OSD_OP_NOTIFY_ACK:
	case CEPH_OSD_OP_WATCH:
		rbd_osd_trivial_callback(obj_request);
		break;
	default:
		rbd_warn(NULL, "%s: unsupported op %hu\n",
			obj_request->object_name, (unsigned short) opcode);
		break;
	}

	if (obj_request_done_test(obj_request))
		rbd_obj_request_complete(obj_request);
}
static void rbd_osd_req_format_read(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request = obj_request->img_request;
	struct ceph_osd_request *osd_req = obj_request->osd_req;
	u64 snap_id;

	rbd_assert(osd_req != NULL);

	snap_id = img_request ? img_request->snap_id : CEPH_NOSNAP;
	ceph_osdc_build_request(osd_req, obj_request->offset,
			NULL, snap_id, NULL);
}

static void rbd_osd_req_format_write(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request = obj_request->img_request;
	struct ceph_osd_request *osd_req = obj_request->osd_req;
	struct ceph_snap_context *snapc;
	struct timespec mtime = CURRENT_TIME;

	rbd_assert(osd_req != NULL);

	snapc = img_request ? img_request->snapc : NULL;
	ceph_osdc_build_request(osd_req, obj_request->offset,
			snapc, CEPH_NOSNAP, &mtime);
}
static struct ceph_osd_request *rbd_osd_req_create(
					struct rbd_device *rbd_dev,
					bool write_request,
					struct rbd_obj_request *obj_request)
{
	struct ceph_snap_context *snapc = NULL;
	struct ceph_osd_client *osdc;
	struct ceph_osd_request *osd_req;

	if (obj_request_img_data_test(obj_request)) {
		struct rbd_img_request *img_request = obj_request->img_request;

		rbd_assert(write_request ==
				img_request_write_test(img_request));
		if (write_request)
			snapc = img_request->snapc;
	}

	/* Allocate and initialize the request, for the single op */

	osdc = &rbd_dev->rbd_client->client->osdc;
	osd_req = ceph_osdc_alloc_request(osdc, snapc, 1, false, GFP_ATOMIC);
	if (!osd_req)
		return NULL;	/* ENOMEM */

	if (write_request)
		osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
	else
		osd_req->r_flags = CEPH_OSD_FLAG_READ;

	osd_req->r_callback = rbd_osd_req_callback;
	osd_req->r_priv = obj_request;

	osd_req->r_oid_len = strlen(obj_request->object_name);
	rbd_assert(osd_req->r_oid_len < sizeof (osd_req->r_oid));
	memcpy(osd_req->r_oid, obj_request->object_name, osd_req->r_oid_len);

	osd_req->r_file_layout = rbd_dev->layout;	/* struct */

	return osd_req;
}

/*
 * Create a copyup osd request based on the information in the
 * object request supplied.  A copyup request has two osd ops:
 * a copyup method call and a "normal" write request.
 */
static struct ceph_osd_request *
rbd_osd_req_create_copyup(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	struct ceph_snap_context *snapc;
	struct rbd_device *rbd_dev;
	struct ceph_osd_client *osdc;
	struct ceph_osd_request *osd_req;

	rbd_assert(obj_request_img_data_test(obj_request));
	img_request = obj_request->img_request;
	rbd_assert(img_request);
	rbd_assert(img_request_write_test(img_request));

	/* Allocate and initialize the request, for the two ops */

	snapc = img_request->snapc;
	rbd_dev = img_request->rbd_dev;
	osdc = &rbd_dev->rbd_client->client->osdc;
	osd_req = ceph_osdc_alloc_request(osdc, snapc, 2, false, GFP_ATOMIC);
	if (!osd_req)
		return NULL;	/* ENOMEM */

	osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
	osd_req->r_callback = rbd_osd_req_callback;
	osd_req->r_priv = obj_request;

	osd_req->r_oid_len = strlen(obj_request->object_name);
	rbd_assert(osd_req->r_oid_len < sizeof (osd_req->r_oid));
	memcpy(osd_req->r_oid, obj_request->object_name, osd_req->r_oid_len);

	osd_req->r_file_layout = rbd_dev->layout;	/* struct */

	return osd_req;
}

static void rbd_osd_req_destroy(struct ceph_osd_request *osd_req)
{
	ceph_osdc_put_request(osd_req);
}
/* object_name is assumed to be a non-null pointer and NUL-terminated */

static struct rbd_obj_request *rbd_obj_request_create(const char *object_name,
						u64 offset, u64 length,
						enum obj_request_type type)
{
	struct rbd_obj_request *obj_request;
	size_t size;
	char *name;

	rbd_assert(obj_request_type_valid(type));

	size = strlen(object_name) + 1;
	name = kmalloc(size, GFP_KERNEL);
	if (!name)
		return NULL;

	obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_KERNEL);
	if (!obj_request) {
		kfree(name);
		return NULL;
	}

	obj_request->object_name = memcpy(name, object_name, size);
	obj_request->offset = offset;
	obj_request->length = length;
	obj_request->flags = 0;
	obj_request->which = BAD_WHICH;
	obj_request->type = type;
	INIT_LIST_HEAD(&obj_request->links);
	init_completion(&obj_request->completion);
	kref_init(&obj_request->kref);

	dout("%s: \"%s\" %llu/%llu %d -> obj %p\n", __func__, object_name,
		offset, length, (int)type, obj_request);

	return obj_request;
}

static void rbd_obj_request_destroy(struct kref *kref)
{
	struct rbd_obj_request *obj_request;

	obj_request = container_of(kref, struct rbd_obj_request, kref);

	dout("%s: obj %p\n", __func__, obj_request);

	rbd_assert(obj_request->img_request == NULL);
	rbd_assert(obj_request->which == BAD_WHICH);

	if (obj_request->osd_req)
		rbd_osd_req_destroy(obj_request->osd_req);

	rbd_assert(obj_request_type_valid(obj_request->type));
	switch (obj_request->type) {
	case OBJ_REQUEST_NODATA:
		break;		/* Nothing to do */
	case OBJ_REQUEST_BIO:
		if (obj_request->bio_list)
			bio_chain_put(obj_request->bio_list);
		break;
	case OBJ_REQUEST_PAGES:
		if (obj_request->pages)
			ceph_release_page_vector(obj_request->pages,
						obj_request->page_count);
		break;
	}

	kfree(obj_request->object_name);
	obj_request->object_name = NULL;
	kmem_cache_free(rbd_obj_request_cache, obj_request);
}
/* It's OK to call this for a device with no parent */

static void rbd_spec_put(struct rbd_spec *spec);
static void rbd_dev_unparent(struct rbd_device *rbd_dev)
{
	rbd_dev_remove_parent(rbd_dev);
	rbd_spec_put(rbd_dev->parent_spec);
	rbd_dev->parent_spec = NULL;
	rbd_dev->parent_overlap = 0;
}

/*
 * Parent image reference counting is used to determine when an
 * image's parent fields can be safely torn down--after there are no
 * more in-flight requests to the parent image.  When the last
 * reference is dropped, cleaning them up is safe.
 */
static void rbd_dev_parent_put(struct rbd_device *rbd_dev)
{
	int counter;

	if (!rbd_dev->parent_spec)
		return;

	counter = atomic_dec_return_safe(&rbd_dev->parent_ref);
	if (counter > 0)
		return;

	/* Last reference; clean up parent data structures */

	if (!counter)
		rbd_dev_unparent(rbd_dev);
	else
		rbd_warn(rbd_dev, "parent reference underflow\n");
}

/*
 * If an image has a non-zero parent overlap, get a reference to its
 * parent.
 *
 * We must get the reference before checking for the overlap to
 * coordinate properly with zeroing the parent overlap in
 * rbd_dev_v2_parent_info() when an image gets flattened.  We
 * drop it again if there is no overlap.
 *
 * Returns true if the rbd device has a parent with a non-zero
 * overlap and a reference for it was successfully taken, or
 * false otherwise.
 */
static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
{
	int counter;

	if (!rbd_dev->parent_spec)
		return false;

	counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
	if (counter > 0 && rbd_dev->parent_overlap)
		return true;

	/* Image was flattened, but parent is not yet torn down */

	if (counter < 0)
		rbd_warn(rbd_dev, "parent reference overflow\n");

	return false;
}
/*
 * Caller is responsible for filling in the list of object requests
 * that comprises the image request, and the Linux request pointer
 * (if there is one).
 */
static struct rbd_img_request *rbd_img_request_create(
					struct rbd_device *rbd_dev,
					u64 offset, u64 length,
					bool write_request)
{
	struct rbd_img_request *img_request;

	img_request = kmem_cache_alloc(rbd_img_request_cache, GFP_ATOMIC);
	if (!img_request)
		return NULL;

	if (write_request) {
		down_read(&rbd_dev->header_rwsem);
		ceph_get_snap_context(rbd_dev->header.snapc);
		up_read(&rbd_dev->header_rwsem);
	}

	img_request->rq = NULL;
	img_request->rbd_dev = rbd_dev;
	img_request->offset = offset;
	img_request->length = length;
	img_request->flags = 0;
	if (write_request) {
		img_request_write_set(img_request);
		img_request->snapc = rbd_dev->header.snapc;
	} else {
		img_request->snap_id = rbd_dev->spec->snap_id;
	}
	if (rbd_dev_parent_get(rbd_dev))
		img_request_layered_set(img_request);
	spin_lock_init(&img_request->completion_lock);
	img_request->next_completion = 0;
	img_request->callback = NULL;
	img_request->result = 0;
	img_request->obj_request_count = 0;
	INIT_LIST_HEAD(&img_request->obj_requests);
	kref_init(&img_request->kref);

	dout("%s: rbd_dev %p %s %llu/%llu -> img %p\n", __func__, rbd_dev,
		write_request ? "write" : "read", offset, length,
		img_request);

	return img_request;
}
static void rbd_img_request_destroy(struct kref *kref)
{
	struct rbd_img_request *img_request;
	struct rbd_obj_request *obj_request;
	struct rbd_obj_request *next_obj_request;

	img_request = container_of(kref, struct rbd_img_request, kref);

	dout("%s: img %p\n", __func__, img_request);

	for_each_obj_request_safe(img_request, obj_request, next_obj_request)
		rbd_img_obj_request_del(img_request, obj_request);
	rbd_assert(img_request->obj_request_count == 0);

	if (img_request_layered_test(img_request)) {
		img_request_layered_clear(img_request);
		rbd_dev_parent_put(img_request->rbd_dev);
	}

	if (img_request_write_test(img_request))
		ceph_put_snap_context(img_request->snapc);

	kmem_cache_free(rbd_img_request_cache, img_request);
}
static struct rbd_img_request *rbd_parent_request_create(
					struct rbd_obj_request *obj_request,
					u64 img_offset, u64 length)
{
	struct rbd_img_request *parent_request;
	struct rbd_device *rbd_dev;

	rbd_assert(obj_request->img_request);
	rbd_dev = obj_request->img_request->rbd_dev;

	parent_request = rbd_img_request_create(rbd_dev->parent,
						img_offset, length, false);
	if (!parent_request)
		return NULL;

	img_request_child_set(parent_request);
	rbd_obj_request_get(obj_request);
	parent_request->obj_request = obj_request;

	return parent_request;
}

static void rbd_parent_request_destroy(struct kref *kref)
{
	struct rbd_img_request *parent_request;
	struct rbd_obj_request *orig_request;

	parent_request = container_of(kref, struct rbd_img_request, kref);
	orig_request = parent_request->obj_request;

	parent_request->obj_request = NULL;
	rbd_obj_request_put(orig_request);
	img_request_child_clear(parent_request);

	rbd_img_request_destroy(kref);
}
static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	unsigned int xferred;
	int result;
	bool more;

	rbd_assert(obj_request_img_data_test(obj_request));
	img_request = obj_request->img_request;

	rbd_assert(obj_request->xferred <= (u64)UINT_MAX);
	xferred = (unsigned int)obj_request->xferred;
	result = obj_request->result;
	if (result) {
		struct rbd_device *rbd_dev = img_request->rbd_dev;

		rbd_warn(rbd_dev, "%s %llx at %llx (%llx)\n",
			img_request_write_test(img_request) ? "write" : "read",
			obj_request->length, obj_request->img_offset,
			obj_request->offset);
		rbd_warn(rbd_dev, "  result %d xferred %x\n",
			result, xferred);
		if (!img_request->result)
			img_request->result = result;
	}

	/* Image object requests don't own their page array */

	if (obj_request->type == OBJ_REQUEST_PAGES) {
		obj_request->pages = NULL;
		obj_request->page_count = 0;
	}

	if (img_request_child_test(img_request)) {
		rbd_assert(img_request->obj_request != NULL);
		more = obj_request->which < img_request->obj_request_count - 1;
	} else {
		rbd_assert(img_request->rq != NULL);
		more = blk_end_request(img_request->rq, result, xferred);
	}

	return more;
}
static void rbd_img_obj_callback(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	u32 which = obj_request->which;
	bool more = true;

	rbd_assert(obj_request_img_data_test(obj_request));
	img_request = obj_request->img_request;

	dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
	rbd_assert(img_request != NULL);
	rbd_assert(img_request->obj_request_count > 0);
	rbd_assert(which != BAD_WHICH);
	rbd_assert(which < img_request->obj_request_count);
	rbd_assert(which >= img_request->next_completion);

	spin_lock_irq(&img_request->completion_lock);
	if (which != img_request->next_completion)
		goto out;

	for_each_obj_request_from(img_request, obj_request) {
		rbd_assert(more);
		rbd_assert(which < img_request->obj_request_count);

		if (!obj_request_done_test(obj_request))
			break;
		more = rbd_img_obj_end_request(obj_request);
		which++;
	}

	rbd_assert(more ^ (which == img_request->obj_request_count));
	img_request->next_completion = which;
out:
	spin_unlock_irq(&img_request->completion_lock);

	if (!more)
		rbd_img_request_complete(img_request);
}
/*
 * Split up an image request into one or more object requests, each
 * to a different object.  The "type" parameter indicates whether
 * "data_desc" is the pointer to the head of a list of bio
 * structures, or the base of a page array.  In either case this
 * function assumes data_desc describes memory sufficient to hold
 * all data described by the image request.
 */
static int rbd_img_request_fill(struct rbd_img_request *img_request,
					enum obj_request_type type,
					void *data_desc)
{
	struct rbd_device *rbd_dev = img_request->rbd_dev;
	struct rbd_obj_request *obj_request = NULL;
	struct rbd_obj_request *next_obj_request;
	bool write_request = img_request_write_test(img_request);
	struct bio *bio_list;
	unsigned int bio_offset = 0;
	struct page **pages;
	u64 img_offset;
	u64 resid;
	u16 opcode;

	dout("%s: img %p type %d data_desc %p\n", __func__, img_request,
		(int)type, data_desc);

	opcode = write_request ? CEPH_OSD_OP_WRITE : CEPH_OSD_OP_READ;
	img_offset = img_request->offset;
	resid = img_request->length;
	rbd_assert(resid > 0);

	if (type == OBJ_REQUEST_BIO) {
		bio_list = data_desc;
		rbd_assert(img_offset == bio_list->bi_sector << SECTOR_SHIFT);
	} else {
		rbd_assert(type == OBJ_REQUEST_PAGES);
		pages = data_desc;
	}

	while (resid) {
		struct ceph_osd_request *osd_req;
		const char *object_name;
		u64 offset;
		u64 length;

		object_name = rbd_segment_name(rbd_dev, img_offset);
		if (!object_name)
			goto out_unwind;
		offset = rbd_segment_offset(rbd_dev, img_offset);
		length = rbd_segment_length(rbd_dev, img_offset, resid);
		obj_request = rbd_obj_request_create(object_name,
						offset, length, type);
		/* object request has its own copy of the object name */
		rbd_segment_name_free(object_name);
		if (!obj_request)
			goto out_unwind;

		if (type == OBJ_REQUEST_BIO) {
			unsigned int clone_size;

			rbd_assert(length <= (u64)UINT_MAX);
			clone_size = (unsigned int)length;
			obj_request->bio_list =
					bio_chain_clone_range(&bio_list,
								&bio_offset,
								clone_size,
								GFP_ATOMIC);
			if (!obj_request->bio_list)
				goto out_partial;
		} else {
			unsigned int page_count;

			obj_request->pages = pages;
			page_count = (u32)calc_pages_for(offset, length);
			obj_request->page_count = page_count;
			if ((offset + length) & ~PAGE_MASK)
				page_count--;	/* more on last page */
			pages += page_count;
		}

		osd_req = rbd_osd_req_create(rbd_dev, write_request,
						obj_request);
		if (!osd_req)
			goto out_partial;
		obj_request->osd_req = osd_req;
		obj_request->callback = rbd_img_obj_callback;

		osd_req_op_extent_init(osd_req, 0, opcode, offset, length,
						0, 0);
		if (type == OBJ_REQUEST_BIO)
			osd_req_op_extent_osd_data_bio(osd_req, 0,
					obj_request->bio_list, length);
		else
			osd_req_op_extent_osd_data_pages(osd_req, 0,
					obj_request->pages, length,
					offset & ~PAGE_MASK, false, false);

		if (write_request)
			rbd_osd_req_format_write(obj_request);
		else
			rbd_osd_req_format_read(obj_request);

		obj_request->img_offset = img_offset;
		rbd_img_obj_request_add(img_request, obj_request);

		img_offset += length;
		resid -= length;
	}

	return 0;

out_partial:
	rbd_obj_request_put(obj_request);
out_unwind:
	for_each_obj_request_safe(img_request, obj_request, next_obj_request)
		rbd_obj_request_put(obj_request);

	return -ENOMEM;
}
2274 rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request)
2276 struct rbd_img_request *img_request;
2277 struct rbd_device *rbd_dev;
2278 struct page **pages;
2279 u32 page_count;
2281 rbd_assert(obj_request->type == OBJ_REQUEST_BIO);
2282 rbd_assert(obj_request_img_data_test(obj_request));
2283 img_request = obj_request->img_request;
2284 rbd_assert(img_request);
2286 rbd_dev = img_request->rbd_dev;
2287 rbd_assert(rbd_dev);
2289 pages = obj_request->copyup_pages;
2290 rbd_assert(pages != NULL);
2291 obj_request->copyup_pages = NULL;
2292 page_count = obj_request->copyup_page_count;
2293 rbd_assert(page_count);
2294 obj_request->copyup_page_count = 0;
2295 ceph_release_page_vector(pages, page_count);
2298 * We want the transfer count to reflect the size of the
2299 * original write request. There is no such thing as a
2300 * successful short write, so if the request was successful
2301 * we can just set it to the originally-requested length.
2303 if (!obj_request->result)
2304 obj_request->xferred = obj_request->length;
2306 /* Finish up with the normal image object callback */
2308 rbd_img_obj_callback(obj_request);
2311 static void
2312 rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)
2313 {
2314 struct rbd_obj_request *orig_request;
2315 struct ceph_osd_request *osd_req;
2316 struct ceph_osd_client *osdc;
2317 struct rbd_device *rbd_dev;
2318 struct page **pages;
2319 u32 page_count;
2320 int img_result;
2321 u64 parent_length;
2322 u64 offset;
2323 u64 length;
2325 rbd_assert(img_request_child_test(img_request));
2327 /* First get what we need from the image request */
2329 pages = img_request->copyup_pages;
2330 rbd_assert(pages != NULL);
2331 img_request->copyup_pages = NULL;
2332 page_count = img_request->copyup_page_count;
2333 rbd_assert(page_count);
2334 img_request->copyup_page_count = 0;
2336 orig_request = img_request->obj_request;
2337 rbd_assert(orig_request != NULL);
2338 rbd_assert(obj_request_type_valid(orig_request->type));
2339 img_result = img_request->result;
2340 parent_length = img_request->length;
2341 rbd_assert(parent_length == img_request->xferred);
2342 rbd_img_request_put(img_request);
2344 rbd_assert(orig_request->img_request);
2345 rbd_dev = orig_request->img_request->rbd_dev;
2346 rbd_assert(rbd_dev);
2349 * If the overlap has become 0 (most likely because the
2350 * image has been flattened) we need to free the pages
2351 * and re-submit the original write request.
2353 if (!rbd_dev->parent_overlap) {
2354 struct ceph_osd_client *osdc;
2356 ceph_release_page_vector(pages, page_count);
2357 osdc = &rbd_dev->rbd_client->client->osdc;
2358 img_result = rbd_obj_request_submit(osdc, orig_request);
2359 if (!img_result)
2360 return;
2361 }
2363 if (img_result)
2364 goto out_err;
2367 * The original osd request is of no use to us any more.
2368 * We need a new one that can hold the two ops in a copyup
2369 * request. Allocate the new copyup osd request for the
2370 * original request, and release the old one.
2372 img_result = -ENOMEM;
2373 osd_req = rbd_osd_req_create_copyup(orig_request);
2374 if (!osd_req)
2375 goto out_err;
2376 rbd_osd_req_destroy(orig_request->osd_req);
2377 orig_request->osd_req = osd_req;
2378 orig_request->copyup_pages = pages;
2379 orig_request->copyup_page_count = page_count;
2381 /* Initialize the copyup op */
2383 osd_req_op_cls_init(osd_req, 0, CEPH_OSD_OP_CALL, "rbd", "copyup");
2384 osd_req_op_cls_request_data_pages(osd_req, 0, pages, parent_length, 0,
2385 false, false);
2387 /* Then the original write request op */
2389 offset = orig_request->offset;
2390 length = orig_request->length;
2391 osd_req_op_extent_init(osd_req, 1, CEPH_OSD_OP_WRITE,
2392 offset, length, 0, 0);
2393 if (orig_request->type == OBJ_REQUEST_BIO)
2394 osd_req_op_extent_osd_data_bio(osd_req, 1,
2395 orig_request->bio_list, length);
2396 else
2397 osd_req_op_extent_osd_data_pages(osd_req, 1,
2398 orig_request->pages, length,
2399 offset & ~PAGE_MASK, false, false);
2401 rbd_osd_req_format_write(orig_request);
2403 /* All set, send it off. */
2405 orig_request->callback = rbd_img_obj_copyup_callback;
2406 osdc = &rbd_dev->rbd_client->client->osdc;
2407 img_result = rbd_obj_request_submit(osdc, orig_request);
2408 if (!img_result)
2409 return;
2410 out_err:
2411 /* Record the error code and complete the request */
2413 orig_request->result = img_result;
2414 orig_request->xferred = 0;
2415 obj_request_done_set(orig_request);
2416 rbd_obj_request_complete(orig_request);
2420 * Read from the parent image the range of data that covers the
2421 * entire target of the given object request. This is used for
2422 * satisfying a layered image write request when the target of an
2423 * object request from the image request does not exist.
2425 * A page array big enough to hold the returned data is allocated
2426 * and supplied to rbd_img_request_fill() as the "data descriptor."
2427 * When the read completes, this page array will be transferred to
2428 * the original object request for the copyup operation.
2430 * If an error occurs, record it as the result of the original
2431 * object request and mark it done so it gets completed.
2433 static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request)
2435 struct rbd_img_request *img_request = NULL;
2436 struct rbd_img_request *parent_request = NULL;
2437 struct rbd_device *rbd_dev;
2438 u64 img_offset;
2439 u64 length;
2440 struct page **pages = NULL;
2441 u32 page_count;
2442 int result;
2444 rbd_assert(obj_request_img_data_test(obj_request));
2445 rbd_assert(obj_request_type_valid(obj_request->type));
2447 img_request = obj_request->img_request;
2448 rbd_assert(img_request != NULL);
2449 rbd_dev = img_request->rbd_dev;
2450 rbd_assert(rbd_dev->parent != NULL);
2453 * Determine the byte range covered by the object in the
2454 * child image to which the original request was to be sent.
2456 img_offset = obj_request->img_offset - obj_request->offset;
2457 length = (u64)1 << rbd_dev->header.obj_order;
2460 * There is no defined parent data beyond the parent
2461 * overlap, so limit what we read at that boundary if
2462 * necessary.
2464 if (img_offset + length > rbd_dev->parent_overlap) {
2465 rbd_assert(img_offset < rbd_dev->parent_overlap);
2466 length = rbd_dev->parent_overlap - img_offset;
2470 * Allocate a page array big enough to receive the data read
2471 * from the parent.
2473 page_count = (u32)calc_pages_for(0, length);
2474 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2475 if (IS_ERR(pages)) {
2476 result = PTR_ERR(pages);
2477 pages = NULL;
2478 goto out_err;
2479 }
2481 result = -ENOMEM;
2482 parent_request = rbd_parent_request_create(obj_request,
2483 img_offset, length);
2484 if (!parent_request)
2485 goto out_err;
2487 result = rbd_img_request_fill(parent_request, OBJ_REQUEST_PAGES, pages);
2488 if (result)
2489 goto out_err;
2490 parent_request->copyup_pages = pages;
2491 parent_request->copyup_page_count = page_count;
2493 parent_request->callback = rbd_img_obj_parent_read_full_callback;
2494 result = rbd_img_request_submit(parent_request);
2495 if (!result)
2496 return 0;
2498 parent_request->copyup_pages = NULL;
2499 parent_request->copyup_page_count = 0;
2500 parent_request->obj_request = NULL;
2501 rbd_obj_request_put(obj_request);
2502 out_err:
2503 if (pages)
2504 ceph_release_page_vector(pages, page_count);
2505 if (parent_request)
2506 rbd_img_request_put(parent_request);
2507 obj_request->result = result;
2508 obj_request->xferred = 0;
2509 obj_request_done_set(obj_request);
2511 return result;
2512 }
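/*
 * Editor's note -- summary of the layered-write copyup flow implemented
 * by the two functions above: a write to a clone object that does not
 * yet exist first reads the covering byte range from the parent image
 * (rbd_img_obj_parent_read_full()), then replaces the original osd
 * request with a two-op request whose op 0 is the "rbd" class "copyup"
 * method call carrying the parent data and whose op 1 is the original
 * write (rbd_img_obj_parent_read_full_callback()), so the target object
 * is populated and modified by a single osd request.
 */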
2514 static void rbd_img_obj_exists_callback(struct rbd_obj_request *obj_request)
2516 struct rbd_obj_request *orig_request;
2517 struct rbd_device *rbd_dev;
2518 int result;
2520 rbd_assert(!obj_request_img_data_test(obj_request));
2523 * All we need from the object request is the original
2524 * request and the result of the STAT op. Grab those, then
2525 * we're done with the request.
2527 orig_request = obj_request->obj_request;
2528 obj_request->obj_request = NULL;
2529 rbd_assert(orig_request);
2530 rbd_assert(orig_request->img_request);
2532 result = obj_request->result;
2533 obj_request->result = 0;
2535 dout("%s: obj %p for obj %p result %d %llu/%llu\n", __func__,
2536 obj_request, orig_request, result,
2537 obj_request->xferred, obj_request->length);
2538 rbd_obj_request_put(obj_request);
2541 * If the overlap has become 0 (most likely because the
2542 * image has been flattened) we need to free the pages
2543 * and re-submit the original write request.
2545 rbd_dev = orig_request->img_request->rbd_dev;
2546 if (!rbd_dev->parent_overlap) {
2547 struct ceph_osd_client *osdc;
2549 rbd_obj_request_put(orig_request);
2550 osdc = &rbd_dev->rbd_client->client->osdc;
2551 result = rbd_obj_request_submit(osdc, orig_request);
2552 if (!result)
2553 return;
2554 }
2557 * Our only purpose here is to determine whether the object
2558 * exists, and we don't want to treat the non-existence as
2559 * an error. If something else comes back, transfer the
2560 * error to the original request and complete it now.
2562 if (!result) {
2563 obj_request_existence_set(orig_request, true);
2564 } else if (result == -ENOENT) {
2565 obj_request_existence_set(orig_request, false);
2566 } else if (result) {
2567 orig_request->result = result;
2568 goto out;
2569 }
2572 * Resubmit the original request now that we have recorded
2573 * whether the target object exists.
2575 orig_request->result = rbd_img_obj_request_submit(orig_request);
2576 out:
2577 if (orig_request->result)
2578 rbd_obj_request_complete(orig_request);
2579 rbd_obj_request_put(orig_request);
2582 static int rbd_img_obj_exists_submit(struct rbd_obj_request *obj_request)
2584 struct rbd_obj_request *stat_request;
2585 struct rbd_device *rbd_dev;
2586 struct ceph_osd_client *osdc;
2587 struct page **pages = NULL;
2588 u32 page_count;
2589 size_t size;
2590 int ret;
2593 * The response data for a STAT call consists of:
2594 *     le64 length;
2595 *     struct {
2596 *         le32 tv_sec;
2597 *         le32 tv_nsec;
2598 *     } mtime;
2600 size = sizeof (__le64) + sizeof (__le32) + sizeof (__le32);
2601 page_count = (u32)calc_pages_for(0, size);
2602 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2603 if (IS_ERR(pages))
2604 return PTR_ERR(pages);
2606 ret = -ENOMEM;
2607 stat_request = rbd_obj_request_create(obj_request->object_name, 0, 0,
2608 OBJ_REQUEST_PAGES);
2609 if (!stat_request)
2610 goto out;
2612 rbd_obj_request_get(obj_request);
2613 stat_request->obj_request = obj_request;
2614 stat_request->pages = pages;
2615 stat_request->page_count = page_count;
2617 rbd_assert(obj_request->img_request);
2618 rbd_dev = obj_request->img_request->rbd_dev;
2619 stat_request->osd_req = rbd_osd_req_create(rbd_dev, false,
2620 stat_request);
2621 if (!stat_request->osd_req)
2622 goto out;
2623 stat_request->callback = rbd_img_obj_exists_callback;
2625 osd_req_op_init(stat_request->osd_req, 0, CEPH_OSD_OP_STAT);
2626 osd_req_op_raw_data_in_pages(stat_request->osd_req, 0, pages, size, 0,
2627 false, false);
2628 rbd_osd_req_format_read(stat_request);
2630 osdc = &rbd_dev->rbd_client->client->osdc;
2631 ret = rbd_obj_request_submit(osdc, stat_request);
2632 out:
2633 if (ret)
2634 rbd_obj_request_put(obj_request);
2636 return ret;
2637 }
2639 static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request)
2641 struct rbd_img_request *img_request;
2642 struct rbd_device *rbd_dev;
2643 bool known;
2645 rbd_assert(obj_request_img_data_test(obj_request));
2647 img_request = obj_request->img_request;
2648 rbd_assert(img_request);
2649 rbd_dev = img_request->rbd_dev;
2652 * Only writes to layered images need special handling.
2653 * Reads and non-layered writes are simple object requests.
2654 * Layered writes that start beyond the end of the overlap
2655 * with the parent have no parent data, so they too are
2656 * simple object requests. Finally, if the target object is
2657 * known to already exist, its parent data has already been
2658 * copied, so a write to the object can also be handled as a
2659 * simple object request.
2661 if (!img_request_write_test(img_request) ||
2662 !img_request_layered_test(img_request) ||
2663 rbd_dev->parent_overlap <= obj_request->img_offset ||
2664 ((known = obj_request_known_test(obj_request)) &&
2665 obj_request_exists_test(obj_request))) {
2667 struct rbd_device *rbd_dev;
2668 struct ceph_osd_client *osdc;
2670 rbd_dev = obj_request->img_request->rbd_dev;
2671 osdc = &rbd_dev->rbd_client->client->osdc;
2673 return rbd_obj_request_submit(osdc, obj_request);
2677 * It's a layered write. The target object might exist but
2678 * we may not know that yet. If we know it doesn't exist,
2679 * start by reading the data for the full target object from
2680 * the parent so we can use it for a copyup to the target.
2682 if (known)
2683 return rbd_img_obj_parent_read_full(obj_request);
2685 /* We don't know whether the target exists. Go find out. */
2687 return rbd_img_obj_exists_submit(obj_request);
2690 static int rbd_img_request_submit(struct rbd_img_request *img_request)
2692 struct rbd_obj_request *obj_request;
2693 struct rbd_obj_request *next_obj_request;
2695 dout("%s: img %p\n", __func__, img_request);
2696 for_each_obj_request_safe(img_request, obj_request, next_obj_request) {
2697 int ret;
2699 ret = rbd_img_obj_request_submit(obj_request);
2700 if (ret)
2701 return ret;
2702 }
2704 return 0;
2705 }
2707 static void rbd_img_parent_read_callback(struct rbd_img_request *img_request)
2709 struct rbd_obj_request *obj_request;
2710 struct rbd_device *rbd_dev;
2711 u64 obj_end;
2712 u64 img_xferred;
2713 int img_result;
2715 rbd_assert(img_request_child_test(img_request));
2717 /* First get what we need from the image request and release it */
2719 obj_request = img_request->obj_request;
2720 img_xferred = img_request->xferred;
2721 img_result = img_request->result;
2722 rbd_img_request_put(img_request);
2725 * If the overlap has become 0 (most likely because the
2726 * image has been flattened) we need to re-submit the
2727 * original request.
2729 rbd_assert(obj_request);
2730 rbd_assert(obj_request->img_request);
2731 rbd_dev = obj_request->img_request->rbd_dev;
2732 if (!rbd_dev->parent_overlap) {
2733 struct ceph_osd_client *osdc;
2735 osdc = &rbd_dev->rbd_client->client->osdc;
2736 img_result = rbd_obj_request_submit(osdc, obj_request);
2737 if (!img_result)
2738 return;
2739 }
2741 obj_request->result = img_result;
2742 if (obj_request->result)
2743 goto out;
2746 * We need to zero anything beyond the parent overlap
2747 * boundary. Since rbd_img_obj_request_read_callback()
2748 * will zero anything beyond the end of a short read, an
2749 * easy way to do this is to pretend the data from the
2750 * parent came up short--ending at the overlap boundary.
2752 rbd_assert(obj_request->img_offset < U64_MAX - obj_request->length);
2753 obj_end = obj_request->img_offset + obj_request->length;
2754 if (obj_end > rbd_dev->parent_overlap) {
2755 u64 xferred = 0;
2757 if (obj_request->img_offset < rbd_dev->parent_overlap)
2758 xferred = rbd_dev->parent_overlap -
2759 obj_request->img_offset;
2761 obj_request->xferred = min(img_xferred, xferred);
2762 } else {
2763 obj_request->xferred = img_xferred;
2764 }
2765 out:
2766 rbd_img_obj_request_read_callback(obj_request);
2767 rbd_obj_request_complete(obj_request);
2770 static void rbd_img_parent_read(struct rbd_obj_request *obj_request)
2772 struct rbd_img_request *img_request;
2773 int result;
2775 rbd_assert(obj_request_img_data_test(obj_request));
2776 rbd_assert(obj_request->img_request != NULL);
2777 rbd_assert(obj_request->result == (s32) -ENOENT);
2778 rbd_assert(obj_request_type_valid(obj_request->type));
2780 /* rbd_read_finish(obj_request, obj_request->length); */
2781 img_request = rbd_parent_request_create(obj_request,
2782 obj_request->img_offset,
2783 obj_request->length);
2784 result = -ENOMEM;
2785 if (!img_request)
2786 goto out_err;
2788 if (obj_request->type == OBJ_REQUEST_BIO)
2789 result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
2790 obj_request->bio_list);
2791 else
2792 result = rbd_img_request_fill(img_request, OBJ_REQUEST_PAGES,
2793 obj_request->pages);
2794 if (result)
2795 goto out_err;
2797 img_request->callback = rbd_img_parent_read_callback;
2798 result = rbd_img_request_submit(img_request);
2799 if (result)
2800 goto out_err;
2802 return;
2803 out_err:
2804 if (img_request)
2805 rbd_img_request_put(img_request);
2806 obj_request->result = result;
2807 obj_request->xferred = 0;
2808 obj_request_done_set(obj_request);
2811 static int rbd_obj_notify_ack(struct rbd_device *rbd_dev, u64 notify_id)
2813 struct rbd_obj_request *obj_request;
2814 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2815 int ret;
2817 obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
2818 OBJ_REQUEST_NODATA);
2819 if (!obj_request)
2820 return -ENOMEM;
2822 ret = -ENOMEM;
2823 obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, obj_request);
2824 if (!obj_request->osd_req)
2825 goto out;
2826 obj_request->callback = rbd_obj_request_put;
2828 osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_NOTIFY_ACK,
2829 notify_id, 0, 0);
2830 rbd_osd_req_format_read(obj_request);
2832 ret = rbd_obj_request_submit(osdc, obj_request);
2833 out:
2834 if (ret)
2835 rbd_obj_request_put(obj_request);
2837 return ret;
2838 }
2840 static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
2842 struct rbd_device *rbd_dev = (struct rbd_device *)data;
2843 int ret;
2845 if (!rbd_dev)
2846 return;
2848 dout("%s: \"%s\" notify_id %llu opcode %u\n", __func__,
2849 rbd_dev->header_name, (unsigned long long)notify_id,
2850 (unsigned int)opcode);
2851 ret = rbd_dev_refresh(rbd_dev);
2852 if (ret)
2853 rbd_warn(rbd_dev, ": header refresh error (%d)\n", ret);
2855 rbd_obj_notify_ack(rbd_dev, notify_id);
2859 * Request sync osd watch/unwatch. The value of "start" determines
2860 * whether a watch request is being initiated or torn down.
2862 static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev, bool start)
2864 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2865 struct rbd_obj_request *obj_request;
2866 int ret;
2868 rbd_assert(start ^ !!rbd_dev->watch_event);
2869 rbd_assert(start ^ !!rbd_dev->watch_request);
2871 if (start) {
2872 ret = ceph_osdc_create_event(osdc, rbd_watch_cb, rbd_dev,
2873 &rbd_dev->watch_event);
2874 if (ret < 0)
2875 return ret;
2876 rbd_assert(rbd_dev->watch_event != NULL);
2877 }
2879 ret = -ENOMEM;
2880 obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
2881 OBJ_REQUEST_NODATA);
2882 if (!obj_request)
2883 goto out_cancel;
2885 obj_request->osd_req = rbd_osd_req_create(rbd_dev, true, obj_request);
2886 if (!obj_request->osd_req)
2887 goto out_cancel;
2889 if (start)
2890 ceph_osdc_set_request_linger(osdc, obj_request->osd_req);
2891 else
2892 ceph_osdc_unregister_linger_request(osdc,
2893 rbd_dev->watch_request->osd_req);
2895 osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_WATCH,
2896 rbd_dev->watch_event->cookie, 0, start ? 1 : 0);
2897 rbd_osd_req_format_write(obj_request);
2899 ret = rbd_obj_request_submit(osdc, obj_request);
2900 if (ret)
2901 goto out_cancel;
2902 ret = rbd_obj_request_wait(obj_request);
2903 if (ret)
2904 goto out_cancel;
2905 ret = obj_request->result;
2906 if (ret)
2907 goto out_cancel;
2910 * A watch request is set to linger, so the underlying osd
2911 * request won't go away until we unregister it. We retain
2912 * a pointer to the object request during that time (in
2913 * rbd_dev->watch_request), so we'll keep a reference to
2914 * it.  We'll drop that reference (below) after we've
2915 * unregistered it.
2917 if (start) {
2918 rbd_dev->watch_request = obj_request;
2920 return 0;
2921 }
2923 /* We have successfully torn down the watch request */
2925 rbd_obj_request_put(rbd_dev->watch_request);
2926 rbd_dev->watch_request = NULL;
2927 out_cancel:
2928 /* Cancel the event if we're tearing down, or on error */
2929 ceph_osdc_cancel_event(rbd_dev->watch_event);
2930 rbd_dev->watch_event = NULL;
2931 if (obj_request)
2932 rbd_obj_request_put(obj_request);
2934 return ret;
2935 }
2938 * Synchronous osd object method call. Returns the number of bytes
2939 * returned in the outbound buffer, or a negative error code.
2941 static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
2942 const char *object_name,
2943 const char *class_name,
2944 const char *method_name,
2945 const void *outbound,
2946 size_t outbound_size,
2947 void *inbound,
2948 size_t inbound_size)
2950 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2951 struct rbd_obj_request *obj_request;
2952 struct page **pages;
2953 u32 page_count;
2954 int ret;
2957 * Method calls are ultimately read operations.  The result
2958 * should be placed into the inbound buffer provided.  They
2959 * also supply outbound data--parameters for the object
2960 * method.  Currently if this is present it will be a
2961 * snapshot id.
2963 page_count = (u32)calc_pages_for(0, inbound_size);
2964 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
2965 if (IS_ERR(pages))
2966 return PTR_ERR(pages);
2968 ret = -ENOMEM;
2969 obj_request = rbd_obj_request_create(object_name, 0, inbound_size,
2970 OBJ_REQUEST_PAGES);
2971 if (!obj_request)
2972 goto out;
2974 obj_request->pages = pages;
2975 obj_request->page_count = page_count;
2977 obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, obj_request);
2978 if (!obj_request->osd_req)
2979 goto out;
2981 osd_req_op_cls_init(obj_request->osd_req, 0, CEPH_OSD_OP_CALL,
2982 class_name, method_name);
2983 if (outbound_size) {
2984 struct ceph_pagelist *pagelist;
2986 pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS);
2987 if (!pagelist)
2988 goto out;
2990 ceph_pagelist_init(pagelist);
2991 ceph_pagelist_append(pagelist, outbound, outbound_size);
2992 osd_req_op_cls_request_data_pagelist(obj_request->osd_req, 0,
2993 pagelist);
2994 }
2995 osd_req_op_cls_response_data_pages(obj_request->osd_req, 0,
2996 obj_request->pages, inbound_size,
2997 0, false, false);
2998 rbd_osd_req_format_read(obj_request);
3000 ret = rbd_obj_request_submit(osdc, obj_request);
3001 if (ret)
3002 goto out;
3003 ret = rbd_obj_request_wait(obj_request);
3004 if (ret)
3005 goto out;
3007 ret = obj_request->result;
3008 if (ret)
3009 goto out;
3011 rbd_assert(obj_request->xferred < (u64)INT_MAX);
3012 ret = (int)obj_request->xferred;
3013 ceph_copy_from_page_vector(pages, inbound, 0, obj_request->xferred);
3014 out:
3015 if (obj_request)
3016 rbd_obj_request_put(obj_request);
3017 else
3018 ceph_release_page_vector(pages, page_count);
3020 return ret;
3021 }
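/*
 * Editor's note -- usage sketch for rbd_obj_method_sync() (hypothetical;
 * real callers appear further below): invoke a class method with a small
 * outbound parameter and receive the reply in a caller-supplied buffer.
 * "cls" and "method" are placeholders, not a real object class.
 */
static int __maybe_unused rbd_sketch_method_call(struct rbd_device *rbd_dev)
{
	__le64 param = cpu_to_le64(1);	/* hypothetical parameter */
	char reply[64];			/* hypothetical reply buffer */
	int ret;

	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
				"cls", "method",
				&param, sizeof (param),
				reply, sizeof (reply));
	if (ret < 0)
		return ret;	/* request failed */

	/* on success, ret is the number of bytes placed in reply */
	return ret;
}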
3023 static void rbd_request_fn(struct request_queue *q)
3024 __releases(q->queue_lock) __acquires(q->queue_lock)
3026 struct rbd_device *rbd_dev = q->queuedata;
3027 bool read_only = rbd_dev->mapping.read_only;
3028 struct request *rq;
3029 int result;
3031 while ((rq = blk_fetch_request(q))) {
3032 bool write_request = rq_data_dir(rq) == WRITE;
3033 struct rbd_img_request *img_request;
3034 u64 offset;
3035 u64 length;
3037 /* Ignore any non-FS requests that filter through. */
3039 if (rq->cmd_type != REQ_TYPE_FS) {
3040 dout("%s: non-fs request type %d\n", __func__,
3041 (int) rq->cmd_type);
3042 __blk_end_request_all(rq, 0);
3043 continue;
3044 }
3046 /* Ignore/skip any zero-length requests */
3048 offset = (u64) blk_rq_pos(rq) << SECTOR_SHIFT;
3049 length = (u64) blk_rq_bytes(rq);
3051 if (!length) {
3052 dout("%s: zero-length request\n", __func__);
3053 __blk_end_request_all(rq, 0);
3054 continue;
3055 }
3057 spin_unlock_irq(q->queue_lock);
3059 /* Disallow writes to a read-only device */
3061 if (write_request) {
3062 result = -EROFS;
3063 if (read_only)
3064 goto end_request;
3065 rbd_assert(rbd_dev->spec->snap_id == CEPH_NOSNAP);
3066 }
3069 * Quit early if the mapped snapshot no longer
3070 * exists. It's still possible the snapshot will
3071 * have disappeared by the time our request arrives
3072 * at the osd, but there's no sense in sending it if
3073 * we already know.
3075 if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags)) {
3076 dout("request for non-existent snapshot");
3077 rbd_assert(rbd_dev->spec->snap_id != CEPH_NOSNAP);
3078 result = -ENXIO;
3079 goto end_request;
3080 }
3082 result = -EINVAL;
3083 if (offset && length > U64_MAX - offset + 1) {
3084 rbd_warn(rbd_dev, "bad request range (%llu~%llu)\n",
3085 offset, length);
3086 goto end_request; /* Shouldn't happen */
3087 }
3089 result = -EIO;
3090 if (offset + length > rbd_dev->mapping.size) {
3091 rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)\n",
3092 offset, length, rbd_dev->mapping.size);
3093 goto end_request;
3094 }
3096 result = -ENOMEM;
3097 img_request = rbd_img_request_create(rbd_dev, offset, length,
3098 write_request);
3099 if (!img_request)
3100 goto end_request;
3102 img_request->rq = rq;
3104 result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
3105 rq->bio);
3106 if (!result)
3107 result = rbd_img_request_submit(img_request);
3108 if (result)
3109 rbd_img_request_put(img_request);
3110 end_request:
3111 spin_lock_irq(q->queue_lock);
3112 if (result < 0) {
3113 rbd_warn(rbd_dev, "%s %llx at %llx result %d\n",
3114 write_request ? "write" : "read",
3115 length, offset, result);
3117 __blk_end_request_all(rq, result);
3118 }
3119 }
3120 }
3123 * A queue merge callback.  Ensures that we don't create a bio that
3124 * spans multiple osd objects.  One exception is a single-page bio,
3125 * which we handle later in bio_chain_clone_range().
3127 static int rbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bmd,
3128 struct bio_vec *bvec)
3130 struct rbd_device *rbd_dev = q->queuedata;
3131 sector_t sector_offset;
3132 sector_t sectors_per_obj;
3133 sector_t obj_sector_offset;
3134 int ret;
3137 * Find how far into its rbd object the bio's start sector
3138 * lies.  The partition-relative sector is first made absolute
3139 * within the enclosing device, then reduced modulo the object
3140 * size.
3141 sector_offset = get_start_sect(bmd->bi_bdev) + bmd->bi_sector;
3142 sectors_per_obj = 1 << (rbd_dev->header.obj_order - SECTOR_SHIFT);
3143 obj_sector_offset = sector_offset & (sectors_per_obj - 1);
3146 * Compute the number of bytes from that offset to the end
3147 * of the object. Account for what's already used by the bio.
3149 ret = (int) (sectors_per_obj - obj_sector_offset) << SECTOR_SHIFT;
3150 if (ret > bmd->bi_size)
3151 ret -= bmd->bi_size;
3152 else
3153 ret = 0;
3156 * Don't send back more than was asked for. And if the bio
3157 * was empty, let the whole thing through because: "Note
3158 * that a block device *must* allow a single page to be
3159 * added to an empty bio."
3161 rbd_assert(bvec->bv_len <= PAGE_SIZE);
3162 if (ret > (int) bvec->bv_len || !bmd->bi_size)
3163 ret = (int) bvec->bv_len;
3165 return ret;
3166 }
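/*
 * Editor's note -- worked example for the calculation above (assuming
 * obj_order 22, i.e. 4 MiB objects, so 8192 512-byte sectors per
 * object): a bio whose absolute start sector is 8190 sits 8190 sectors
 * into its object, leaving (8192 - 8190) << 9 = 1024 bytes before the
 * object boundary; if the bio already holds 512 bytes, at most 512 more
 * may be merged, and a larger bvec is rejected (unless the bio is
 * empty, per the rule quoted above).
 */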
3168 static void rbd_free_disk(struct rbd_device *rbd_dev)
3170 struct gendisk *disk = rbd_dev->disk;
3172 if (!disk)
3173 return;
3175 rbd_dev->disk = NULL;
3176 if (disk->flags & GENHD_FL_UP) {
3177 del_gendisk(disk);
3178 if (disk->queue)
3179 blk_cleanup_queue(disk->queue);
3180 }
3181 put_disk(disk);
3182 }
3184 static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
3185 const char *object_name,
3186 u64 offset, u64 length, void *buf)
3189 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3190 struct rbd_obj_request *obj_request;
3191 struct page **pages = NULL;
3192 u32 page_count;
3193 size_t size;
3194 int ret;
3196 page_count = (u32) calc_pages_for(offset, length);
3197 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
3198 if (IS_ERR(pages))
3199 return PTR_ERR(pages);
3201 ret = -ENOMEM;
3202 obj_request = rbd_obj_request_create(object_name, offset, length,
3203 OBJ_REQUEST_PAGES);
3204 if (!obj_request)
3205 goto out;
3207 obj_request->pages = pages;
3208 obj_request->page_count = page_count;
3210 obj_request->osd_req = rbd_osd_req_create(rbd_dev, false, obj_request);
3211 if (!obj_request->osd_req)
3212 goto out;
3214 osd_req_op_extent_init(obj_request->osd_req, 0, CEPH_OSD_OP_READ,
3215 offset, length, 0, 0);
3216 osd_req_op_extent_osd_data_pages(obj_request->osd_req, 0,
3217 obj_request->pages,
3218 obj_request->length,
3219 obj_request->offset & ~PAGE_MASK,
3220 false, false);
3221 rbd_osd_req_format_read(obj_request);
3223 ret = rbd_obj_request_submit(osdc, obj_request);
3224 if (ret)
3225 goto out;
3226 ret = rbd_obj_request_wait(obj_request);
3227 if (ret)
3228 goto out;
3230 ret = obj_request->result;
3231 if (ret)
3232 goto out;
3234 rbd_assert(obj_request->xferred <= (u64) SIZE_MAX);
3235 size = (size_t) obj_request->xferred;
3236 ceph_copy_from_page_vector(pages, buf, 0, size);
3237 rbd_assert(size <= (size_t)INT_MAX);
3238 ret = (int)size;
3239 out:
3240 if (obj_request)
3241 rbd_obj_request_put(obj_request);
3242 else
3243 ceph_release_page_vector(pages, page_count);
3245 return ret;
3246 }
3249 * Read the complete header for the given rbd device. On successful
3250 * return, the rbd_dev->header field will contain up-to-date
3251 * information about the image.
3253 static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev)
3255 struct rbd_image_header_ondisk *ondisk = NULL;
3256 u32 snap_count = 0;
3257 u64 names_size = 0;
3258 u32 want_count;
3259 int ret;
3262 * The complete header will include an array of its 64-bit
3263 * snapshot ids, followed by the names of those snapshots as
3264 * a contiguous block of NUL-terminated strings. Note that
3265 * the number of snapshots could change by the time we read
3266 * it in, in which case we re-read it.
3268 do {
3269 size_t size;
3271 kfree(ondisk);
3273 size = sizeof (*ondisk);
3274 size += snap_count * sizeof (struct rbd_image_snap_ondisk);
3275 size += names_size;
3276 ondisk = kmalloc(size, GFP_KERNEL);
3277 if (!ondisk)
3278 return -ENOMEM;
3280 ret = rbd_obj_read_sync(rbd_dev, rbd_dev->header_name,
3281 0, size, ondisk);
3282 if (ret < 0)
3283 goto out;
3284 if ((size_t)ret < size) {
3285 ret = -ENXIO;
3286 rbd_warn(rbd_dev, "short header read (want %zd got %d)",
3287 size, ret);
3288 goto out;
3289 }
3290 if (!rbd_dev_ondisk_valid(ondisk)) {
3291 ret = -ENXIO;
3292 rbd_warn(rbd_dev, "invalid header");
3293 goto out;
3294 }
3296 names_size = le64_to_cpu(ondisk->snap_names_len);
3297 want_count = snap_count;
3298 snap_count = le32_to_cpu(ondisk->snap_count);
3299 } while (snap_count != want_count);
3301 ret = rbd_header_from_disk(rbd_dev, ondisk);
3302 out:
3303 kfree(ondisk);
3305 return ret;
3306 }
3309 * Clear the rbd device's EXISTS flag if the snapshot it's mapped to
3310 * has disappeared from the (just updated) snapshot context.
3312 static void rbd_exists_validate(struct rbd_device *rbd_dev)
3314 u64 snap_id;
3316 if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags))
3317 return;
3319 snap_id = rbd_dev->spec->snap_id;
3320 if (snap_id == CEPH_NOSNAP)
3321 return;
3323 if (rbd_dev_snap_index(rbd_dev, snap_id) == BAD_SNAP_INDEX)
3324 clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
3327 static int rbd_dev_refresh(struct rbd_device *rbd_dev)
3328 {
3329 u64 mapping_size;
3330 int ret;
3332 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
3333 mapping_size = rbd_dev->mapping.size;
3334 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
3335 if (rbd_dev->image_format == 1)
3336 ret = rbd_dev_v1_header_info(rbd_dev);
3337 else
3338 ret = rbd_dev_v2_header_info(rbd_dev);
3340 /* If it's a mapped snapshot, validate its EXISTS flag */
3342 rbd_exists_validate(rbd_dev);
3343 mutex_unlock(&ctl_mutex);
3344 if (mapping_size != rbd_dev->mapping.size) {
3345 sector_t size;
3347 size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
3348 dout("setting size to %llu sectors", (unsigned long long)size);
3349 set_capacity(rbd_dev->disk, size);
3350 revalidate_disk(rbd_dev->disk);
3351 }
3353 return ret;
3354 }
3356 static int rbd_init_disk(struct rbd_device *rbd_dev)
3358 struct gendisk *disk;
3359 struct request_queue *q;
3360 u64 segment_size;
3362 /* create gendisk info */
3363 disk = alloc_disk(RBD_MINORS_PER_MAJOR);
3364 if (!disk)
3365 return -ENOMEM;
3367 snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
3368 rbd_dev->dev_id);
3369 disk->major = rbd_dev->major;
3370 disk->first_minor = 0;
3371 disk->fops = &rbd_bd_ops;
3372 disk->private_data = rbd_dev;
3374 q = blk_init_queue(rbd_request_fn, &rbd_dev->lock);
3375 if (!q)
3376 goto out_disk;
3378 /* We use the default size, but let's be explicit about it. */
3379 blk_queue_physical_block_size(q, SECTOR_SIZE);
3381 /* set io sizes to object size */
3382 segment_size = rbd_obj_bytes(&rbd_dev->header);
3383 blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE);
3384 blk_queue_max_segment_size(q, segment_size);
3385 blk_queue_io_min(q, segment_size);
3386 blk_queue_io_opt(q, segment_size);
3388 blk_queue_merge_bvec(q, rbd_merge_bvec);
3389 disk->queue = q;
3391 q->queuedata = rbd_dev;
3393 rbd_dev->disk = disk;
3395 return 0;
3396 out_disk:
3397 put_disk(disk);
3399 return -ENOMEM;
3400 }
3406 static struct rbd_device *dev_to_rbd_dev(struct device *dev)
3408 return container_of(dev, struct rbd_device, dev);
3411 static ssize_t rbd_size_show(struct device *dev,
3412 struct device_attribute *attr, char *buf)
3414 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3416 return sprintf(buf, "%llu\n",
3417 (unsigned long long)rbd_dev->mapping.size);
3421 * Note this shows the features for whatever's mapped, which is not
3422 * necessarily the base image.
3424 static ssize_t rbd_features_show(struct device *dev,
3425 struct device_attribute *attr, char *buf)
3427 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3429 return sprintf(buf, "0x%016llx\n",
3430 (unsigned long long)rbd_dev->mapping.features);
3433 static ssize_t rbd_major_show(struct device *dev,
3434 struct device_attribute *attr, char *buf)
3436 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3439 return sprintf(buf, "%d\n", rbd_dev->major);
3441 return sprintf(buf, "(none)\n");
3445 static ssize_t rbd_client_id_show(struct device *dev,
3446 struct device_attribute *attr, char *buf)
3448 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3450 return sprintf(buf, "client%lld\n",
3451 ceph_client_id(rbd_dev->rbd_client->client));
3454 static ssize_t rbd_pool_show(struct device *dev,
3455 struct device_attribute *attr, char *buf)
3457 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3459 return sprintf(buf, "%s\n", rbd_dev->spec->pool_name);
3462 static ssize_t rbd_pool_id_show(struct device *dev,
3463 struct device_attribute *attr, char *buf)
3465 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3467 return sprintf(buf, "%llu\n",
3468 (unsigned long long) rbd_dev->spec->pool_id);
3471 static ssize_t rbd_name_show(struct device *dev,
3472 struct device_attribute *attr, char *buf)
3474 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3476 if (rbd_dev->spec->image_name)
3477 return sprintf(buf, "%s\n", rbd_dev->spec->image_name);
3479 return sprintf(buf, "(unknown)\n");
3482 static ssize_t rbd_image_id_show(struct device *dev,
3483 struct device_attribute *attr, char *buf)
3485 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3487 return sprintf(buf, "%s\n", rbd_dev->spec->image_id);
3491 * Shows the name of the currently-mapped snapshot (or
3492 * RBD_SNAP_HEAD_NAME for the base image).
3494 static ssize_t rbd_snap_show(struct device *dev,
3495 struct device_attribute *attr,
3498 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3500 return sprintf(buf, "%s\n", rbd_dev->spec->snap_name);
3504 * For an rbd v2 image, shows the pool id, image id, and snapshot id
3505 * for the parent image. If there is no parent, simply shows
3506 * "(no parent image)".
3508 static ssize_t rbd_parent_show(struct device *dev,
3509 struct device_attribute *attr,
3512 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3513 struct rbd_spec *spec = rbd_dev->parent_spec;
3514 int count;
3515 char *bufp = buf;
3517 if (!spec)
3518 return sprintf(buf, "(no parent image)\n");
3520 count = sprintf(bufp, "pool_id %llu\npool_name %s\n",
3521 (unsigned long long) spec->pool_id, spec->pool_name);
3522 if (count < 0)
3523 return count;
3524 bufp += count;
3526 count = sprintf(bufp, "image_id %s\nimage_name %s\n", spec->image_id,
3527 spec->image_name ? spec->image_name : "(unknown)");
3528 if (count < 0)
3529 return count;
3530 bufp += count;
3532 count = sprintf(bufp, "snap_id %llu\nsnap_name %s\n",
3533 (unsigned long long) spec->snap_id, spec->snap_name);
3534 if (count < 0)
3535 return count;
3536 bufp += count;
3538 count = sprintf(bufp, "overlap %llu\n", rbd_dev->parent_overlap);
3539 if (count < 0)
3540 return count;
3541 bufp += count;
3543 return (ssize_t) (bufp - buf);
3546 static ssize_t rbd_image_refresh(struct device *dev,
3547 struct device_attribute *attr,
3551 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
3552 int ret;
3554 ret = rbd_dev_refresh(rbd_dev);
3555 if (ret)
3556 rbd_warn(rbd_dev, ": manual header refresh error (%d)\n", ret);
3558 return ret < 0 ? ret : size;
3561 static DEVICE_ATTR(size, S_IRUGO, rbd_size_show, NULL);
3562 static DEVICE_ATTR(features, S_IRUGO, rbd_features_show, NULL);
3563 static DEVICE_ATTR(major, S_IRUGO, rbd_major_show, NULL);
3564 static DEVICE_ATTR(client_id, S_IRUGO, rbd_client_id_show, NULL);
3565 static DEVICE_ATTR(pool, S_IRUGO, rbd_pool_show, NULL);
3566 static DEVICE_ATTR(pool_id, S_IRUGO, rbd_pool_id_show, NULL);
3567 static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL);
3568 static DEVICE_ATTR(image_id, S_IRUGO, rbd_image_id_show, NULL);
3569 static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh);
3570 static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL);
3571 static DEVICE_ATTR(parent, S_IRUGO, rbd_parent_show, NULL);
3573 static struct attribute *rbd_attrs[] = {
3574 &dev_attr_size.attr,
3575 &dev_attr_features.attr,
3576 &dev_attr_major.attr,
3577 &dev_attr_client_id.attr,
3578 &dev_attr_pool.attr,
3579 &dev_attr_pool_id.attr,
3580 &dev_attr_name.attr,
3581 &dev_attr_image_id.attr,
3582 &dev_attr_current_snap.attr,
3583 &dev_attr_parent.attr,
3584 &dev_attr_refresh.attr,
3585 NULL
3586 };
3588 static struct attribute_group rbd_attr_group = {
3589 .attrs = rbd_attrs,
3590 };
3592 static const struct attribute_group *rbd_attr_groups[] = {
3593 &rbd_attr_group,
3594 NULL
3595 };
3597 static void rbd_sysfs_dev_release(struct device *dev)
3598 {
3599 }
3601 static struct device_type rbd_device_type = {
3602 .name = "rbd",
3603 .groups = rbd_attr_groups,
3604 .release = rbd_sysfs_dev_release,
3605 };
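/*
 * Editor's note -- with a device mapped, the attributes defined above
 * surface under /sys/bus/rbd/devices/<id>/, e.g. (values hypothetical):
 *
 *	$ cat /sys/bus/rbd/devices/0/pool
 *	rbd
 *	$ cat /sys/bus/rbd/devices/0/current_snap
 *	-
 *	$ echo 1 > /sys/bus/rbd/devices/0/refresh
 *
 * See Documentation/ABI/testing/sysfs-bus-rbd for the full interface.
 */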
3607 static struct rbd_spec *rbd_spec_get(struct rbd_spec *spec)
3608 {
3609 kref_get(&spec->kref);
3611 return spec;
3612 }
3614 static void rbd_spec_free(struct kref *kref);
3615 static void rbd_spec_put(struct rbd_spec *spec)
3616 {
3617 if (spec)
3618 kref_put(&spec->kref, rbd_spec_free);
3619 }
3621 static struct rbd_spec *rbd_spec_alloc(void)
3623 struct rbd_spec *spec;
3625 spec = kzalloc(sizeof (*spec), GFP_KERNEL);
3626 if (!spec)
3627 return NULL;
3628 kref_init(&spec->kref);
3630 return spec;
3631 }
3633 static void rbd_spec_free(struct kref *kref)
3635 struct rbd_spec *spec = container_of(kref, struct rbd_spec, kref);
3637 kfree(spec->pool_name);
3638 kfree(spec->image_id);
3639 kfree(spec->image_name);
3640 kfree(spec->snap_name);
3641 kfree(spec);
3642 }
3644 static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
3645 struct rbd_spec *spec)
3647 struct rbd_device *rbd_dev;
3649 rbd_dev = kzalloc(sizeof (*rbd_dev), GFP_KERNEL);
3650 if (!rbd_dev)
3651 return NULL;
3653 spin_lock_init(&rbd_dev->lock);
3655 atomic_set(&rbd_dev->parent_ref, 0);
3656 INIT_LIST_HEAD(&rbd_dev->node);
3657 init_rwsem(&rbd_dev->header_rwsem);
3659 rbd_dev->spec = spec;
3660 rbd_dev->rbd_client = rbdc;
3662 /* Initialize the layout used for all rbd requests */
3664 rbd_dev->layout.fl_stripe_unit = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
3665 rbd_dev->layout.fl_stripe_count = cpu_to_le32(1);
3666 rbd_dev->layout.fl_object_size = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
3667 rbd_dev->layout.fl_pg_pool = cpu_to_le32((u32) spec->pool_id);
3669 return rbd_dev;
3670 }
3672 static void rbd_dev_destroy(struct rbd_device *rbd_dev)
3674 rbd_put_client(rbd_dev->rbd_client);
3675 rbd_spec_put(rbd_dev->spec);
3676 kfree(rbd_dev);
3677 }
3680 * Get the size and object order for an image snapshot, or if
3681 * snap_id is CEPH_NOSNAP, gets this information for the base
3684 static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
3685 u8 *order, u64 *snap_size)
3687 __le64 snapid = cpu_to_le64(snap_id);
3688 int ret;
3689 struct {
3690 u8 order;
3691 __le64 size;
3692 } __attribute__ ((packed)) size_buf = { 0 };
3694 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3695 "rbd", "get_size",
3696 &snapid, sizeof (snapid),
3697 &size_buf, sizeof (size_buf));
3698 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3699 if (ret < 0)
3700 return ret;
3701 if (ret < sizeof (size_buf))
3702 return -ERANGE;
3705 *order = size_buf.order;
3706 *snap_size = le64_to_cpu(size_buf.size);
3708 dout(" snap_id 0x%016llx order = %u, snap_size = %llu\n",
3709 (unsigned long long)snap_id, (unsigned int)*order,
3710 (unsigned long long)*snap_size);
3712 return 0;
3713 }
3715 static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev)
3717 return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
3718 &rbd_dev->header.obj_order,
3719 &rbd_dev->header.image_size);
3722 static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
3723 {
3724 void *reply_buf;
3725 int ret;
3726 void *p;
3728 reply_buf = kzalloc(RBD_OBJ_PREFIX_LEN_MAX, GFP_KERNEL);
3729 if (!reply_buf)
3730 return -ENOMEM;
3732 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3733 "rbd", "get_object_prefix", NULL, 0,
3734 reply_buf, RBD_OBJ_PREFIX_LEN_MAX);
3735 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3736 if (ret < 0)
3737 goto out;
3739 p = reply_buf;
3740 rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
3741 p + ret, NULL, GFP_NOIO);
3743 ret = 0;
3744 if (IS_ERR(rbd_dev->header.object_prefix)) {
3745 ret = PTR_ERR(rbd_dev->header.object_prefix);
3746 rbd_dev->header.object_prefix = NULL;
3747 } else {
3748 dout("  object_prefix = %s\n", rbd_dev->header.object_prefix);
3749 }
3751 out:
3752 kfree(reply_buf);
3754 return ret;
3755 }
3756 static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
3757 u64 *snap_features)
3758 {
3759 __le64 snapid = cpu_to_le64(snap_id);
3760 struct {
3761 __le64 features;
3762 __le64 incompat;
3763 } __attribute__ ((packed)) features_buf = { 0 };
3764 u64 incompat;
3765 int ret;
3767 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3768 "rbd", "get_features",
3769 &snapid, sizeof (snapid),
3770 &features_buf, sizeof (features_buf));
3771 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3772 if (ret < 0)
3773 return ret;
3774 if (ret < sizeof (features_buf))
3775 return -ERANGE;
3777 incompat = le64_to_cpu(features_buf.incompat);
3778 if (incompat & ~RBD_FEATURES_SUPPORTED)
3779 return -ENXIO;
3781 *snap_features = le64_to_cpu(features_buf.features);
3783 dout(" snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n",
3784 (unsigned long long)snap_id,
3785 (unsigned long long)*snap_features,
3786 (unsigned long long)le64_to_cpu(features_buf.incompat));
3788 return 0;
3789 }
3791 static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
3793 return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
3794 &rbd_dev->header.features);
3797 static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
3799 struct rbd_spec *parent_spec;
3800 size_t size;
3801 void *reply_buf = NULL;
3802 __le64 snapid;
3803 void *p;
3804 void *end;
3805 u64 pool_id;
3806 char *image_id;
3807 u64 overlap;
3808 int ret;
3810 parent_spec = rbd_spec_alloc();
3811 if (!parent_spec)
3812 return -ENOMEM;
3814 size = sizeof (__le64) + /* pool_id */
3815 sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX + /* image_id */
3816 sizeof (__le64) + /* snap_id */
3817 sizeof (__le64); /* overlap */
3818 reply_buf = kmalloc(size, GFP_KERNEL);
3819 if (!reply_buf) {
3820 ret = -ENOMEM;
3821 goto out_err;
3822 }
3824 snapid = cpu_to_le64(CEPH_NOSNAP);
3825 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3826 "rbd", "get_parent",
3827 &snapid, sizeof (snapid),
3828 reply_buf, size);
3829 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3830 if (ret < 0)
3831 goto out_err;
3833 p = reply_buf;
3834 end = reply_buf + ret;
3835 ret = -ERANGE;
3836 ceph_decode_64_safe(&p, end, pool_id, out_err);
3837 if (pool_id == CEPH_NOPOOL) {
3839 * Either the parent never existed, or we have
3840 * record of it but the image got flattened so it no
3841 * longer has a parent. When the parent of a
3842 * layered image disappears we immediately set the
3843 * overlap to 0. The effect of this is that all new
3844 * requests will be treated as if the image had no
3847 if (rbd_dev->parent_overlap) {
3848 rbd_dev->parent_overlap = 0;
3849 smp_mb();
3850 rbd_dev_parent_put(rbd_dev);
3851 pr_info("%s: clone image has been flattened\n",
3852 rbd_dev->disk->disk_name);
3853 }
3855 goto out; /* No parent? No problem. */
3858 /* The ceph file layout needs to fit pool id in 32 bits */
3860 ret = -EIO;
3861 if (pool_id > (u64)U32_MAX) {
3862 rbd_warn(NULL, "parent pool id too large (%llu > %u)\n",
3863 (unsigned long long)pool_id, U32_MAX);
3864 goto out_err;
3865 }
3866 parent_spec->pool_id = pool_id;
3868 image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
3869 if (IS_ERR(image_id)) {
3870 ret = PTR_ERR(image_id);
3871 goto out_err;
3872 }
3873 parent_spec->image_id = image_id;
3874 ceph_decode_64_safe(&p, end, parent_spec->snap_id, out_err);
3875 ceph_decode_64_safe(&p, end, overlap, out_err);
3877 if (overlap) {
3878 rbd_spec_put(rbd_dev->parent_spec);
3879 rbd_dev->parent_spec = parent_spec;
3880 parent_spec = NULL; /* rbd_dev now owns this */
3881 rbd_dev->parent_overlap = overlap;
3882 } else {
3883 rbd_warn(rbd_dev, "ignoring parent of clone with overlap 0\n");
3884 }
3885 out:
3886 ret = 0;
3887 out_err:
3888 kfree(reply_buf);
3889 rbd_spec_put(parent_spec);
3891 return ret;
3892 }
3894 static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
3895 {
3896 struct {
3897 __le64 stripe_unit;
3898 __le64 stripe_count;
3899 } __attribute__ ((packed)) striping_info_buf = { 0 };
3900 size_t size = sizeof (striping_info_buf);
3901 void *p;
3902 u64 obj_size;
3903 u64 stripe_unit;
3904 u64 stripe_count;
3905 int ret;
3907 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
3908 "rbd", "get_stripe_unit_count", NULL, 0,
3909 (char *)&striping_info_buf, size);
3910 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
3911 if (ret < 0)
3912 return ret;
3913 if (ret < size)
3914 return -ERANGE;
3917 * We don't actually support the "fancy striping" feature
3918 * (STRIPINGV2) yet, but if the striping sizes are the
3919 * defaults the behavior is the same as before. So find
3920 * out, and only fail if the image has non-default values.
3922 ret = -EINVAL;
3923 obj_size = (u64)1 << rbd_dev->header.obj_order;
3924 p = &striping_info_buf;
3925 stripe_unit = ceph_decode_64(&p);
3926 if (stripe_unit != obj_size) {
3927 rbd_warn(rbd_dev, "unsupported stripe unit "
3928 "(got %llu want %llu)",
3929 stripe_unit, obj_size);
3930 return -EINVAL;
3931 }
3932 stripe_count = ceph_decode_64(&p);
3933 if (stripe_count != 1) {
3934 rbd_warn(rbd_dev, "unsupported stripe count "
3935 "(got %llu want 1)", stripe_count);
3936 return -EINVAL;
3937 }
3938 rbd_dev->header.stripe_unit = stripe_unit;
3939 rbd_dev->header.stripe_count = stripe_count;
3941 return 0;
3942 }
3944 static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
3945 {
3946 size_t image_id_size;
3947 char *image_id;
3948 void *p;
3949 void *end;
3950 size_t size;
3951 void *reply_buf = NULL;
3952 size_t len = 0;
3953 char *image_name = NULL;
3954 int ret;
3956 rbd_assert(!rbd_dev->spec->image_name);
3958 len = strlen(rbd_dev->spec->image_id);
3959 image_id_size = sizeof (__le32) + len;
3960 image_id = kmalloc(image_id_size, GFP_KERNEL);
3961 if (!image_id)
3962 return NULL;
3964 p = image_id;
3965 end = image_id + image_id_size;
3966 ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32)len);
3968 size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX;
3969 reply_buf = kmalloc(size, GFP_KERNEL);
3970 if (!reply_buf)
3971 goto out;
3973 ret = rbd_obj_method_sync(rbd_dev, RBD_DIRECTORY,
3974 "rbd", "dir_get_name",
3975 image_id, image_id_size,
3976 reply_buf, size);
3977 if (ret < 0)
3978 goto out;
3979 p = reply_buf;
3980 end = reply_buf + ret;
3982 image_name = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
3983 if (IS_ERR(image_name))
3984 image_name = NULL;
3985 else
3986 dout("%s: name is %s len is %zd\n", __func__, image_name, len);
3987 out:
3988 kfree(reply_buf);
3989 kfree(image_id);
3991 return image_name;
3992 }
3994 static u64 rbd_v1_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
3996 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
3997 const char *snap_name;
3998 u32 which = 0;
4000 /* Skip over names until we find the one we are looking for */
4002 snap_name = rbd_dev->header.snap_names;
4003 while (which < snapc->num_snaps) {
4004 if (!strcmp(name, snap_name))
4005 return snapc->snaps[which];
4006 snap_name += strlen(snap_name) + 1;
4007 which++;
4008 }
4009 return CEPH_NOSNAP;
4010 }
4012 static u64 rbd_v2_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
4014 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
4015 u64 snap_id;
4016 u32 which;
4017 bool found = false;
4019 for (which = 0; !found && which < snapc->num_snaps; which++) {
4020 const char *snap_name;
4022 snap_id = snapc->snaps[which];
4023 snap_name = rbd_dev_v2_snap_name(rbd_dev, snap_id);
4024 if (IS_ERR(snap_name))
4025 break;
4026 found = !strcmp(name, snap_name);
4027 kfree(snap_name);
4028 }
4029 return found ? snap_id : CEPH_NOSNAP;
4033 * Assumes name is never RBD_SNAP_HEAD_NAME; returns CEPH_NOSNAP if
4034 * no snapshot by that name is found, or if an error occurs.
4036 static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
4038 if (rbd_dev->image_format == 1)
4039 return rbd_v1_snap_id_by_name(rbd_dev, name);
4041 return rbd_v2_snap_id_by_name(rbd_dev, name);
4045 * When an rbd image has a parent image, it is identified by the
4046 * pool, image, and snapshot ids (not names). This function fills
4047 * in the names for those ids. (It's OK if we can't figure out the
4048 * name for an image id, but the pool and snapshot ids should always
4049 * exist and have names.)  All names in an rbd spec are dynamically
4050 * allocated.
4052 * When an image being mapped (not a parent) is probed, we have the
4053 * pool name and pool id, image name and image id, and the snapshot
4054 * name. The only thing we're missing is the snapshot id.
4056 static int rbd_dev_spec_update(struct rbd_device *rbd_dev)
4058 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4059 struct rbd_spec *spec = rbd_dev->spec;
4060 const char *pool_name;
4061 const char *image_name;
4062 const char *snap_name;
4063 int ret;
4066 * An image being mapped will have the pool name (etc.), but
4067 * we need to look up the snapshot id.
4069 if (spec->pool_name) {
4070 if (strcmp(spec->snap_name, RBD_SNAP_HEAD_NAME)) {
4071 u64 snap_id;
4073 snap_id = rbd_snap_id_by_name(rbd_dev, spec->snap_name);
4074 if (snap_id == CEPH_NOSNAP)
4075 return -ENOENT;
4076 spec->snap_id = snap_id;
4077 } else {
4078 spec->snap_id = CEPH_NOSNAP;
4079 }
4081 return 0;
4082 }
4084 /* Get the pool name; we have to make our own copy of this */
4086 pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, spec->pool_id);
4087 if (!pool_name) {
4088 rbd_warn(rbd_dev, "no pool with id %llu", spec->pool_id);
4089 return -EIO;
4090 }
4091 pool_name = kstrdup(pool_name, GFP_KERNEL);
4092 if (!pool_name)
4093 return -ENOMEM;
4095 /* Fetch the image name; tolerate failure here */
4097 image_name = rbd_dev_image_name(rbd_dev);
4098 if (!image_name)
4099 rbd_warn(rbd_dev, "unable to get image name");
4101 /* Look up the snapshot name, and make a copy */
4103 snap_name = rbd_snap_name(rbd_dev, spec->snap_id);
4104 if (IS_ERR(snap_name)) {
4105 ret = PTR_ERR(snap_name);
4106 goto out_err;
4107 }
4109 spec->pool_name = pool_name;
4110 spec->image_name = image_name;
4111 spec->snap_name = snap_name;
4113 return 0;
4114 out_err:
4115 kfree(image_name);
4116 kfree(pool_name);
4118 return ret;
4119 }
4121 static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
4122 {
4123 size_t size;
4124 int ret;
4125 void *reply_buf;
4126 void *p;
4127 void *end;
4128 u64 seq;
4129 u32 snap_count;
4130 struct ceph_snap_context *snapc;
4131 u32 i;
4134 * We'll need room for the seq value (maximum snapshot id),
4135 * snapshot count, and array of that many snapshot ids.
4136 * For now we have a fixed upper limit on the number we're
4137 * prepared to receive.
4139 size = sizeof (__le64) + sizeof (__le32) +
4140 RBD_MAX_SNAP_COUNT * sizeof (__le64);
4141 reply_buf = kzalloc(size, GFP_KERNEL);
4142 if (!reply_buf)
4143 return -ENOMEM;
4145 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
4146 "rbd", "get_snapcontext", NULL, 0,
4147 reply_buf, size);
4148 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4149 if (ret < 0)
4150 goto out;
4152 p = reply_buf;
4153 end = reply_buf + ret;
4154 ret = -ERANGE;
4155 ceph_decode_64_safe(&p, end, seq, out);
4156 ceph_decode_32_safe(&p, end, snap_count, out);
4159 * Make sure the reported number of snapshot ids wouldn't go
4160 * beyond the end of our buffer. But before checking that,
4161 * make sure the computed size of the snapshot context we
4162 * allocate is representable in a size_t.
4164 if (snap_count > (SIZE_MAX - sizeof (struct ceph_snap_context))
4165 / sizeof (u64)) {
4166 ret = -EINVAL;
4167 goto out;
4168 }
4169 if (!ceph_has_room(&p, end, snap_count * sizeof (__le64)))
4170 goto out;
4171 ret = 0;
4173 snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
4174 if (!snapc) {
4175 ret = -ENOMEM;
4176 goto out;
4177 }
4178 snapc->seq = seq;
4179 for (i = 0; i < snap_count; i++)
4180 snapc->snaps[i] = ceph_decode_64(&p);
4182 ceph_put_snap_context(rbd_dev->header.snapc);
4183 rbd_dev->header.snapc = snapc;
4185 dout(" snap context seq = %llu, snap_count = %u\n",
4186 (unsigned long long)seq, (unsigned int)snap_count);
4187 out:
4188 kfree(reply_buf);
4190 return ret;
4191 }
4193 static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
4194 u64 snap_id)
4195 {
4196 size_t size;
4197 void *reply_buf;
4198 __le64 snapid;
4199 int ret;
4200 void *p;
4201 void *end;
4202 char *snap_name;
4204 size = sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN;
4205 reply_buf = kmalloc(size, GFP_KERNEL);
4206 if (!reply_buf)
4207 return ERR_PTR(-ENOMEM);
4209 snapid = cpu_to_le64(snap_id);
4210 ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
4211 "rbd", "get_snapshot_name",
4212 &snapid, sizeof (snapid),
4213 reply_buf, size);
4214 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4215 if (ret < 0) {
4216 snap_name = ERR_PTR(ret);
4217 goto out;
4218 }
4220 p = reply_buf;
4221 end = reply_buf + ret;
4222 snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
4223 if (IS_ERR(snap_name))
4224 goto out;
4226 dout(" snap_id 0x%016llx snap_name = %s\n",
4227 (unsigned long long)snap_id, snap_name);
4228 out:
4229 kfree(reply_buf);
4231 return snap_name;
4232 }
4234 static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev)
4236 bool first_time = rbd_dev->header.object_prefix == NULL;
4237 int ret;
4239 down_write(&rbd_dev->header_rwsem);
4241 if (first_time) {
4242 ret = rbd_dev_v2_header_onetime(rbd_dev);
4243 if (ret)
4244 goto out;
4245 }
4248 * If the image supports layering, get the parent info. We
4249 * need to probe the first time regardless. Thereafter we
4250 * only need to if there's a parent, to see if it has
4251 * disappeared due to the mapped image getting flattened.
4253 if (rbd_dev->header.features & RBD_FEATURE_LAYERING &&
4254 (first_time || rbd_dev->parent_spec)) {
4255 bool warn;
4257 ret = rbd_dev_v2_parent_info(rbd_dev);
4258 if (ret)
4259 goto out;
4262 * Print a warning if this is the initial probe and
4263 * the image has a parent. Don't print it if the
4264 * image now being probed is itself a parent. We
4265 * can tell at this point because we won't know its
4266 * pool name yet (just its pool id).
4268 warn = rbd_dev->parent_spec && rbd_dev->spec->pool_name;
4269 if (first_time && warn)
4270 rbd_warn(rbd_dev, "WARNING: kernel layering "
4271 "is EXPERIMENTAL!");
4274 ret = rbd_dev_v2_image_size(rbd_dev);
4278 if (rbd_dev->spec->snap_id == CEPH_NOSNAP)
4279 if (rbd_dev->mapping.size != rbd_dev->header.image_size)
4280 rbd_dev->mapping.size = rbd_dev->header.image_size;
4282 ret = rbd_dev_v2_snap_context(rbd_dev);
4283 dout("rbd_dev_v2_snap_context returned %d\n", ret);
4284 out:
4285 up_write(&rbd_dev->header_rwsem);
4287 return ret;
4288 }
4290 static int rbd_bus_add_dev(struct rbd_device *rbd_dev)
4291 {
4292 struct device *dev;
4293 int ret;
4295 mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);
4297 dev = &rbd_dev->dev;
4298 dev->bus = &rbd_bus_type;
4299 dev->type = &rbd_device_type;
4300 dev->parent = &rbd_root_dev;
4301 dev->release = rbd_dev_device_release;
4302 dev_set_name(dev, "%d", rbd_dev->dev_id);
4303 ret = device_register(dev);
4305 mutex_unlock(&ctl_mutex);
4307 return ret;
4308 }
4310 static void rbd_bus_del_dev(struct rbd_device *rbd_dev)
4312 device_unregister(&rbd_dev->dev);
4315 static atomic64_t rbd_dev_id_max = ATOMIC64_INIT(0);
4318 * Get a unique rbd identifier for the given new rbd_dev, and add
4319 * the rbd_dev to the global list. The minimum rbd id is 1.
4321 static void rbd_dev_id_get(struct rbd_device *rbd_dev)
4323 rbd_dev->dev_id = atomic64_inc_return(&rbd_dev_id_max);
4325 spin_lock(&rbd_dev_list_lock);
4326 list_add_tail(&rbd_dev->node, &rbd_dev_list);
4327 spin_unlock(&rbd_dev_list_lock);
4328 dout("rbd_dev %p given dev id %llu\n", rbd_dev,
4329 (unsigned long long) rbd_dev->dev_id);
4330 }
4333 * Remove an rbd_dev from the global list, and record that its
4334 * identifier is no longer in use.
4336 static void rbd_dev_id_put(struct rbd_device *rbd_dev)
4338 struct list_head *tmp;
4339 int rbd_id = rbd_dev->dev_id;
4340 int max_id;
4342 rbd_assert(rbd_id > 0);
4344 dout("rbd_dev %p released dev id %llu\n", rbd_dev,
4345 (unsigned long long) rbd_dev->dev_id);
4346 spin_lock(&rbd_dev_list_lock);
4347 list_del_init(&rbd_dev->node);
4350 * If the id being "put" is not the current maximum, there
4351 * is nothing special we need to do.
4353 if (rbd_id != atomic64_read(&rbd_dev_id_max)) {
4354 spin_unlock(&rbd_dev_list_lock);
4355 return;
4356 }
4359 * We need to update the current maximum id. Search the
4360 * list to find out what it is. We're more likely to find
4361 * the maximum at the end, so search the list backward.
4363 max_id = 0;
4364 list_for_each_prev(tmp, &rbd_dev_list) {
4365 struct rbd_device *rbd_dev;
4367 rbd_dev = list_entry(tmp, struct rbd_device, node);
4368 if (rbd_dev->dev_id > max_id)
4369 max_id = rbd_dev->dev_id;
4370 }
4371 spin_unlock(&rbd_dev_list_lock);
4374 * The max id could have been updated by rbd_dev_id_get(), in
4375 * which case it now accurately reflects the new maximum.
4376 * Be careful not to overwrite the maximum value in that
4377 * case.
4379 atomic64_cmpxchg(&rbd_dev_id_max, rbd_id, max_id);
4380 dout("  max dev id has been reset\n");
4381 }
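/*
 * Editor's note -- worked example of the cmpxchg above: with ids
 * {1, 2, 3} mapped, releasing id 3 scans the list and finds max_id 2.
 * If rbd_dev_id_get() raced in and bumped rbd_dev_id_max to 4 before
 * the cmpxchg runs, the compare (expecting 3) fails and the newer
 * maximum is preserved; otherwise 3 is replaced by 2 as intended.
 */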
4384 * Skips over white space at *buf, and updates *buf to point to the
4385 * first found non-space character (if any). Returns the length of
4386 * the token (string of non-white space characters) found. Note
4387 * that *buf must be terminated with '\0'.
4389 static inline size_t next_token(const char **buf)
4392 * These are the characters that produce nonzero for
4393 * isspace() in the "C" and "POSIX" locales.
4395 const char *spaces = " \f\n\r\t\v";
4397 *buf += strspn(*buf, spaces); /* Find start of token */
4399 return strcspn(*buf, spaces); /* Return token length */
4400 }
4403 * Finds the next token in *buf, and if the provided token buffer is
4404 * big enough, copies the found token into it. The result, if
4405 * copied, is guaranteed to be terminated with '\0'. Note that *buf
4406 * must be terminated with '\0' on entry.
4408 * Returns the length of the token found (not including the '\0').
4409 * Return value will be 0 if no token is found, and it will be >=
4410 * token_size if the token would not fit.
4412 * The *buf pointer will be updated to point beyond the end of the
4413 * found token. Note that this occurs even if the token buffer is
4414 * too small to hold it.
4416 static inline size_t copy_token(const char **buf,
4417 char *token,
4418 size_t token_size)
4419 {
4420 size_t len;
4422 len = next_token(buf);
4423 if (len < token_size) {
4424 memcpy(token, *buf, len);
4425 *(token + len) = '\0';
4426 }
4427 *buf += len;
4429 return len;
4430 }
4433 * Finds the next token in *buf, dynamically allocates a buffer big
4434 * enough to hold a copy of it, and copies the token into the new
4435 * buffer. The copy is guaranteed to be terminated with '\0'. Note
4436 * that a duplicate buffer is created even for a zero-length token.
4438 * Returns a pointer to the newly-allocated duplicate, or a null
4439 * pointer if memory for the duplicate was not available. If
4440 * the lenp argument is a non-null pointer, the length of the token
4441 * (not including the '\0') is returned in *lenp.
4443 * If successful, the *buf pointer will be updated to point beyond
4444 * the end of the found token.
4446 * Note: uses GFP_KERNEL for allocation.
4448 static inline char *dup_token(const char **buf, size_t *lenp)
4449 {
4450 char *dup;
4451 size_t len;
4453 len = next_token(buf);
4454 dup = kmemdup(*buf, len + 1, GFP_KERNEL);
4455 if (!dup)
4456 return NULL;
4457 *(dup + len) = '\0';
4458 *buf += len;
4460 if (lenp)
4461 *lenp = len;
4463 return dup;
4464 }
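/*
 * Editor's note -- usage sketch for the helpers above (hypothetical
 * input): stepping through a buffer such as
 * "1.2.3.4:6789 name=admin rbd myimage".
 */
static void __maybe_unused rbd_sketch_tokenize(const char *buf)
{
	size_t len;
	char *tok;

	len = next_token(&buf);		/* buf -> "1.2.3.4:6789", len 12 */
	buf += len;			/* step past the monitor address */

	tok = dup_token(&buf, NULL);	/* duplicates "name=admin" */
	kfree(tok);			/* caller owns the duplicate */
}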
4467 * Parse the options provided for an "rbd add" (i.e., rbd image
4468 * mapping) request. These arrive via a write to /sys/bus/rbd/add,
4469 * and the data written is passed here via a NUL-terminated buffer.
4470 * Returns 0 if successful or an error code otherwise.
4472 * The information extracted from these options is recorded in
4473 * the other parameters which return dynamically-allocated
4474 * storage:
4475 *  ceph_opts
4476 *      The address of a pointer that will refer to a ceph options
4477 *      structure.  Caller must release the returned pointer using
4478 *      ceph_destroy_options() when it is no longer needed.
4479 *  rbd_opts
4480 *      Address of an rbd options pointer.  Fully initialized by
4481 *      this function; caller must release with kfree().
4482 *  spec
4483 *      Address of an rbd image specification pointer.  Fully
4484 *      initialized by this function based on parsed options.
4485 *      Caller must release with rbd_spec_put().
4486 *
4487 * The options passed take this form:
4488 *  <mon_addrs> <options> <pool_name> <image_name> [<snap_id>]
4489 * where:
4490 *  <mon_addrs>
4491 *      A comma-separated list of one or more monitor addresses.
4492 *      A monitor address is an ip address, optionally followed
4493 *      by a port number (separated by a colon).
4494 *        I.e.:  ip1[:port1][,ip2[:port2]...]
4495 *  <options>
4496 *      A comma-separated list of ceph and/or rbd options.
4497 *  <pool_name>
4498 *      The name of the rados pool containing the rbd image.
4499 *  <image_name>
4500 *      The name of the image in that pool to map.
4501 *  <snap_id>
4502 *      An optional snapshot id.  If provided, the mapping will
4503 *      present data from the image at the time that snapshot was
4504 *      created.  The image head is used if no snapshot id is
4505 *      provided.  Snapshot mappings are always read-only.
4507 static int rbd_add_parse_args(const char *buf,
4508 struct ceph_options **ceph_opts,
4509 struct rbd_options **opts,
4510 struct rbd_spec **rbd_spec)
4511 {
4512 size_t len;
4513 char *options;
4514 const char *mon_addrs;
4515 char *snap_name;
4516 size_t mon_addrs_size;
4517 struct rbd_spec *spec = NULL;
4518 struct rbd_options *rbd_opts = NULL;
4519 struct ceph_options *copts;
4520 int ret;
4522 /* The first four tokens are required */
4524 len = next_token(&buf);
4525 if (!len) {
4526 rbd_warn(NULL, "no monitor address(es) provided");
4527 return -EINVAL;
4528 }
4529 mon_addrs = buf;
4530 mon_addrs_size = len + 1;
4531 buf += len;
4533 ret = -ENOMEM;
4534 options = dup_token(&buf, NULL);
4535 if (!options)
4536 return -ENOMEM;
4537 if (!*options) {
4538 rbd_warn(NULL, "no options provided");
4539 goto out_err;
4540 }
4542 spec = rbd_spec_alloc();
4543 if (!spec)
4544 goto out_mem;
4546 spec->pool_name = dup_token(&buf, NULL);
4547 if (!spec->pool_name)
4548 goto out_mem;
4549 if (!*spec->pool_name) {
4550 rbd_warn(NULL, "no pool name provided");
4551 goto out_err;
4552 }
4554 spec->image_name = dup_token(&buf, NULL);
4555 if (!spec->image_name)
4556 goto out_mem;
4557 if (!*spec->image_name) {
4558 rbd_warn(NULL, "no image name provided");
4559 goto out_err;
4560 }
4563 * Snapshot name is optional; default is to use "-"
4564 * (indicating the head/no snapshot).
4566 len = next_token(&buf);
4567 if (!len) {
4568 buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */
4569 len = sizeof (RBD_SNAP_HEAD_NAME) - 1;
4570 } else if (len > RBD_MAX_SNAP_NAME_LEN) {
4571 ret = -ENAMETOOLONG;
4572 goto out_err;
4573 }
4574 snap_name = kmemdup(buf, len + 1, GFP_KERNEL);
4575 if (!snap_name)
4576 goto out_mem;
4577 *(snap_name + len) = '\0';
4578 spec->snap_name = snap_name;
4580 /* Initialize all rbd options to the defaults */
4582 rbd_opts = kzalloc(sizeof (*rbd_opts), GFP_KERNEL);
4583 if (!rbd_opts)
4584 goto out_mem;
4586 rbd_opts->read_only = RBD_READ_ONLY_DEFAULT;
4588 copts = ceph_parse_options(options, mon_addrs,
4589 mon_addrs + mon_addrs_size - 1,
4590 parse_rbd_opts_token, rbd_opts);
4591 if (IS_ERR(copts)) {
4592 ret = PTR_ERR(copts);
4593 goto out_err;
4594 }
4595 kfree(options);
4597 *ceph_opts = copts;
4598 *opts = rbd_opts;
4599 *rbd_spec = spec;
4601 return 0;
4602 out_mem:
4603 ret = -ENOMEM;
4604 out_err:
4605 kfree(rbd_opts);
4606 rbd_spec_put(spec);
4607 kfree(options);
4609 return ret;
4610 }
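/*
 * Editor's note -- example of the format documented above (values
 * hypothetical):
 *
 *	$ echo "1.2.3.4:6789 name=admin rbd myimage" > /sys/bus/rbd/add
 *
 * parses as mon_addrs "1.2.3.4:6789", options "name=admin", pool
 * "rbd", image "myimage", with the default "-" (head) snapshot.
 */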
4613 * An rbd format 2 image has a unique identifier, distinct from the
4614 * name given to it by the user. Internally, that identifier is
4615 * what's used to specify the names of objects related to the image.
4617 * A special "rbd id" object is used to map an rbd image name to its
4618 * id. If that object doesn't exist, then there is no v2 rbd image
4619 * with the supplied name.
4621 * This function will record the given rbd_dev's image_id field if
4622 * it can be determined, and in that case will return 0. If any
4623 * errors occur a negative errno will be returned and the rbd_dev's
4624 * image_id field will be unchanged (and should be NULL).
4626 static int rbd_dev_image_id(struct rbd_device *rbd_dev)
4627 {
4628 int ret;
4629 size_t size;
4630 char *object_name;
4631 void *response;
4632 char *image_id;
4635 * When probing a parent image, the image id is already
4636 * known (and the image name likely is not). There's no
4637 * need to fetch the image id again in this case. We
4638 * do still need to set the image format though.
4640 if (rbd_dev->spec->image_id) {
4641 rbd_dev->image_format = *rbd_dev->spec->image_id ? 2 : 1;
4647 * First, see if the format 2 image id file exists, and if
4648 * so, get the image's persistent id from it.
4650 size = sizeof (RBD_ID_PREFIX) + strlen(rbd_dev->spec->image_name);
4651 object_name = kmalloc(size, GFP_NOIO);
4654 sprintf(object_name, "%s%s", RBD_ID_PREFIX, rbd_dev->spec->image_name);
4655 dout("rbd id object name is %s\n", object_name);
4657 /* Response will be an encoded string, which includes a length */
4659 size = sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX;
4660 response = kzalloc(size, GFP_NOIO);
4666 /* If it doesn't exist we'll assume it's a format 1 image */
4668 ret = rbd_obj_method_sync(rbd_dev, object_name,
4669 "rbd", "get_id", NULL, 0,
4670 response, RBD_IMAGE_ID_LEN_MAX);
4671 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4672 if (ret == -ENOENT) {
4673 image_id = kstrdup("", GFP_KERNEL);
4674 ret = image_id ? 0 : -ENOMEM;
4676 rbd_dev->image_format = 1;
4677 } else if (ret > sizeof (__le32)) {
4680 image_id = ceph_extract_encoded_string(&p, p + ret,
4682 ret = IS_ERR(image_id) ? PTR_ERR(image_id) : 0;
4684 rbd_dev->image_format = 2;
4690 rbd_dev->spec->image_id = image_id;
4691 dout("image_id is %s\n", image_id);
/* Undo whatever state changes are made by v1 or v2 image probe */

static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
{
	struct rbd_image_header *header;

	/* Drop parent reference unless it's already been done (or none) */

	if (rbd_dev->parent_overlap)
		rbd_dev_parent_put(rbd_dev);

	/* Free dynamic fields from the header, then zero it out */

	header = &rbd_dev->header;
	ceph_put_snap_context(header->snapc);
	kfree(header->snap_sizes);
	kfree(header->snap_names);
	kfree(header->object_prefix);
	memset(header, 0, sizeof (*header));
}
static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev)
{
	int ret;

	ret = rbd_dev_v2_object_prefix(rbd_dev);
	if (ret)
		goto out_err;

	/*
	 * Get and check the features for the image.  Currently the
	 * features are assumed to never change.
	 */
	ret = rbd_dev_v2_features(rbd_dev);
	if (ret)
		goto out_err;

	/* If the image supports fancy striping, get its parameters */

	if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) {
		ret = rbd_dev_v2_striping_info(rbd_dev);
		if (ret < 0)
			goto out_err;
	}

	/* No support for crypto and compression type format 2 images */

	return 0;
out_err:
	rbd_dev->header.features = 0;
	kfree(rbd_dev->header.object_prefix);
	rbd_dev->header.object_prefix = NULL;

	return ret;
}
static int rbd_dev_probe_parent(struct rbd_device *rbd_dev)
{
	struct rbd_device *parent = NULL;
	struct rbd_spec *parent_spec;
	struct rbd_client *rbdc;
	int ret;

	if (!rbd_dev->parent_spec)
		return 0;
	/*
	 * We need to pass a reference to the client and the parent
	 * spec when creating the parent rbd_dev.  Images related by
	 * parent/child relationships always share both.
	 */
	parent_spec = rbd_spec_get(rbd_dev->parent_spec);
	rbdc = __rbd_get_client(rbd_dev->rbd_client);

	ret = -ENOMEM;
	parent = rbd_dev_create(rbdc, parent_spec);
	if (!parent)
		goto out_err;

	ret = rbd_dev_image_probe(parent, false);
	if (ret < 0)
		goto out_err;
	rbd_dev->parent = parent;
	atomic_set(&rbd_dev->parent_ref, 1);

	return 0;
out_err:
	if (parent) {
		rbd_dev_unparent(rbd_dev);
		kfree(rbd_dev->header_name);
		rbd_dev_destroy(parent);
	} else {
		rbd_put_client(rbdc);
		rbd_spec_put(parent_spec);
	}

	return ret;
}
static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
{
	int ret;

	/* generate unique id: find highest unique id, add one */
	rbd_dev_id_get(rbd_dev);

	/* Fill in the device name, now that we have its id. */
	BUILD_BUG_ON(DEV_NAME_LEN
			< sizeof (RBD_DRV_NAME) + MAX_INT_FORMAT_WIDTH);
	sprintf(rbd_dev->name, "%s%d", RBD_DRV_NAME, rbd_dev->dev_id);

	/* Get our block major device number. */

	ret = register_blkdev(0, rbd_dev->name);
	if (ret < 0)
		goto err_out_id;
	rbd_dev->major = ret;

	/* Set up the blkdev mapping. */

	ret = rbd_init_disk(rbd_dev);
	if (ret)
		goto err_out_blkdev;

	ret = rbd_dev_mapping_set(rbd_dev);
	if (ret)
		goto err_out_disk;
	set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);

	ret = rbd_bus_add_dev(rbd_dev);
	if (ret)
		goto err_out_mapping;

	/* Everything's ready.  Announce the disk to the world. */

	set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
	add_disk(rbd_dev->disk);

	pr_info("%s: added with size 0x%llx\n", rbd_dev->disk->disk_name,
		(unsigned long long) rbd_dev->mapping.size);

	return ret;

err_out_mapping:
	rbd_dev_mapping_clear(rbd_dev);
err_out_disk:
	rbd_free_disk(rbd_dev);
err_out_blkdev:
	unregister_blkdev(rbd_dev->major, rbd_dev->name);
err_out_id:
	rbd_dev_id_put(rbd_dev);

	return ret;
}
static int rbd_dev_header_name(struct rbd_device *rbd_dev)
{
	struct rbd_spec *spec = rbd_dev->spec;
	size_t size;

	/* Record the header object name for this rbd image. */

	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));

	if (rbd_dev->image_format == 1)
		size = strlen(spec->image_name) + sizeof (RBD_SUFFIX);
	else
		size = sizeof (RBD_HEADER_PREFIX) + strlen(spec->image_id);

	rbd_dev->header_name = kmalloc(size, GFP_KERNEL);
	if (!rbd_dev->header_name)
		return -ENOMEM;

	if (rbd_dev->image_format == 1)
		sprintf(rbd_dev->header_name, "%s%s",
			spec->image_name, RBD_SUFFIX);
	else
		sprintf(rbd_dev->header_name, "%s%s",
			RBD_HEADER_PREFIX, spec->image_id);

	return 0;
}
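/*
 * Concretely (names illustrative; this assumes RBD_SUFFIX is ".rbd"
 * and RBD_HEADER_PREFIX is "rbd_header." as defined in rbd_types.h):
 * a format 1 image named "foo" gets header object "foo.rbd", while a
 * format 2 image with id "1014b2ae8944a" gets header object
 * "rbd_header.1014b2ae8944a".
 */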
static void rbd_dev_image_release(struct rbd_device *rbd_dev)
{
	rbd_dev_unprobe(rbd_dev);
	kfree(rbd_dev->header_name);
	rbd_dev->header_name = NULL;
	rbd_dev->image_format = 0;
	kfree(rbd_dev->spec->image_id);
	rbd_dev->spec->image_id = NULL;

	rbd_dev_destroy(rbd_dev);
}
/*
 * Probe for the existence of the header object for the given rbd
 * device.  If this image is the one being mapped (i.e., not a
 * parent), initiate a watch on its header object before using that
 * object to get detailed information about the rbd image.
 */
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, bool mapping)
{
	int ret;
	int tmp;

	/*
	 * Get the id from the image id object.  If it's not a
	 * format 2 image, we'll get ENOENT back, and we'll assume
	 * it's a format 1 image.
	 */
	ret = rbd_dev_image_id(rbd_dev);
	if (ret)
		return ret;
	rbd_assert(rbd_dev->spec->image_id);
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));

	ret = rbd_dev_header_name(rbd_dev);
	if (ret)
		goto err_out_format;

	if (mapping) {
		ret = rbd_dev_header_watch_sync(rbd_dev, true);
		if (ret)
			goto out_header_name;
	}

	if (rbd_dev->image_format == 1)
		ret = rbd_dev_v1_header_info(rbd_dev);
	else
		ret = rbd_dev_v2_header_info(rbd_dev);
	if (ret)
		goto err_out_watch;

	ret = rbd_dev_spec_update(rbd_dev);
	if (ret)
		goto err_out_probe;

	ret = rbd_dev_probe_parent(rbd_dev);
	if (ret)
		goto err_out_probe;

	dout("discovered format %u image, header name is %s\n",
		rbd_dev->image_format, rbd_dev->header_name);

	return 0;
err_out_probe:
	rbd_dev_unprobe(rbd_dev);
err_out_watch:
	if (mapping) {
		tmp = rbd_dev_header_watch_sync(rbd_dev, false);
		if (tmp)
			rbd_warn(rbd_dev,
				"unable to tear down watch request (%d)\n",
				tmp);
	}
out_header_name:
	kfree(rbd_dev->header_name);
	rbd_dev->header_name = NULL;
err_out_format:
	rbd_dev->image_format = 0;
	kfree(rbd_dev->spec->image_id);
	rbd_dev->spec->image_id = NULL;

	dout("probe failed, returning %d\n", ret);

	return ret;
}
static ssize_t rbd_add(struct bus_type *bus,
		       const char *buf,
		       size_t count)
{
	struct rbd_device *rbd_dev = NULL;
	struct ceph_options *ceph_opts = NULL;
	struct rbd_options *rbd_opts = NULL;
	struct rbd_spec *spec = NULL;
	struct rbd_client *rbdc;
	struct ceph_osd_client *osdc;
	bool read_only;
	int rc = -ENOMEM;

	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	/* parse add command */
	rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
	if (rc < 0)
		goto err_out_module;
	read_only = rbd_opts->read_only;
	kfree(rbd_opts);
	rbd_opts = NULL;	/* done with this */

	rbdc = rbd_get_client(ceph_opts);
	if (IS_ERR(rbdc)) {
		rc = PTR_ERR(rbdc);
		goto err_out_args;
	}
	ceph_opts = NULL;	/* rbd_dev client now owns this */

	/* pick the pool */
	osdc = &rbdc->client->osdc;
	rc = ceph_pg_poolid_by_name(osdc->osdmap, spec->pool_name);
	if (rc < 0)
		goto err_out_client;
	spec->pool_id = (u64)rc;

	/* The ceph file layout needs to fit pool id in 32 bits */

	if (spec->pool_id > (u64)U32_MAX) {
		rbd_warn(NULL, "pool id too large (%llu > %u)\n",
			(unsigned long long)spec->pool_id, U32_MAX);
		rc = -EIO;
		goto err_out_client;
	}

	rbd_dev = rbd_dev_create(rbdc, spec);
	if (!rbd_dev)
		goto err_out_client;
	rbdc = NULL;		/* rbd_dev now owns this */
	spec = NULL;		/* rbd_dev now owns this */

	rc = rbd_dev_image_probe(rbd_dev, true);
	if (rc < 0)
		goto err_out_rbd_dev;

	/* If we are mapping a snapshot it must be marked read-only */

	if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
		read_only = true;
	rbd_dev->mapping.read_only = read_only;

	rc = rbd_dev_device_setup(rbd_dev);
	if (rc) {
		rbd_dev_image_release(rbd_dev);
		goto err_out_module;
	}

	return count;

err_out_rbd_dev:
	rbd_dev_destroy(rbd_dev);
err_out_client:
	rbd_put_client(rbdc);
err_out_args:
	if (ceph_opts)
		ceph_destroy_options(ceph_opts);
	rbd_spec_put(spec);
err_out_module:
	module_put(THIS_MODULE);

	dout("Error adding device %s\n", buf);

	return (ssize_t)rc;
}
static struct rbd_device *__rbd_get_dev(unsigned long dev_id)
{
	struct rbd_device *rbd_dev;

	spin_lock(&rbd_dev_list_lock);
	list_for_each_entry(rbd_dev, &rbd_dev_list, node) {
		if (rbd_dev->dev_id == dev_id) {
			spin_unlock(&rbd_dev_list_lock);
			return rbd_dev;
		}
	}
	spin_unlock(&rbd_dev_list_lock);

	return NULL;
}
static void rbd_dev_device_release(struct device *dev)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	rbd_free_disk(rbd_dev);
	clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
	rbd_dev_mapping_clear(rbd_dev);
	unregister_blkdev(rbd_dev->major, rbd_dev->name);
	rbd_dev->major = 0;
	rbd_dev_id_put(rbd_dev);
}
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev)
{
	while (rbd_dev->parent) {
		struct rbd_device *first = rbd_dev;
		struct rbd_device *second = first->parent;
		struct rbd_device *third;

		/*
		 * Follow to the parent with no grandparent and
		 * remove it.
		 */
		while (second && (third = second->parent)) {
			first = second;
			second = third;
		}
		rbd_assert(second);
		rbd_dev_image_release(second);
		first->parent = NULL;
		first->parent_overlap = 0;

		rbd_assert(first->parent_spec);
		rbd_spec_put(first->parent_spec);
		first->parent_spec = NULL;
	}
}
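/*
 * To illustrate the loop above: for a layering chain
 * base <- middle <- rbd_dev, each outer iteration walks inward to
 * the ancestor whose parent pointer is NULL ("second"), releases
 * that image, and detaches it from its child ("first").  Images are
 * therefore torn down starting from the base of the chain and
 * working back toward rbd_dev itself.
 */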
static ssize_t rbd_remove(struct bus_type *bus,
			  const char *buf,
			  size_t count)
{
	struct rbd_device *rbd_dev = NULL;
	int target_id;
	unsigned long ul;
	int ret;

	ret = kstrtoul(buf, 10, &ul);
	if (ret)
		return ret;

	/* convert to int; abort if we lost anything in the conversion */
	target_id = (int)ul;
	if (target_id != ul)
		return -EINVAL;

	mutex_lock_nested(&ctl_mutex, SINGLE_DEPTH_NESTING);

	ret = -ENOENT;
	rbd_dev = __rbd_get_dev(target_id);
	if (!rbd_dev)
		goto done;

	ret = 0;
	spin_lock_irq(&rbd_dev->lock);
	if (rbd_dev->open_count)
		ret = -EBUSY;
	else
		set_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags);
	spin_unlock_irq(&rbd_dev->lock);
	if (ret < 0)
		goto done;

	rbd_bus_del_dev(rbd_dev);
	ret = rbd_dev_header_watch_sync(rbd_dev, false);
	if (ret)
		rbd_warn(rbd_dev, "failed to cancel watch event (%d)\n", ret);
	rbd_dev_image_release(rbd_dev);
	module_put(THIS_MODULE);
	ret = count;
done:
	mutex_unlock(&ctl_mutex);

	return ret;
}
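/*
 * For example (the device id is illustrative), a mapped device that
 * appeared as /dev/rbd2 is unmapped through the same sysfs bus
 * interface:
 *
 *   $ echo 2 > /sys/bus/rbd/remove
 *
 * The write fails with -EBUSY while the block device is still open.
 */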
/*
 * create control files in sysfs
 * /sys/bus/rbd/...
 */
static int rbd_sysfs_init(void)
{
	int ret;

	ret = device_register(&rbd_root_dev);
	if (ret < 0)
		return ret;

	ret = bus_register(&rbd_bus_type);
	if (ret < 0)
		device_unregister(&rbd_root_dev);

	return ret;
}

static void rbd_sysfs_cleanup(void)
{
	bus_unregister(&rbd_bus_type);
	device_unregister(&rbd_root_dev);
}
static int rbd_slab_init(void)
{
	rbd_assert(!rbd_img_request_cache);
	rbd_img_request_cache = kmem_cache_create("rbd_img_request",
					sizeof (struct rbd_img_request),
					__alignof__(struct rbd_img_request),
					0, NULL);
	if (!rbd_img_request_cache)
		return -ENOMEM;

	rbd_assert(!rbd_obj_request_cache);
	rbd_obj_request_cache = kmem_cache_create("rbd_obj_request",
					sizeof (struct rbd_obj_request),
					__alignof__(struct rbd_obj_request),
					0, NULL);
	if (!rbd_obj_request_cache)
		goto out_err;

	rbd_assert(!rbd_segment_name_cache);
	rbd_segment_name_cache = kmem_cache_create("rbd_segment_name",
					MAX_OBJ_NAME_SIZE + 1, 1, 0, NULL);
	if (rbd_segment_name_cache)
		return 0;
out_err:
	if (rbd_obj_request_cache) {
		kmem_cache_destroy(rbd_obj_request_cache);
		rbd_obj_request_cache = NULL;
	}

	kmem_cache_destroy(rbd_img_request_cache);
	rbd_img_request_cache = NULL;

	return -ENOMEM;
}
static void rbd_slab_exit(void)
{
	rbd_assert(rbd_segment_name_cache);
	kmem_cache_destroy(rbd_segment_name_cache);
	rbd_segment_name_cache = NULL;

	rbd_assert(rbd_obj_request_cache);
	kmem_cache_destroy(rbd_obj_request_cache);
	rbd_obj_request_cache = NULL;

	rbd_assert(rbd_img_request_cache);
	kmem_cache_destroy(rbd_img_request_cache);
	rbd_img_request_cache = NULL;
}
static int __init rbd_init(void)
{
	int rc;

	if (!libceph_compatible(NULL)) {
		rbd_warn(NULL, "libceph incompatibility (quitting)");
		return -EINVAL;
	}
	rc = rbd_slab_init();
	if (rc)
		return rc;
	rc = rbd_sysfs_init();
	if (rc)
		rbd_slab_exit();
	else
		pr_info("loaded " RBD_DRV_NAME_LONG "\n");

	return rc;
}

static void __exit rbd_exit(void)
{
	rbd_sysfs_cleanup();
	rbd_slab_exit();
}

module_init(rbd_init);
module_exit(rbd_exit);
MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
MODULE_DESCRIPTION("rados block device");

/* following authorship retained from original osdblk.c */
MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>");

MODULE_LICENSE("GPL");