/*
   rbd.c -- Export ceph rados objects as a Linux block device

   based on drivers/block/osdblk.c:

   Copyright 2009 Red Hat, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

   For usage instructions, please refer to:

                 Documentation/ABI/testing/sysfs-bus-rbd

 */
#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/mon_client.h>
#include <linux/ceph/cls_lock_client.h>
#include <linux/ceph/striper.h>
#include <linux/ceph/decode.h>
#include <linux/parser.h>
#include <linux/bsearch.h>

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/blk-mq.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/workqueue.h>

#include "rbd_types.h"

#define RBD_DEBUG	/* Activate rbd_assert() calls */
/*
 * Increment the given counter and return its updated value.
 * If the counter is already 0 it will not be incremented.
 * If the counter is already at its maximum value returns
 * -EINVAL without updating it.
 */
static int atomic_inc_return_safe(atomic_t *v)
{
	unsigned int counter;

	counter = (unsigned int)atomic_fetch_add_unless(v, 1, 0);
	if (counter <= (unsigned int)INT_MAX)
		return (int)counter;

	atomic_dec(v);

	return -EINVAL;
}

/* Decrement the counter.  Return the resulting value, or -EINVAL */
static int atomic_dec_return_safe(atomic_t *v)
{
	int counter;

	counter = atomic_dec_return(v);
	if (counter >= 0)
		return counter;

	atomic_inc(v);

	return -EINVAL;
}
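/*
 * Illustrative usage of the saturating counters above (a sketch, not
 * from the original source): parent_ref is guarded against both
 * overflow and underflow.
 *
 *	if (atomic_inc_return_safe(&rbd_dev->parent_ref) > 0)
 *		... parent is pinned and safe to use ...
 *	if (atomic_dec_return_safe(&rbd_dev->parent_ref) < 0)
 *		... an underflow (or earlier overflow) occurred ...
 */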
#define RBD_DRV_NAME "rbd"

#define RBD_MINORS_PER_MAJOR		256
#define RBD_SINGLE_MAJOR_PART_SHIFT	4

#define RBD_MAX_PARENT_CHAIN_LEN	16

#define RBD_SNAP_DEV_NAME_PREFIX	"snap_"
#define RBD_MAX_SNAP_NAME_LEN	\
			(NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))

#define RBD_MAX_SNAP_COUNT	510	/* allows max snapc to fit in 4KB */

#define RBD_SNAP_HEAD_NAME	"-"

#define	BAD_SNAP_INDEX	U32_MAX		/* invalid index into snap array */

/* This allows a single page to hold an image name sent by OSD */
#define RBD_IMAGE_NAME_LEN_MAX	(PAGE_SIZE - sizeof (__le32) - 1)
#define RBD_IMAGE_ID_LEN_MAX	64

#define RBD_OBJ_PREFIX_LEN_MAX	64

#define RBD_NOTIFY_TIMEOUT	5	/* seconds */
#define RBD_RETRY_DELAY		msecs_to_jiffies(1000)

/* Feature bits */

#define RBD_FEATURE_LAYERING		(1ULL<<0)
#define RBD_FEATURE_STRIPINGV2		(1ULL<<1)
#define RBD_FEATURE_EXCLUSIVE_LOCK	(1ULL<<2)
#define RBD_FEATURE_DATA_POOL		(1ULL<<7)
#define RBD_FEATURE_OPERATIONS		(1ULL<<8)

#define RBD_FEATURES_ALL	(RBD_FEATURE_LAYERING |		\
				 RBD_FEATURE_STRIPINGV2 |	\
				 RBD_FEATURE_EXCLUSIVE_LOCK |	\
				 RBD_FEATURE_DATA_POOL |	\
				 RBD_FEATURE_OPERATIONS)

/* Features supported by this (client software) implementation. */

#define RBD_FEATURES_SUPPORTED	(RBD_FEATURES_ALL)

/*
 * An RBD device name will be "rbd#", where the "rbd" comes from
 * RBD_DRV_NAME above, and # is a unique integer identifier.
 */
#define DEV_NAME_LEN		32
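/*
 * For example (illustrative): the first mapped image becomes "rbd0",
 * the next "rbd1", and so on; the integer is the dev_id allocated
 * from rbd_dev_id_ida below.
 */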
/*
 * block device image metadata (in-memory version)
 */
struct rbd_image_header {
	/* These six fields never change for a given rbd image */
	char *object_prefix;
	__u8 obj_order;
	u64 stripe_unit;
	u64 stripe_count;
	s64 data_pool_id;
	u64 features;		/* Might be changeable someday? */

	/* The remaining fields need to be updated occasionally */
	u64 image_size;
	struct ceph_snap_context *snapc;
	char *snap_names;	/* format 1 only */
	u64 *snap_sizes;	/* format 1 only */
};
/*
 * An rbd image specification.
 *
 * The tuple (pool_id, image_id, snap_id) is sufficient to uniquely
 * identify an image.  Each rbd_dev structure includes a pointer to
 * an rbd_spec structure that encapsulates this identity.
 *
 * Each of the id's in an rbd_spec has an associated name.  For a
 * user-mapped image, the names are supplied and the id's associated
 * with them are looked up.  For a layered image, a parent image is
 * defined by the tuple, and the names are looked up.
 *
 * An rbd_dev structure contains a parent_spec pointer which is
 * non-null if the image it represents is a child in a layered
 * image.  This pointer will refer to the rbd_spec structure used
 * by the parent rbd_dev for its own identity (i.e., the structure
 * is shared between the parent and child).
 *
 * Since these structures are populated once, during the discovery
 * phase of image construction, they are effectively immutable so
 * we make no effort to synchronize access to them.
 *
 * Note that code herein does not assume the image name is known (it
 * could be a null pointer).
 */
struct rbd_spec {
	u64		pool_id;
	const char	*pool_name;
	const char	*pool_ns;	/* NULL if default, never "" */

	const char	*image_id;
	const char	*image_name;

	u64		snap_id;
	const char	*snap_name;

	struct kref	kref;
};
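/*
 * Example identity (illustrative values only): pool_id 2 / image_id
 * "1f2e3d..." / snap_id CEPH_NOSNAP names the head (writable) version
 * of an image; a parent's spec differs in carrying a real snap_id.
 */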
/*
 * An instance of the client.  Multiple devices may share an rbd client.
 */
struct rbd_client {
	struct ceph_client	*client;
	struct kref		kref;
	struct list_head	node;
};
struct rbd_img_request;

enum obj_request_type {
	OBJ_REQUEST_NODATA = 1,
	OBJ_REQUEST_BIO,	/* pointer into provided bio (list) */
	OBJ_REQUEST_BVECS,	/* pointer into provided bio_vec array */
	OBJ_REQUEST_OWN_BVECS,	/* private bio_vec array, doesn't own pages */
};

enum obj_operation_type {
	OBJ_OP_READ = 1,
	OBJ_OP_WRITE,
	OBJ_OP_DISCARD,
};
/*
 * Writes go through the following state machine to deal with
 * layering:
 *
 *                       need copyup
 * RBD_OBJ_WRITE_GUARD ---------------> RBD_OBJ_WRITE_COPYUP
 *        |     ^                              |
 *        v     \------------------------------/
 *      done
 *
 * Writes start in RBD_OBJ_WRITE_GUARD or _FLAT, depending on whether
 * there is a parent or not.
 */
enum rbd_obj_write_state {
	RBD_OBJ_WRITE_FLAT = 1,
	RBD_OBJ_WRITE_GUARD,
	RBD_OBJ_WRITE_COPYUP,
};
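/*
 * Walk-through (illustrative): a write to an object backed by a parent
 * starts in RBD_OBJ_WRITE_GUARD.  If the guard (stat) reports -ENOENT,
 * the covered range is read from the parent and the request moves to
 * RBD_OBJ_WRITE_COPYUP; once the copyup completes the write is done.
 * Without a parent the write starts and finishes in RBD_OBJ_WRITE_FLAT.
 */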
struct rbd_obj_request {
	struct ceph_object_extent ex;
	union {
		bool			tried_parent;	/* for reads */
		enum rbd_obj_write_state write_state;	/* for writes */
	};

	struct rbd_img_request	*img_request;
	struct ceph_file_extent	*img_extents;
	u32			num_img_extents;

	union {
		struct ceph_bio_iter	bio_pos;
		struct {
			struct ceph_bvec_iter	bvec_pos;
			u32			bvec_count;
			u32			bvec_idx;
		};
	};
	struct bio_vec		*copyup_bvecs;
	u32			copyup_bvec_count;

	struct ceph_osd_request	*osd_req;

	u64			xferred;	/* bytes transferred */
	int			result;

	struct kref		kref;
};
enum img_req_flags {
	IMG_REQ_CHILD,		/* initiator: block = 0, child image = 1 */
	IMG_REQ_LAYERED,	/* ENOENT handling: normal = 0, layered = 1 */
};

struct rbd_img_request {
	struct rbd_device	*rbd_dev;
	enum obj_operation_type	op_type;
	enum obj_request_type	data_type;
	unsigned long		flags;
	union {
		u64			snap_id;	/* for reads */
		struct ceph_snap_context *snapc;	/* for writes */
	};
	union {
		struct request		*rq;		/* block request */
		struct rbd_obj_request	*obj_request;	/* obj req initiator */
	};
	spinlock_t		completion_lock;
	u64			xferred;/* aggregate bytes transferred */
	int			result;	/* first nonzero obj_request result */

	struct list_head	object_extents;	/* obj_req.ex structs */
	u32			obj_request_count;
	u32			pending_count;

	struct kref		kref;
};
#define for_each_obj_request(ireq, oreq) \
	list_for_each_entry(oreq, &(ireq)->object_extents, ex.oe_item)
#define for_each_obj_request_safe(ireq, oreq, n) \
	list_for_each_entry_safe(oreq, n, &(ireq)->object_extents, ex.oe_item)
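/*
 * Typical iteration (a sketch of how the macros above are used):
 *
 *	struct rbd_obj_request *obj_req;
 *
 *	for_each_obj_request(img_req, obj_req)
 *		rbd_obj_request_submit(obj_req);
 */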
enum rbd_watch_state {
	RBD_WATCH_STATE_UNREGISTERED,
	RBD_WATCH_STATE_REGISTERED,
	RBD_WATCH_STATE_ERROR,
};

enum rbd_lock_state {
	RBD_LOCK_STATE_UNLOCKED,
	RBD_LOCK_STATE_LOCKED,
	RBD_LOCK_STATE_RELEASING,
};

/* WatchNotify::ClientId */
struct rbd_client_id {
	u64 gid;
	u64 handle;
};
struct rbd_mapping {
	u64                     size;
	u64                     features;
};

/*
 * a single device
 */
struct rbd_device {
	int			dev_id;		/* blkdev unique id */

	int			major;		/* blkdev assigned major */
	int			minor;
	struct gendisk		*disk;		/* blkdev's gendisk and rq */

	u32			image_format;	/* Either 1 or 2 */
	struct rbd_client	*rbd_client;

	char			name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */

	spinlock_t		lock;		/* queue, flags, open_count */

	struct rbd_image_header	header;
	unsigned long		flags;		/* possibly lock protected */
	struct rbd_spec		*spec;
	struct rbd_options	*opts;
	char			*config_info;	/* add{,_single_major} string */

	struct ceph_object_id	header_oid;
	struct ceph_object_locator header_oloc;

	struct ceph_file_layout	layout;		/* used for all rbd requests */

	struct mutex		watch_mutex;
	enum rbd_watch_state	watch_state;
	struct ceph_osd_linger_request *watch_handle;
	u64			watch_cookie;
	struct delayed_work	watch_dwork;

	struct rw_semaphore	lock_rwsem;
	enum rbd_lock_state	lock_state;
	char			lock_cookie[32];
	struct rbd_client_id	owner_cid;
	struct work_struct	acquired_lock_work;
	struct work_struct	released_lock_work;
	struct delayed_work	lock_dwork;
	struct work_struct	unlock_work;
	wait_queue_head_t	lock_waitq;

	struct workqueue_struct	*task_wq;

	struct rbd_spec		*parent_spec;
	u64			parent_overlap;
	atomic_t		parent_ref;
	struct rbd_device	*parent;

	/* Block layer tags. */
	struct blk_mq_tag_set	tag_set;

	/* protects updating the header */
	struct rw_semaphore	header_rwsem;

	struct rbd_mapping	mapping;

	struct list_head	node;

	/* sysfs related */
	struct device		dev;
	unsigned long		open_count;	/* protected by lock */
};
/*
 * Flag bits for rbd_dev->flags:
 * - REMOVING (which is coupled with rbd_dev->open_count) is protected
 *   by rbd_dev->lock
 * - BLACKLISTED is protected by rbd_dev->lock_rwsem
 */
enum rbd_dev_flags {
	RBD_DEV_FLAG_EXISTS,	/* mapped snapshot has not been deleted */
	RBD_DEV_FLAG_REMOVING,	/* this mapping is being removed */
	RBD_DEV_FLAG_BLACKLISTED, /* our ceph_client is blacklisted */
};
static DEFINE_MUTEX(client_mutex);	/* Serialize client creation */

static LIST_HEAD(rbd_dev_list);		/* devices */
static DEFINE_SPINLOCK(rbd_dev_list_lock);

static LIST_HEAD(rbd_client_list);	/* clients */
static DEFINE_SPINLOCK(rbd_client_list_lock);

/* Slab caches for frequently-allocated structures */

static struct kmem_cache	*rbd_img_request_cache;
static struct kmem_cache	*rbd_obj_request_cache;

static int rbd_major;
static DEFINE_IDA(rbd_dev_id_ida);

static struct workqueue_struct *rbd_wq;
/*
 * single-major requires >= 0.75 version of userspace rbd utility.
 */
static bool single_major = true;
module_param(single_major, bool, 0444);
MODULE_PARM_DESC(single_major, "Use a single major number for all rbd devices (default: true)");
static ssize_t add_store(struct bus_type *bus, const char *buf, size_t count);
static ssize_t remove_store(struct bus_type *bus, const char *buf,
			    size_t count);
static ssize_t add_single_major_store(struct bus_type *bus, const char *buf,
				      size_t count);
static ssize_t remove_single_major_store(struct bus_type *bus, const char *buf,
					 size_t count);
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth);
static int rbd_dev_id_to_minor(int dev_id)
{
	return dev_id << RBD_SINGLE_MAJOR_PART_SHIFT;
}

static int minor_to_rbd_dev_id(int minor)
{
	return minor >> RBD_SINGLE_MAJOR_PART_SHIFT;
}
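/*
 * Worked example (illustrative): with RBD_SINGLE_MAJOR_PART_SHIFT == 4,
 * dev_id 1 maps to minor 16, and minors 16..31 (the whole-device node
 * plus up to 15 partitions) all map back to dev_id 1.
 */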
static bool __rbd_is_lock_owner(struct rbd_device *rbd_dev)
{
	return rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED ||
	       rbd_dev->lock_state == RBD_LOCK_STATE_RELEASING;
}

static bool rbd_is_lock_owner(struct rbd_device *rbd_dev)
{
	bool is_lock_owner;

	down_read(&rbd_dev->lock_rwsem);
	is_lock_owner = __rbd_is_lock_owner(rbd_dev);
	up_read(&rbd_dev->lock_rwsem);
	return is_lock_owner;
}
static ssize_t supported_features_show(struct bus_type *bus, char *buf)
{
	return sprintf(buf, "0x%llx\n", RBD_FEATURES_SUPPORTED);
}

static BUS_ATTR_WO(add);
static BUS_ATTR_WO(remove);
static BUS_ATTR_WO(add_single_major);
static BUS_ATTR_WO(remove_single_major);
static BUS_ATTR_RO(supported_features);
static struct attribute *rbd_bus_attrs[] = {
	&bus_attr_add.attr,
	&bus_attr_remove.attr,
	&bus_attr_add_single_major.attr,
	&bus_attr_remove_single_major.attr,
	&bus_attr_supported_features.attr,
	NULL,
};

static umode_t rbd_bus_is_visible(struct kobject *kobj,
				  struct attribute *attr, int index)
{
	if (!single_major &&
	    (attr == &bus_attr_add_single_major.attr ||
	     attr == &bus_attr_remove_single_major.attr))
		return 0;

	return attr->mode;
}

static const struct attribute_group rbd_bus_group = {
	.attrs = rbd_bus_attrs,
	.is_visible = rbd_bus_is_visible,
};
__ATTRIBUTE_GROUPS(rbd_bus);

static struct bus_type rbd_bus_type = {
	.name		= "rbd",
	.bus_groups	= rbd_bus_groups,
};
static void rbd_root_dev_release(struct device *dev)
{
}

static struct device rbd_root_dev = {
	.init_name =    "rbd",
	.release =      rbd_root_dev_release,
};
static __printf(2, 3)
void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;

	if (!rbd_dev)
		printk(KERN_WARNING "%s: %pV\n", RBD_DRV_NAME, &vaf);
	else if (rbd_dev->disk)
		printk(KERN_WARNING "%s: %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->disk->disk_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_name)
		printk(KERN_WARNING "%s: image %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_id)
		printk(KERN_WARNING "%s: id %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_id, &vaf);
	else	/* punt */
		printk(KERN_WARNING "%s: rbd_dev %p: %pV\n",
			RBD_DRV_NAME, rbd_dev, &vaf);
	va_end(args);
}
#ifdef RBD_DEBUG
#define rbd_assert(expr)						\
		if (unlikely(!(expr))) {				\
			printk(KERN_ERR "\nAssertion failure in %s() "	\
						"at line %d:\n\n"	\
					"\trbd_assert(%s);\n\n",	\
					__func__, __LINE__, #expr);	\
			BUG();						\
		}
#else /* !RBD_DEBUG */
#  define rbd_assert(expr)	((void) 0)
#endif /* !RBD_DEBUG */
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);

static int rbd_dev_refresh(struct rbd_device *rbd_dev);
static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev);
static int rbd_dev_header_info(struct rbd_device *rbd_dev);
static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev);
static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id);
static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
				 u8 *order, u64 *snap_size);
static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
				     u64 *snap_features);
static int rbd_open(struct block_device *bdev, fmode_t mode)
{
	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
	bool removing = false;

	spin_lock_irq(&rbd_dev->lock);
	if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
		removing = true;
	else
		rbd_dev->open_count++;
	spin_unlock_irq(&rbd_dev->lock);
	if (removing)
		return -EBUSY;

	(void) get_device(&rbd_dev->dev);

	return 0;
}

static void rbd_release(struct gendisk *disk, fmode_t mode)
{
	struct rbd_device *rbd_dev = disk->private_data;
	unsigned long open_count_before;

	spin_lock_irq(&rbd_dev->lock);
	open_count_before = rbd_dev->open_count--;
	spin_unlock_irq(&rbd_dev->lock);
	rbd_assert(open_count_before > 0);

	put_device(&rbd_dev->dev);
}
static int rbd_ioctl_set_ro(struct rbd_device *rbd_dev, unsigned long arg)
{
	int ro;

	if (get_user(ro, (int __user *)arg))
		return -EFAULT;

	/* Snapshots can't be marked read-write */
	if (rbd_dev->spec->snap_id != CEPH_NOSNAP && !ro)
		return -EROFS;

	/* Let blkdev_roset() handle it */
	return -ENOTTY;
}

static int rbd_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
	int ret;

	switch (cmd) {
	case BLKROSET:
		ret = rbd_ioctl_set_ro(rbd_dev, arg);
		break;
	default:
		ret = -ENOTTY;
	}

	return ret;
}

#ifdef CONFIG_COMPAT
static int rbd_compat_ioctl(struct block_device *bdev, fmode_t mode,
				unsigned int cmd, unsigned long arg)
{
	return rbd_ioctl(bdev, mode, cmd, arg);
}
#endif /* CONFIG_COMPAT */

static const struct block_device_operations rbd_bd_ops = {
	.owner			= THIS_MODULE,
	.open			= rbd_open,
	.release		= rbd_release,
	.ioctl			= rbd_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= rbd_compat_ioctl,
#endif
};
/*
 * Initialize an rbd client instance.  Success or not, this function
 * consumes ceph_opts.  Caller holds client_mutex.
 */
static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;
	int ret = -ENOMEM;

	dout("%s:\n", __func__);
	rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL);
	if (!rbdc)
		goto out_opt;

	kref_init(&rbdc->kref);
	INIT_LIST_HEAD(&rbdc->node);

	rbdc->client = ceph_create_client(ceph_opts, rbdc);
	if (IS_ERR(rbdc->client))
		goto out_rbdc;
	ceph_opts = NULL; /* Now rbdc->client is responsible for ceph_opts */

	ret = ceph_open_session(rbdc->client);
	if (ret < 0)
		goto out_client;

	spin_lock(&rbd_client_list_lock);
	list_add_tail(&rbdc->node, &rbd_client_list);
	spin_unlock(&rbd_client_list_lock);

	dout("%s: rbdc %p\n", __func__, rbdc);

	return rbdc;
out_client:
	ceph_destroy_client(rbdc->client);
out_rbdc:
	kfree(rbdc);
out_opt:
	if (ceph_opts)
		ceph_destroy_options(ceph_opts);
	dout("%s: error %d\n", __func__, ret);

	return ERR_PTR(ret);
}
static struct rbd_client *__rbd_get_client(struct rbd_client *rbdc)
{
	kref_get(&rbdc->kref);

	return rbdc;
}

/*
 * Find a ceph client with specific addr and configuration.  If
 * found, bump its reference count.
 */
static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
{
	struct rbd_client *client_node;
	bool found = false;

	if (ceph_opts->flags & CEPH_OPT_NOSHARE)
		return NULL;

	spin_lock(&rbd_client_list_lock);
	list_for_each_entry(client_node, &rbd_client_list, node) {
		if (!ceph_compare_options(ceph_opts, client_node->client)) {
			__rbd_get_client(client_node);

			found = true;
			break;
		}
	}
	spin_unlock(&rbd_client_list_lock);

	return found ? client_node : NULL;
}
/*
 * (Per device) rbd map options
 */
enum {
	Opt_queue_depth,
	Opt_lock_timeout,
	Opt_last_int,
	/* int args above */
	Opt_pool_ns,
	Opt_last_string,
	/* string args above */
	Opt_read_only,
	Opt_read_write,
	Opt_lock_on_read,
	Opt_exclusive,
	Opt_notrim,
	Opt_err
};

static match_table_t rbd_opts_tokens = {
	{Opt_queue_depth, "queue_depth=%d"},
	{Opt_lock_timeout, "lock_timeout=%d"},
	/* int args above */
	{Opt_pool_ns, "_pool_ns=%s"},
	/* string args above */
	{Opt_read_only, "read_only"},
	{Opt_read_only, "ro"},		/* Alternate spelling */
	{Opt_read_write, "read_write"},
	{Opt_read_write, "rw"},		/* Alternate spelling */
	{Opt_lock_on_read, "lock_on_read"},
	{Opt_exclusive, "exclusive"},
	{Opt_notrim, "notrim"},
	{Opt_err, NULL}
};
struct rbd_options {
	int	queue_depth;
	unsigned long	lock_timeout;
	bool	read_only;
	bool	lock_on_read;
	bool	exclusive;
	bool	trim;
};

#define RBD_QUEUE_DEPTH_DEFAULT	BLKDEV_MAX_RQ
#define RBD_LOCK_TIMEOUT_DEFAULT 0  /* no timeout */
#define RBD_READ_ONLY_DEFAULT	false
#define RBD_LOCK_ON_READ_DEFAULT false
#define RBD_EXCLUSIVE_DEFAULT	false
#define RBD_TRIM_DEFAULT	true
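/*
 * Example map options string (illustrative): mapping with
 * "queue_depth=128,lock_timeout=30,ro" yields queue_depth 128, a
 * lock_timeout of 30s (stored in jiffies) and read_only true, with
 * the remaining fields keeping the defaults above.
 */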
struct parse_rbd_opts_ctx {
	struct rbd_spec		*spec;
	struct rbd_options	*opts;
};

static int parse_rbd_opts_token(char *c, void *private)
{
	struct parse_rbd_opts_ctx *pctx = private;
	substring_t argstr[MAX_OPT_ARGS];
	int token, intval, ret;

	token = match_token(c, rbd_opts_tokens, argstr);
	if (token < Opt_last_int) {
		ret = match_int(&argstr[0], &intval);
		if (ret < 0) {
			pr_err("bad option arg (not int) at '%s'\n", c);
			return ret;
		}
		dout("got int token %d val %d\n", token, intval);
	} else if (token > Opt_last_int && token < Opt_last_string) {
		dout("got string token %d val %s\n", token, argstr[0].from);
	} else {
		dout("got token %d\n", token);
	}

	switch (token) {
	case Opt_queue_depth:
		if (intval < 1) {
			pr_err("queue_depth out of range\n");
			return -EINVAL;
		}
		pctx->opts->queue_depth = intval;
		break;
	case Opt_lock_timeout:
		/* 0 is "wait forever" (i.e. infinite timeout) */
		if (intval < 0 || intval > INT_MAX / 1000) {
			pr_err("lock_timeout out of range\n");
			return -EINVAL;
		}
		pctx->opts->lock_timeout = msecs_to_jiffies(intval * 1000);
		break;
	case Opt_pool_ns:
		kfree(pctx->spec->pool_ns);
		pctx->spec->pool_ns = match_strdup(argstr);
		if (!pctx->spec->pool_ns)
			return -ENOMEM;
		break;
	case Opt_read_only:
		pctx->opts->read_only = true;
		break;
	case Opt_read_write:
		pctx->opts->read_only = false;
		break;
	case Opt_lock_on_read:
		pctx->opts->lock_on_read = true;
		break;
	case Opt_exclusive:
		pctx->opts->exclusive = true;
		break;
	case Opt_notrim:
		pctx->opts->trim = false;
		break;
	default:
		/* libceph prints "bad option" msg */
		return -EINVAL;
	}

	return 0;
}
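/*
 * For example (illustrative): "lock_timeout=30" passes intval == 30,
 * which is stored as msecs_to_jiffies(30 * 1000), i.e. 30000 ms;
 * "lock_timeout=0" keeps the "wait forever" default.
 */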
static char *obj_op_name(enum obj_operation_type op_type)
{
	switch (op_type) {
	case OBJ_OP_READ:
		return "read";
	case OBJ_OP_WRITE:
		return "write";
	case OBJ_OP_DISCARD:
		return "discard";
	default:
		return "???";
	}
}
/*
 * Destroy ceph client.  Takes rbd_client_list_lock itself to unlink
 * the client from the list.
 */
static void rbd_client_release(struct kref *kref)
{
	struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);

	dout("%s: rbdc %p\n", __func__, rbdc);
	spin_lock(&rbd_client_list_lock);
	list_del(&rbdc->node);
	spin_unlock(&rbd_client_list_lock);

	ceph_destroy_client(rbdc->client);
	kfree(rbdc);
}

/*
 * Drop reference to ceph client node. If it's not referenced anymore, release
 * it.
 */
static void rbd_put_client(struct rbd_client *rbdc)
{
	if (rbdc)
		kref_put(&rbdc->kref, rbd_client_release);
}
static int wait_for_latest_osdmap(struct ceph_client *client)
{
	u64 newest_epoch;
	int ret;

	ret = ceph_monc_get_version(&client->monc, "osdmap", &newest_epoch);
	if (ret)
		return ret;

	if (client->osdc.osdmap->epoch >= newest_epoch)
		return 0;

	ceph_osdc_maybe_request_map(&client->osdc);
	return ceph_monc_wait_osdmap(&client->monc, newest_epoch,
				     client->options->mount_timeout);
}
/*
 * Get a ceph client with specific addr and configuration, if one does
 * not exist create it.  Either way, ceph_opts is consumed by this
 * function.
 */
static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;
	int ret;

	mutex_lock_nested(&client_mutex, SINGLE_DEPTH_NESTING);
	rbdc = rbd_client_find(ceph_opts);
	if (rbdc) {
		ceph_destroy_options(ceph_opts);

		/*
		 * Using an existing client.  Make sure ->pg_pools is up to
		 * date before we look up the pool id in do_rbd_add().
		 */
		ret = wait_for_latest_osdmap(rbdc->client);
		if (ret) {
			rbd_warn(NULL, "failed to get latest osdmap: %d", ret);
			rbd_put_client(rbdc);
			rbdc = ERR_PTR(ret);
		}
	} else {
		rbdc = rbd_client_create(ceph_opts);
	}
	mutex_unlock(&client_mutex);

	return rbdc;
}
static bool rbd_image_format_valid(u32 image_format)
{
	return image_format == 1 || image_format == 2;
}

static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
{
	size_t size;
	u32 snap_count;

	/* The header has to start with the magic rbd header text */
	if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)))
		return false;

	/* The bio layer requires at least sector-sized I/O */

	if (ondisk->options.order < SECTOR_SHIFT)
		return false;

	/* If we use u64 in a few spots we may be able to loosen this */

	if (ondisk->options.order > 8 * sizeof (int) - 1)
		return false;

	/*
	 * The size of a snapshot header has to fit in a size_t, and
	 * that limits the number of snapshots.
	 */
	snap_count = le32_to_cpu(ondisk->snap_count);
	size = SIZE_MAX - sizeof (struct ceph_snap_context);
	if (snap_count > size / sizeof (__le64))
		return false;

	/*
	 * Not only that, but the size of the entire snapshot
	 * header must also be representable in a size_t.
	 */
	size -= snap_count * sizeof (__le64);
	if ((u64) size < le64_to_cpu(ondisk->snap_names_len))
		return false;

	return true;
}
/*
 * returns the size of an object in the image
 */
static u32 rbd_obj_bytes(struct rbd_image_header *header)
{
	return 1U << header->obj_order;
}
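/*
 * E.g. (illustrative): the common obj_order of 22 gives 1U << 22 ==
 * 4 MiB objects; order 25 would give 32 MiB objects.
 */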
static void rbd_init_layout(struct rbd_device *rbd_dev)
{
	if (rbd_dev->header.stripe_unit == 0 ||
	    rbd_dev->header.stripe_count == 0) {
		rbd_dev->header.stripe_unit = rbd_obj_bytes(&rbd_dev->header);
		rbd_dev->header.stripe_count = 1;
	}

	rbd_dev->layout.stripe_unit = rbd_dev->header.stripe_unit;
	rbd_dev->layout.stripe_count = rbd_dev->header.stripe_count;
	rbd_dev->layout.object_size = rbd_obj_bytes(&rbd_dev->header);
	rbd_dev->layout.pool_id = rbd_dev->header.data_pool_id == CEPH_NOPOOL ?
			  rbd_dev->spec->pool_id : rbd_dev->header.data_pool_id;
	RCU_INIT_POINTER(rbd_dev->layout.pool_ns, NULL);
}
/*
 * Fill an rbd image header with information from the given format 1
 * on-disk header.
 */
static int rbd_header_from_disk(struct rbd_device *rbd_dev,
				 struct rbd_image_header_ondisk *ondisk)
{
	struct rbd_image_header *header = &rbd_dev->header;
	bool first_time = header->object_prefix == NULL;
	struct ceph_snap_context *snapc;
	char *object_prefix = NULL;
	char *snap_names = NULL;
	u64 *snap_sizes = NULL;
	u32 snap_count;
	int ret = -ENOMEM;
	u32 i;

	/* Allocate this now to avoid having to handle failure below */

	if (first_time) {
		object_prefix = kstrndup(ondisk->object_prefix,
					 sizeof(ondisk->object_prefix),
					 GFP_KERNEL);
		if (!object_prefix)
			return -ENOMEM;
	}

	/* Allocate the snapshot context and fill it in */

	snap_count = le32_to_cpu(ondisk->snap_count);
	snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
	if (!snapc)
		goto out_err;
	snapc->seq = le64_to_cpu(ondisk->snap_seq);
	if (snap_count) {
		struct rbd_image_snap_ondisk *snaps;
		u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);

		/* We'll keep a copy of the snapshot names... */

		if (snap_names_len > (u64)SIZE_MAX)
			goto out_2big;
		snap_names = kmalloc(snap_names_len, GFP_KERNEL);
		if (!snap_names)
			goto out_err;

		/* ...as well as the array of their sizes. */
		snap_sizes = kmalloc_array(snap_count,
					   sizeof(*header->snap_sizes),
					   GFP_KERNEL);
		if (!snap_sizes)
			goto out_err;

		/*
		 * Copy the names, and fill in each snapshot's id
		 * and size.
		 *
		 * Note that rbd_dev_v1_header_info() guarantees the
		 * ondisk buffer we're working with has
		 * snap_names_len bytes beyond the end of the
		 * snapshot id array, this memcpy() is safe.
		 */
		memcpy(snap_names, &ondisk->snaps[snap_count], snap_names_len);
		snaps = ondisk->snaps;
		for (i = 0; i < snap_count; i++) {
			snapc->snaps[i] = le64_to_cpu(snaps[i].id);
			snap_sizes[i] = le64_to_cpu(snaps[i].image_size);
		}
	}

	/* We won't fail any more, fill in the header */

	if (first_time) {
		header->object_prefix = object_prefix;
		header->obj_order = ondisk->options.order;
		rbd_init_layout(rbd_dev);
	} else {
		ceph_put_snap_context(header->snapc);
		kfree(header->snap_names);
		kfree(header->snap_sizes);
	}

	/* The remaining fields always get updated (when we refresh) */

	header->image_size = le64_to_cpu(ondisk->image_size);
	header->snapc = snapc;
	header->snap_names = snap_names;
	header->snap_sizes = snap_sizes;

	return 0;
out_2big:
	ret = -EIO;
out_err:
	ceph_put_snap_context(snapc);
	kfree(snap_names);
	kfree(snap_sizes);
	kfree(object_prefix);

	return ret;
}
static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which)
{
	const char *snap_name;

	rbd_assert(which < rbd_dev->header.snapc->num_snaps);

	/* Skip over names until we find the one we are looking for */

	snap_name = rbd_dev->header.snap_names;
	while (which--)
		snap_name += strlen(snap_name) + 1;

	return kstrdup(snap_name, GFP_KERNEL);
}
/*
 * Snapshot id comparison function for use with qsort()/bsearch().
 * Note that result is for snapshots in *descending* order.
 */
static int snapid_compare_reverse(const void *s1, const void *s2)
{
	u64 snap_id1 = *(u64 *)s1;
	u64 snap_id2 = *(u64 *)s2;

	if (snap_id1 < snap_id2)
		return 1;
	return snap_id1 == snap_id2 ? 0 : -1;
}
/*
 * Search a snapshot context to see if the given snapshot id is
 * present.
 *
 * Returns the position of the snapshot id in the array if it's found,
 * or BAD_SNAP_INDEX otherwise.
 *
 * Note: The snapshot array is kept sorted (by the osd) in
 * reverse order, highest snapshot id first.
 */
static u32 rbd_dev_snap_index(struct rbd_device *rbd_dev, u64 snap_id)
{
	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
	u64 *found;

	found = bsearch(&snap_id, &snapc->snaps, snapc->num_snaps,
				sizeof (snap_id), snapid_compare_reverse);

	return found ? (u32)(found - &snapc->snaps[0]) : BAD_SNAP_INDEX;
}
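/*
 * Example (illustrative): for a snapc->snaps[] of { 12, 8, 3 } (newest
 * first), looking up snap_id 8 returns index 1, while looking up 5
 * returns BAD_SNAP_INDEX.
 */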
static const char *rbd_dev_v1_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id)
{
	u32 which;
	const char *snap_name;

	which = rbd_dev_snap_index(rbd_dev, snap_id);
	if (which == BAD_SNAP_INDEX)
		return ERR_PTR(-ENOENT);

	snap_name = _rbd_dev_v1_snap_name(rbd_dev, which);
	return snap_name ? snap_name : ERR_PTR(-ENOMEM);
}
static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
{
	if (snap_id == CEPH_NOSNAP)
		return RBD_SNAP_HEAD_NAME;

	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (rbd_dev->image_format == 1)
		return rbd_dev_v1_snap_name(rbd_dev, snap_id);

	return rbd_dev_v2_snap_name(rbd_dev, snap_id);
}
static int rbd_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
				u64 *snap_size)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (snap_id == CEPH_NOSNAP) {
		*snap_size = rbd_dev->header.image_size;
	} else if (rbd_dev->image_format == 1) {
		u32 which;

		which = rbd_dev_snap_index(rbd_dev, snap_id);
		if (which == BAD_SNAP_INDEX)
			return -ENOENT;

		*snap_size = rbd_dev->header.snap_sizes[which];
	} else {
		u64 size = 0;
		int ret;

		ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, NULL, &size);
		if (ret)
			return ret;

		*snap_size = size;
	}
	return 0;
}

static int rbd_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
			u64 *snap_features)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (snap_id == CEPH_NOSNAP) {
		*snap_features = rbd_dev->header.features;
	} else if (rbd_dev->image_format == 1) {
		*snap_features = 0;	/* No features for format 1 */
	} else {
		u64 features = 0;
		int ret;

		ret = _rbd_dev_v2_snap_features(rbd_dev, snap_id, &features);
		if (ret)
			return ret;

		*snap_features = features;
	}
	return 0;
}
static int rbd_dev_mapping_set(struct rbd_device *rbd_dev)
{
	u64 snap_id = rbd_dev->spec->snap_id;
	u64 size = 0;
	u64 features = 0;
	int ret;

	ret = rbd_snap_size(rbd_dev, snap_id, &size);
	if (ret)
		return ret;
	ret = rbd_snap_features(rbd_dev, snap_id, &features);
	if (ret)
		return ret;

	rbd_dev->mapping.size = size;
	rbd_dev->mapping.features = features;

	return 0;
}

static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev)
{
	rbd_dev->mapping.size = 0;
	rbd_dev->mapping.features = 0;
}
static void zero_bvec(struct bio_vec *bv)
{
	void *buf;
	unsigned long flags;

	buf = bvec_kmap_irq(bv, &flags);
	memset(buf, 0, bv->bv_len);
	flush_dcache_page(bv->bv_page);
	bvec_kunmap_irq(buf, &flags);
}

static void zero_bios(struct ceph_bio_iter *bio_pos, u32 off, u32 bytes)
{
	struct ceph_bio_iter it = *bio_pos;

	ceph_bio_iter_advance(&it, off);
	ceph_bio_iter_advance_step(&it, bytes, ({
		zero_bvec(&bv);
	}));
}

static void zero_bvecs(struct ceph_bvec_iter *bvec_pos, u32 off, u32 bytes)
{
	struct ceph_bvec_iter it = *bvec_pos;

	ceph_bvec_iter_advance(&it, off);
	ceph_bvec_iter_advance_step(&it, bytes, ({
		zero_bvec(&bv);
	}));
}
/*
 * Zero a range in @obj_req data buffer defined by a bio (list) or
 * (private) bio_vec array.
 *
 * @off is relative to the start of the data buffer.
 */
static void rbd_obj_zero_range(struct rbd_obj_request *obj_req, u32 off,
			       u32 bytes)
{
	switch (obj_req->img_request->data_type) {
	case OBJ_REQUEST_BIO:
		zero_bios(&obj_req->bio_pos, off, bytes);
		break;
	case OBJ_REQUEST_BVECS:
	case OBJ_REQUEST_OWN_BVECS:
		zero_bvecs(&obj_req->bvec_pos, off, bytes);
		break;
	default:
		rbd_assert(0);
	}
}
static void rbd_obj_request_destroy(struct kref *kref);
static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request != NULL);
	dout("%s: obj %p (was %d)\n", __func__, obj_request,
		kref_read(&obj_request->kref));
	kref_put(&obj_request->kref, rbd_obj_request_destroy);
}

static void rbd_img_request_get(struct rbd_img_request *img_request)
{
	dout("%s: img %p (was %d)\n", __func__, img_request,
	     kref_read(&img_request->kref));
	kref_get(&img_request->kref);
}

static void rbd_img_request_destroy(struct kref *kref);
static void rbd_img_request_put(struct rbd_img_request *img_request)
{
	rbd_assert(img_request != NULL);
	dout("%s: img %p (was %d)\n", __func__, img_request,
		kref_read(&img_request->kref));
	kref_put(&img_request->kref, rbd_img_request_destroy);
}
static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request->img_request == NULL);

	/* Image request now owns object's original reference */
	obj_request->img_request = img_request;
	img_request->obj_request_count++;
	img_request->pending_count++;
	dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
}

static inline void rbd_img_obj_request_del(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
	list_del(&obj_request->ex.oe_item);
	rbd_assert(img_request->obj_request_count > 0);
	img_request->obj_request_count--;
	rbd_assert(obj_request->img_request == img_request);
	rbd_obj_request_put(obj_request);
}
static void rbd_obj_request_submit(struct rbd_obj_request *obj_request)
{
	struct ceph_osd_request *osd_req = obj_request->osd_req;

	dout("%s %p object_no %016llx %llu~%llu osd_req %p\n", __func__,
	     obj_request, obj_request->ex.oe_objno, obj_request->ex.oe_off,
	     obj_request->ex.oe_len, osd_req);
	ceph_osdc_start_request(osd_req->r_osdc, osd_req, false);
}
/*
 * The default/initial value for all image request flags is 0.  Each
 * is conditionally set to 1 at image request initialization time
 * and currently never changes thereafter.
 */
static void img_request_layered_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_LAYERED, &img_request->flags);
	smp_mb();
}

static void img_request_layered_clear(struct rbd_img_request *img_request)
{
	clear_bit(IMG_REQ_LAYERED, &img_request->flags);
	smp_mb();
}

static bool img_request_layered_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_LAYERED, &img_request->flags) != 0;
}
static bool rbd_obj_is_entire(struct rbd_obj_request *obj_req)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;

	return !obj_req->ex.oe_off &&
	       obj_req->ex.oe_len == rbd_dev->layout.object_size;
}

static bool rbd_obj_is_tail(struct rbd_obj_request *obj_req)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;

	return obj_req->ex.oe_off + obj_req->ex.oe_len ==
					rbd_dev->layout.object_size;
}

static u64 rbd_obj_img_extents_bytes(struct rbd_obj_request *obj_req)
{
	return ceph_file_extents_bytes(obj_req->img_extents,
				       obj_req->num_img_extents);
}
static bool rbd_img_is_write(struct rbd_img_request *img_req)
{
	switch (img_req->op_type) {
	case OBJ_OP_READ:
		return false;
	case OBJ_OP_WRITE:
	case OBJ_OP_DISCARD:
		return true;
	default:
		BUG();
	}
}

static void rbd_obj_handle_request(struct rbd_obj_request *obj_req);
static void rbd_osd_req_callback(struct ceph_osd_request *osd_req)
{
	struct rbd_obj_request *obj_req = osd_req->r_priv;

	dout("%s osd_req %p result %d for obj_req %p\n", __func__, osd_req,
	     osd_req->r_result, obj_req);
	rbd_assert(osd_req == obj_req->osd_req);

	obj_req->result = osd_req->r_result < 0 ? osd_req->r_result : 0;
	if (!obj_req->result && !rbd_img_is_write(obj_req->img_request))
		obj_req->xferred = osd_req->r_result;
	else
		/*
		 * Writes aren't allowed to return a data payload.  In some
		 * guarded write cases (e.g. stat + zero on an empty object)
		 * a stat response makes it through, but we don't care.
		 */
		obj_req->xferred = 0;

	rbd_obj_handle_request(obj_req);
}
static void rbd_osd_req_format_read(struct rbd_obj_request *obj_request)
{
	struct ceph_osd_request *osd_req = obj_request->osd_req;

	osd_req->r_flags = CEPH_OSD_FLAG_READ;
	osd_req->r_snapid = obj_request->img_request->snap_id;
}

static void rbd_osd_req_format_write(struct rbd_obj_request *obj_request)
{
	struct ceph_osd_request *osd_req = obj_request->osd_req;

	osd_req->r_flags = CEPH_OSD_FLAG_WRITE;
	ktime_get_real_ts64(&osd_req->r_mtime);
	osd_req->r_data_offset = obj_request->ex.oe_off;
}
static struct ceph_osd_request *
rbd_osd_req_create(struct rbd_obj_request *obj_req, unsigned int num_ops)
{
	struct rbd_img_request *img_req = obj_req->img_request;
	struct rbd_device *rbd_dev = img_req->rbd_dev;
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct ceph_osd_request *req;
	const char *name_format = rbd_dev->image_format == 1 ?
				      RBD_V1_DATA_FORMAT : RBD_V2_DATA_FORMAT;

	req = ceph_osdc_alloc_request(osdc,
			(rbd_img_is_write(img_req) ? img_req->snapc : NULL),
			num_ops, false, GFP_NOIO);
	if (!req)
		return NULL;

	req->r_callback = rbd_osd_req_callback;
	req->r_priv = obj_req;

	/*
	 * Data objects may be stored in a separate pool, but always in
	 * the same namespace in that pool as the header in its pool.
	 */
	ceph_oloc_copy(&req->r_base_oloc, &rbd_dev->header_oloc);
	req->r_base_oloc.pool = rbd_dev->layout.pool_id;

	if (ceph_oid_aprintf(&req->r_base_oid, GFP_NOIO, name_format,
			rbd_dev->header.object_prefix, obj_req->ex.oe_objno))
		goto err_req;

	return req;

err_req:
	ceph_osdc_put_request(req);
	return NULL;
}

static void rbd_osd_req_destroy(struct ceph_osd_request *osd_req)
{
	ceph_osdc_put_request(osd_req);
}
static struct rbd_obj_request *rbd_obj_request_create(void)
{
	struct rbd_obj_request *obj_request;

	obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_NOIO);
	if (!obj_request)
		return NULL;

	ceph_object_extent_init(&obj_request->ex);
	kref_init(&obj_request->kref);

	dout("%s %p\n", __func__, obj_request);
	return obj_request;
}
static void rbd_obj_request_destroy(struct kref *kref)
{
	struct rbd_obj_request *obj_request;
	u32 i;

	obj_request = container_of(kref, struct rbd_obj_request, kref);

	dout("%s: obj %p\n", __func__, obj_request);

	if (obj_request->osd_req)
		rbd_osd_req_destroy(obj_request->osd_req);

	switch (obj_request->img_request->data_type) {
	case OBJ_REQUEST_NODATA:
	case OBJ_REQUEST_BIO:
	case OBJ_REQUEST_BVECS:
		break;		/* Nothing to do */
	case OBJ_REQUEST_OWN_BVECS:
		kfree(obj_request->bvec_pos.bvecs);
		break;
	default:
		rbd_assert(0);
	}

	kfree(obj_request->img_extents);
	if (obj_request->copyup_bvecs) {
		for (i = 0; i < obj_request->copyup_bvec_count; i++) {
			if (obj_request->copyup_bvecs[i].bv_page)
				__free_page(obj_request->copyup_bvecs[i].bv_page);
		}
		kfree(obj_request->copyup_bvecs);
	}

	kmem_cache_free(rbd_obj_request_cache, obj_request);
}
/* It's OK to call this for a device with no parent */

static void rbd_spec_put(struct rbd_spec *spec);
static void rbd_dev_unparent(struct rbd_device *rbd_dev)
{
	rbd_dev_remove_parent(rbd_dev);
	rbd_spec_put(rbd_dev->parent_spec);
	rbd_dev->parent_spec = NULL;
	rbd_dev->parent_overlap = 0;
}
/*
 * Parent image reference counting is used to determine when an
 * image's parent fields can be safely torn down--after there are no
 * more in-flight requests to the parent image.  When the last
 * reference is dropped, cleaning them up is safe.
 */
static void rbd_dev_parent_put(struct rbd_device *rbd_dev)
{
	int counter;

	if (!rbd_dev->parent_spec)
		return;

	counter = atomic_dec_return_safe(&rbd_dev->parent_ref);
	if (counter > 0)
		return;

	/* Last reference; clean up parent data structures */

	if (!counter)
		rbd_dev_unparent(rbd_dev);
	else
		rbd_warn(rbd_dev, "parent reference underflow");
}
/*
 * If an image has a non-zero parent overlap, get a reference to its
 * parent.
 *
 * Returns true if the rbd device has a parent with a non-zero
 * overlap and a reference for it was successfully taken, or
 * false otherwise.
 */
static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
{
	int counter = 0;

	if (!rbd_dev->parent_spec)
		return false;

	down_read(&rbd_dev->header_rwsem);
	if (rbd_dev->parent_overlap)
		counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
	up_read(&rbd_dev->header_rwsem);

	if (counter < 0)
		rbd_warn(rbd_dev, "parent reference overflow");

	return counter > 0;
}
/*
 * Caller is responsible for filling in the list of object requests
 * that comprises the image request, and the Linux request pointer
 * (if there is one).
 */
static struct rbd_img_request *rbd_img_request_create(
					struct rbd_device *rbd_dev,
					enum obj_operation_type op_type,
					struct ceph_snap_context *snapc)
{
	struct rbd_img_request *img_request;

	img_request = kmem_cache_zalloc(rbd_img_request_cache, GFP_NOIO);
	if (!img_request)
		return NULL;

	img_request->rbd_dev = rbd_dev;
	img_request->op_type = op_type;
	if (!rbd_img_is_write(img_request))
		img_request->snap_id = rbd_dev->spec->snap_id;
	else
		img_request->snapc = snapc;

	if (rbd_dev_parent_get(rbd_dev))
		img_request_layered_set(img_request);

	spin_lock_init(&img_request->completion_lock);
	INIT_LIST_HEAD(&img_request->object_extents);
	kref_init(&img_request->kref);

	dout("%s: rbd_dev %p %s -> img %p\n", __func__, rbd_dev,
	     obj_op_name(op_type), img_request);
	return img_request;
}
static void rbd_img_request_destroy(struct kref *kref)
{
	struct rbd_img_request *img_request;
	struct rbd_obj_request *obj_request;
	struct rbd_obj_request *next_obj_request;

	img_request = container_of(kref, struct rbd_img_request, kref);

	dout("%s: img %p\n", __func__, img_request);

	for_each_obj_request_safe(img_request, obj_request, next_obj_request)
		rbd_img_obj_request_del(img_request, obj_request);
	rbd_assert(img_request->obj_request_count == 0);

	if (img_request_layered_test(img_request)) {
		img_request_layered_clear(img_request);
		rbd_dev_parent_put(img_request->rbd_dev);
	}

	if (rbd_img_is_write(img_request))
		ceph_put_snap_context(img_request->snapc);

	kmem_cache_free(rbd_img_request_cache, img_request);
}
static void prune_extents(struct ceph_file_extent *img_extents,
			  u32 *num_img_extents, u64 overlap)
{
	u32 cnt = *num_img_extents;

	/* drop extents completely beyond the overlap */
	while (cnt && img_extents[cnt - 1].fe_off >= overlap)
		cnt--;

	if (cnt) {
		struct ceph_file_extent *ex = &img_extents[cnt - 1];

		/* trim final overlapping extent */
		if (ex->fe_off + ex->fe_len > overlap)
			ex->fe_len = overlap - ex->fe_off;
	}

	*num_img_extents = cnt;
}
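/*
 * Worked example (illustrative): with overlap == 4096, extents
 * { 0~2048, 3072~2048, 8192~1024 } are pruned to { 0~2048, 3072~1024 }:
 * the last extent starts beyond the overlap and is dropped, and the
 * middle one is trimmed to end exactly at the overlap point.
 */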
/*
 * Determine the byte range(s) covered by either just the object extent
 * or the entire object in the parent image.
 */
static int rbd_obj_calc_img_extents(struct rbd_obj_request *obj_req,
				    bool entire)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
	int ret;

	if (!rbd_dev->parent_overlap)
		return 0;

	ret = ceph_extent_to_file(&rbd_dev->layout, obj_req->ex.oe_objno,
				  entire ? 0 : obj_req->ex.oe_off,
				  entire ? rbd_dev->layout.object_size :
							obj_req->ex.oe_len,
				  &obj_req->img_extents,
				  &obj_req->num_img_extents);
	if (ret)
		return ret;

	prune_extents(obj_req->img_extents, &obj_req->num_img_extents,
		      rbd_dev->parent_overlap);
	return 0;
}
static void rbd_osd_req_setup_data(struct rbd_obj_request *obj_req, u32 which)
{
	switch (obj_req->img_request->data_type) {
	case OBJ_REQUEST_BIO:
		osd_req_op_extent_osd_data_bio(obj_req->osd_req, which,
					       &obj_req->bio_pos,
					       obj_req->ex.oe_len);
		break;
	case OBJ_REQUEST_BVECS:
	case OBJ_REQUEST_OWN_BVECS:
		rbd_assert(obj_req->bvec_pos.iter.bi_size ==
							obj_req->ex.oe_len);
		rbd_assert(obj_req->bvec_idx == obj_req->bvec_count);
		osd_req_op_extent_osd_data_bvec_pos(obj_req->osd_req, which,
						    &obj_req->bvec_pos);
		break;
	default:
		rbd_assert(0);
	}
}
static int rbd_obj_setup_read(struct rbd_obj_request *obj_req)
{
	obj_req->osd_req = rbd_osd_req_create(obj_req, 1);
	if (!obj_req->osd_req)
		return -ENOMEM;

	osd_req_op_extent_init(obj_req->osd_req, 0, CEPH_OSD_OP_READ,
			       obj_req->ex.oe_off, obj_req->ex.oe_len, 0, 0);
	rbd_osd_req_setup_data(obj_req, 0);

	rbd_osd_req_format_read(obj_req);
	return 0;
}
static int __rbd_obj_setup_stat(struct rbd_obj_request *obj_req,
				unsigned int which)
{
	struct page **pages;

	/*
	 * The response data for a STAT call consists of:
	 *     le64 length;
	 *     struct {
	 *         le32 tv_sec;
	 *         le32 tv_nsec;
	 *     } mtime;
	 */
	pages = ceph_alloc_page_vector(1, GFP_NOIO);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	osd_req_op_init(obj_req->osd_req, which, CEPH_OSD_OP_STAT, 0);
	osd_req_op_raw_data_in_pages(obj_req->osd_req, which, pages,
				     8 + sizeof(struct ceph_timespec),
				     0, false, true);
	return 0;
}
static void __rbd_obj_setup_write(struct rbd_obj_request *obj_req,
				  unsigned int which)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
	u16 opcode;

	osd_req_op_alloc_hint_init(obj_req->osd_req, which++,
				   rbd_dev->layout.object_size,
				   rbd_dev->layout.object_size);

	if (rbd_obj_is_entire(obj_req))
		opcode = CEPH_OSD_OP_WRITEFULL;
	else
		opcode = CEPH_OSD_OP_WRITE;

	osd_req_op_extent_init(obj_req->osd_req, which, opcode,
			       obj_req->ex.oe_off, obj_req->ex.oe_len, 0, 0);
	rbd_osd_req_setup_data(obj_req, which++);

	rbd_assert(which == obj_req->osd_req->r_num_ops);
	rbd_osd_req_format_write(obj_req);
}
static int rbd_obj_setup_write(struct rbd_obj_request *obj_req)
{
	unsigned int num_osd_ops, which = 0;
	int ret;

	/* reverse map the entire object onto the parent */
	ret = rbd_obj_calc_img_extents(obj_req, true);
	if (ret)
		return ret;

	if (obj_req->num_img_extents) {
		obj_req->write_state = RBD_OBJ_WRITE_GUARD;
		num_osd_ops = 3; /* stat + setallochint + write/writefull */
	} else {
		obj_req->write_state = RBD_OBJ_WRITE_FLAT;
		num_osd_ops = 2; /* setallochint + write/writefull */
	}

	obj_req->osd_req = rbd_osd_req_create(obj_req, num_osd_ops);
	if (!obj_req->osd_req)
		return -ENOMEM;

	if (obj_req->num_img_extents) {
		ret = __rbd_obj_setup_stat(obj_req, which++);
		if (ret)
			return ret;
	}

	__rbd_obj_setup_write(obj_req, which);
	return 0;
}
static void __rbd_obj_setup_discard(struct rbd_obj_request *obj_req,
				    unsigned int which)
{
	u16 opcode;

	if (rbd_obj_is_entire(obj_req)) {
		if (obj_req->num_img_extents) {
			osd_req_op_init(obj_req->osd_req, which++,
					CEPH_OSD_OP_CREATE, 0);
			opcode = CEPH_OSD_OP_TRUNCATE;
		} else {
			osd_req_op_init(obj_req->osd_req, which++,
					CEPH_OSD_OP_DELETE, 0);
			opcode = 0;
		}
	} else if (rbd_obj_is_tail(obj_req)) {
		opcode = CEPH_OSD_OP_TRUNCATE;
	} else {
		opcode = CEPH_OSD_OP_ZERO;
	}

	if (opcode)
		osd_req_op_extent_init(obj_req->osd_req, which++, opcode,
				       obj_req->ex.oe_off, obj_req->ex.oe_len,
				       0, 0);

	rbd_assert(which == obj_req->osd_req->r_num_ops);
	rbd_osd_req_format_write(obj_req);
}
static int rbd_obj_setup_discard(struct rbd_obj_request *obj_req)
{
	unsigned int num_osd_ops, which = 0;
	int ret;

	/* reverse map the entire object onto the parent */
	ret = rbd_obj_calc_img_extents(obj_req, true);
	if (ret)
		return ret;

	if (rbd_obj_is_entire(obj_req)) {
		obj_req->write_state = RBD_OBJ_WRITE_FLAT;
		if (obj_req->num_img_extents)
			num_osd_ops = 2; /* create + truncate */
		else
			num_osd_ops = 1; /* delete */
	} else {
		if (obj_req->num_img_extents) {
			obj_req->write_state = RBD_OBJ_WRITE_GUARD;
			num_osd_ops = 2; /* stat + truncate/zero */
		} else {
			obj_req->write_state = RBD_OBJ_WRITE_FLAT;
			num_osd_ops = 1; /* truncate/zero */
		}
	}

	obj_req->osd_req = rbd_osd_req_create(obj_req, num_osd_ops);
	if (!obj_req->osd_req)
		return -ENOMEM;

	if (!rbd_obj_is_entire(obj_req) && obj_req->num_img_extents) {
		ret = __rbd_obj_setup_stat(obj_req, which++);
		if (ret)
			return ret;
	}

	__rbd_obj_setup_discard(obj_req, which);
	return 0;
}
/*
 * For each object request in @img_req, allocate an OSD request, add
 * individual OSD ops and prepare them for submission.  The number of
 * OSD ops depends on op_type and the overlap point (if any).
 */
static int __rbd_img_fill_request(struct rbd_img_request *img_req)
{
	struct rbd_obj_request *obj_req;
	int ret;

	for_each_obj_request(img_req, obj_req) {
		switch (img_req->op_type) {
		case OBJ_OP_READ:
			ret = rbd_obj_setup_read(obj_req);
			break;
		case OBJ_OP_WRITE:
			ret = rbd_obj_setup_write(obj_req);
			break;
		case OBJ_OP_DISCARD:
			ret = rbd_obj_setup_discard(obj_req);
			break;
		default:
			rbd_assert(0);
		}
		if (ret)
			return ret;

		ret = ceph_osdc_alloc_messages(obj_req->osd_req, GFP_NOIO);
		if (ret)
			return ret;
	}

	return 0;
}
union rbd_img_fill_iter {
	struct ceph_bio_iter	bio_iter;
	struct ceph_bvec_iter	bvec_iter;
};

struct rbd_img_fill_ctx {
	enum obj_request_type	pos_type;
	union rbd_img_fill_iter	*pos;
	union rbd_img_fill_iter	iter;
	ceph_object_extent_fn_t	set_pos_fn;
	ceph_object_extent_fn_t	count_fn;
	ceph_object_extent_fn_t	copy_fn;
};
static struct ceph_object_extent *alloc_object_extent(void *arg)
{
	struct rbd_img_request *img_req = arg;
	struct rbd_obj_request *obj_req;

	obj_req = rbd_obj_request_create();
	if (!obj_req)
		return NULL;

	rbd_img_obj_request_add(img_req, obj_req);
	return &obj_req->ex;
}
/*
 * While su != os && sc == 1 is technically not fancy (it's the same
 * layout as su == os && sc == 1), we can't use the nocopy path for it
 * because ->set_pos_fn() should be called only once per object.
 * ceph_file_to_extents() invokes action_fn once per stripe unit, so
 * treat su != os && sc == 1 as fancy.
 */
static bool rbd_layout_is_fancy(struct ceph_file_layout *l)
{
	return l->stripe_unit != l->object_size;
}
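/*
 * E.g. (illustrative): su == os == 4M with sc == 1 is not fancy, so
 * the nocopy path applies; su == 64K with os == 4M counts as fancy
 * even when sc == 1, for the reason given above.
 */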
static int rbd_img_fill_request_nocopy(struct rbd_img_request *img_req,
				       struct ceph_file_extent *img_extents,
				       u32 num_img_extents,
				       struct rbd_img_fill_ctx *fctx)
{
	u32 i;
	int ret;

	img_req->data_type = fctx->pos_type;

	/*
	 * Create object requests and set each object request's starting
	 * position in the provided bio (list) or bio_vec array.
	 */
	fctx->iter = *fctx->pos;
	for (i = 0; i < num_img_extents; i++) {
		ret = ceph_file_to_extents(&img_req->rbd_dev->layout,
					   img_extents[i].fe_off,
					   img_extents[i].fe_len,
					   &img_req->object_extents,
					   alloc_object_extent, img_req,
					   fctx->set_pos_fn, &fctx->iter);
		if (ret)
			return ret;
	}

	return __rbd_img_fill_request(img_req);
}
/*
 * Map a list of image extents to a list of object extents, create the
 * corresponding object requests (normally each to a different object,
 * but not always) and add them to @img_req.  For each object request,
 * set up its data descriptor to point to the corresponding chunk(s) of
 * @fctx->pos data buffer.
 *
 * Because ceph_file_to_extents() will merge adjacent object extents
 * together, each object request's data descriptor may point to multiple
 * different chunks of @fctx->pos data buffer.
 *
 * @fctx->pos data buffer is assumed to be large enough.
 */
static int rbd_img_fill_request(struct rbd_img_request *img_req,
				struct ceph_file_extent *img_extents,
				u32 num_img_extents,
				struct rbd_img_fill_ctx *fctx)
{
	struct rbd_device *rbd_dev = img_req->rbd_dev;
	struct rbd_obj_request *obj_req;
	u32 i;
	int ret;

	if (fctx->pos_type == OBJ_REQUEST_NODATA ||
	    !rbd_layout_is_fancy(&rbd_dev->layout))
		return rbd_img_fill_request_nocopy(img_req, img_extents,
						   num_img_extents, fctx);

	img_req->data_type = OBJ_REQUEST_OWN_BVECS;

	/*
	 * Create object requests and determine ->bvec_count for each object
	 * request.  Note that ->bvec_count sum over all object requests may
	 * be greater than the number of bio_vecs in the provided bio (list)
	 * or bio_vec array because when mapped, those bio_vecs can straddle
	 * stripe unit boundaries.
	 */
	fctx->iter = *fctx->pos;
	for (i = 0; i < num_img_extents; i++) {
		ret = ceph_file_to_extents(&rbd_dev->layout,
					   img_extents[i].fe_off,
					   img_extents[i].fe_len,
					   &img_req->object_extents,
					   alloc_object_extent, img_req,
					   fctx->count_fn, &fctx->iter);
		if (ret)
			return ret;
	}

	for_each_obj_request(img_req, obj_req) {
		obj_req->bvec_pos.bvecs = kmalloc_array(obj_req->bvec_count,
					      sizeof(*obj_req->bvec_pos.bvecs),
					      GFP_NOIO);
		if (!obj_req->bvec_pos.bvecs)
			return -ENOMEM;
	}

	/*
	 * Fill in each object request's private bio_vec array, splitting and
	 * rearranging the provided bio_vecs in stripe unit chunks as needed.
	 */
	fctx->iter = *fctx->pos;
	for (i = 0; i < num_img_extents; i++) {
		ret = ceph_iterate_extents(&rbd_dev->layout,
					   img_extents[i].fe_off,
					   img_extents[i].fe_len,
					   &img_req->object_extents,
					   fctx->copy_fn, &fctx->iter);
		if (ret)
			return ret;
	}

	return __rbd_img_fill_request(img_req);
}
static int rbd_img_fill_nodata(struct rbd_img_request *img_req,
			       u64 off, u64 len)
{
	struct ceph_file_extent ex = { off, len };
	union rbd_img_fill_iter dummy;
	struct rbd_img_fill_ctx fctx = {
		.pos_type = OBJ_REQUEST_NODATA,
		.pos = &dummy,
	};

	return rbd_img_fill_request(img_req, &ex, 1, &fctx);
}
static void set_bio_pos(struct ceph_object_extent *ex, u32 bytes, void *arg)
{
	struct rbd_obj_request *obj_req =
	    container_of(ex, struct rbd_obj_request, ex);
	struct ceph_bio_iter *it = arg;

	dout("%s objno %llu bytes %u\n", __func__, ex->oe_objno, bytes);
	obj_req->bio_pos = *it;
	ceph_bio_iter_advance(it, bytes);
}

static void count_bio_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg)
{
	struct rbd_obj_request *obj_req =
	    container_of(ex, struct rbd_obj_request, ex);
	struct ceph_bio_iter *it = arg;

	dout("%s objno %llu bytes %u\n", __func__, ex->oe_objno, bytes);
	ceph_bio_iter_advance_step(it, bytes, ({
		obj_req->bvec_count++;
	}));
}

static void copy_bio_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg)
{
	struct rbd_obj_request *obj_req =
	    container_of(ex, struct rbd_obj_request, ex);
	struct ceph_bio_iter *it = arg;

	dout("%s objno %llu bytes %u\n", __func__, ex->oe_objno, bytes);
	ceph_bio_iter_advance_step(it, bytes, ({
		obj_req->bvec_pos.bvecs[obj_req->bvec_idx++] = bv;
		obj_req->bvec_pos.iter.bi_size += bv.bv_len;
	}));
}
static int __rbd_img_fill_from_bio(struct rbd_img_request *img_req,
				   struct ceph_file_extent *img_extents,
				   u32 num_img_extents,
				   struct ceph_bio_iter *bio_pos)
{
	struct rbd_img_fill_ctx fctx = {
		.pos_type = OBJ_REQUEST_BIO,
		.pos = (union rbd_img_fill_iter *)bio_pos,
		.set_pos_fn = set_bio_pos,
		.count_fn = count_bio_bvecs,
		.copy_fn = copy_bio_bvecs,
	};

	return rbd_img_fill_request(img_req, img_extents, num_img_extents,
				    &fctx);
}

static int rbd_img_fill_from_bio(struct rbd_img_request *img_req,
				 u64 off, u64 len, struct bio *bio)
{
	struct ceph_file_extent ex = { off, len };
	struct ceph_bio_iter it = { .bio = bio, .iter = bio->bi_iter };

	return __rbd_img_fill_from_bio(img_req, &ex, 1, &it);
}
static void set_bvec_pos(struct ceph_object_extent *ex, u32 bytes, void *arg)
{
	struct rbd_obj_request *obj_req =
	    container_of(ex, struct rbd_obj_request, ex);
	struct ceph_bvec_iter *it = arg;

	obj_req->bvec_pos = *it;
	ceph_bvec_iter_shorten(&obj_req->bvec_pos, bytes);
	ceph_bvec_iter_advance(it, bytes);
}

static void count_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg)
{
	struct rbd_obj_request *obj_req =
	    container_of(ex, struct rbd_obj_request, ex);
	struct ceph_bvec_iter *it = arg;

	ceph_bvec_iter_advance_step(it, bytes, ({
		obj_req->bvec_count++;
	}));
}

static void copy_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg)
{
	struct rbd_obj_request *obj_req =
	    container_of(ex, struct rbd_obj_request, ex);
	struct ceph_bvec_iter *it = arg;

	ceph_bvec_iter_advance_step(it, bytes, ({
		obj_req->bvec_pos.bvecs[obj_req->bvec_idx++] = bv;
		obj_req->bvec_pos.iter.bi_size += bv.bv_len;
	}));
}
static int __rbd_img_fill_from_bvecs(struct rbd_img_request *img_req,
				     struct ceph_file_extent *img_extents,
				     u32 num_img_extents,
				     struct ceph_bvec_iter *bvec_pos)
{
	struct rbd_img_fill_ctx fctx = {
		.pos_type = OBJ_REQUEST_BVECS,
		.pos = (union rbd_img_fill_iter *)bvec_pos,
		.set_pos_fn = set_bvec_pos,
		.count_fn = count_bvecs,
		.copy_fn = copy_bvecs,
	};

	return rbd_img_fill_request(img_req, img_extents, num_img_extents,
				    &fctx);
}

static int rbd_img_fill_from_bvecs(struct rbd_img_request *img_req,
				   struct ceph_file_extent *img_extents,
				   u32 num_img_extents,
				   struct bio_vec *bvecs)
{
	struct ceph_bvec_iter it = {
		.bvecs = bvecs,
		.iter = { .bi_size = ceph_file_extents_bytes(img_extents,
							     num_img_extents) },
	};

	return __rbd_img_fill_from_bvecs(img_req, img_extents, num_img_extents,
					 &it);
}
static void rbd_img_request_submit(struct rbd_img_request *img_request)
{
	struct rbd_obj_request *obj_request;

	dout("%s: img %p\n", __func__, img_request);

	rbd_img_request_get(img_request);
	for_each_obj_request(img_request, obj_request)
		rbd_obj_request_submit(obj_request);

	rbd_img_request_put(img_request);
}
static int rbd_obj_read_from_parent(struct rbd_obj_request *obj_req)
{
	struct rbd_img_request *img_req = obj_req->img_request;
	struct rbd_img_request *child_img_req;
	int ret;

	child_img_req = rbd_img_request_create(img_req->rbd_dev->parent,
					       OBJ_OP_READ, NULL);
	if (!child_img_req)
		return -ENOMEM;

	__set_bit(IMG_REQ_CHILD, &child_img_req->flags);
	child_img_req->obj_request = obj_req;

	if (!rbd_img_is_write(img_req)) {
		switch (img_req->data_type) {
		case OBJ_REQUEST_BIO:
			ret = __rbd_img_fill_from_bio(child_img_req,
						      obj_req->img_extents,
						      obj_req->num_img_extents,
						      &obj_req->bio_pos);
			break;
		case OBJ_REQUEST_BVECS:
		case OBJ_REQUEST_OWN_BVECS:
			ret = __rbd_img_fill_from_bvecs(child_img_req,
						      obj_req->img_extents,
						      obj_req->num_img_extents,
						      &obj_req->bvec_pos);
			break;
		default:
			rbd_assert(0);
		}
	} else {
		ret = rbd_img_fill_from_bvecs(child_img_req,
					      obj_req->img_extents,
					      obj_req->num_img_extents,
					      obj_req->copyup_bvecs);
	}
	if (ret) {
		rbd_img_request_put(child_img_req);
		return ret;
	}

	rbd_img_request_submit(child_img_req);
	return 0;
}
static bool rbd_obj_handle_read(struct rbd_obj_request *obj_req)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
	int ret;

	if (obj_req->result == -ENOENT &&
	    rbd_dev->parent_overlap && !obj_req->tried_parent) {
		/* reverse map this object extent onto the parent */
		ret = rbd_obj_calc_img_extents(obj_req, false);
		if (ret) {
			obj_req->result = ret;
			return true;
		}

		if (obj_req->num_img_extents) {
			obj_req->tried_parent = true;
			ret = rbd_obj_read_from_parent(obj_req);
			if (ret) {
				obj_req->result = ret;
				return true;
			}
			return false;
		}
	}

	/*
	 * -ENOENT means a hole in the image -- zero-fill the entire
	 * length of the request.  A short read also implies zero-fill
	 * to the end of the request.  In both cases we update xferred
	 * count to indicate the whole request was satisfied.
	 */
	if (obj_req->result == -ENOENT ||
	    (!obj_req->result && obj_req->xferred < obj_req->ex.oe_len)) {
		rbd_assert(!obj_req->xferred || !obj_req->result);
		rbd_obj_zero_range(obj_req, obj_req->xferred,
				   obj_req->ex.oe_len - obj_req->xferred);
		obj_req->result = 0;
		obj_req->xferred = obj_req->ex.oe_len;
	}

	return true;
}
/*
 * copyup_bvecs pages are never highmem pages
 */
static bool is_zero_bvecs(struct bio_vec *bvecs, u32 bytes)
{
	struct ceph_bvec_iter it = {
		.bvecs = bvecs,
		.iter = { .bi_size = bytes },
	};

	ceph_bvec_iter_advance_step(&it, bytes, ({
		if (memchr_inv(page_address(bv.bv_page) + bv.bv_offset, 0,
			       bv.bv_len))
			return false;
	}));
	return true;
}
static int rbd_obj_issue_copyup(struct rbd_obj_request *obj_req, u32 bytes)
{
	unsigned int num_osd_ops = obj_req->osd_req->r_num_ops;
	int ret;

	dout("%s obj_req %p bytes %u\n", __func__, obj_req, bytes);
	rbd_assert(obj_req->osd_req->r_ops[0].op == CEPH_OSD_OP_STAT);
	rbd_osd_req_destroy(obj_req->osd_req);

	/*
	 * Create a copyup request with the same number of OSD ops as
	 * the original request.  The original request was stat + op(s),
	 * the new copyup request will be copyup + the same op(s).
	 */
	obj_req->osd_req = rbd_osd_req_create(obj_req, num_osd_ops);
	if (!obj_req->osd_req)
		return -ENOMEM;

	ret = osd_req_op_cls_init(obj_req->osd_req, 0, "rbd", "copyup");
	if (ret)
		return ret;

	/*
	 * Only send non-zero copyup data to save some I/O and network
	 * bandwidth -- zero copyup data is equivalent to the object not
	 * existing.
	 */
	if (is_zero_bvecs(obj_req->copyup_bvecs, bytes)) {
		dout("%s obj_req %p detected zeroes\n", __func__, obj_req);
		bytes = 0;
	}
	osd_req_op_cls_request_data_bvecs(obj_req->osd_req, 0,
					  obj_req->copyup_bvecs,
					  obj_req->copyup_bvec_count,
					  bytes);

	switch (obj_req->img_request->op_type) {
	case OBJ_OP_WRITE:
		__rbd_obj_setup_write(obj_req, 1);
		break;
	case OBJ_OP_DISCARD:
		rbd_assert(!rbd_obj_is_entire(obj_req));
		__rbd_obj_setup_discard(obj_req, 1);
		break;
	default:
		rbd_assert(0);
	}

	ret = ceph_osdc_alloc_messages(obj_req->osd_req, GFP_NOIO);
	if (ret)
		return ret;

	rbd_obj_request_submit(obj_req);
	return 0;
}
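/*
 * Illustration of the op-vector rewrite performed above (editorial
 * sketch; it follows the "stat + op(s)" comment in the function):
 *
 *	before:  r_ops[0] = STAT    r_ops[1] = WRITE   (guarded write)
 *	after:   r_ops[0] = CALL    r_ops[1] = WRITE
 *	                   ("rbd", "copyup", parent data or empty)
 *
 * The op count stays num_osd_ops; only the guard is swapped for the
 * copyup call, with all-zero copyup data trimmed to save bandwidth.
 */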
static int setup_copyup_bvecs(struct rbd_obj_request *obj_req, u64 obj_overlap)
{
	u32 i;

	rbd_assert(!obj_req->copyup_bvecs);
	obj_req->copyup_bvec_count = calc_pages_for(0, obj_overlap);
	obj_req->copyup_bvecs = kcalloc(obj_req->copyup_bvec_count,
					sizeof(*obj_req->copyup_bvecs),
					GFP_NOIO);
	if (!obj_req->copyup_bvecs)
		return -ENOMEM;

	for (i = 0; i < obj_req->copyup_bvec_count; i++) {
		unsigned int len = min(obj_overlap, (u64)PAGE_SIZE);

		obj_req->copyup_bvecs[i].bv_page = alloc_page(GFP_NOIO);
		if (!obj_req->copyup_bvecs[i].bv_page)
			return -ENOMEM;

		obj_req->copyup_bvecs[i].bv_offset = 0;
		obj_req->copyup_bvecs[i].bv_len = len;
		obj_overlap -= len;
	}

	rbd_assert(!obj_overlap);
	return 0;
}
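/*
 * Example of the page math above (illustrative, assuming 4K pages):
 * for obj_overlap == 9000 bytes, calc_pages_for(0, 9000) yields three
 * pages and the loop assigns bv_len = 4096, 4096 and 808.  The final
 * rbd_assert(!obj_overlap) verifies that the lengths consumed the
 * overlap exactly.
 */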
static int rbd_obj_handle_write_guard(struct rbd_obj_request *obj_req)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
	int ret;

	rbd_assert(obj_req->num_img_extents);
	prune_extents(obj_req->img_extents, &obj_req->num_img_extents,
		      rbd_dev->parent_overlap);
	if (!obj_req->num_img_extents) {
		/*
		 * The overlap has become 0 (most likely because the
		 * image has been flattened).  Use rbd_obj_issue_copyup()
		 * to re-submit the original write request -- the copyup
		 * operation itself will be a no-op, since someone must
		 * have populated the child object while we weren't
		 * looking.  Move to WRITE_FLAT state as we'll be done
		 * with the operation once the null copyup completes.
		 */
		obj_req->write_state = RBD_OBJ_WRITE_FLAT;
		return rbd_obj_issue_copyup(obj_req, 0);
	}

	ret = setup_copyup_bvecs(obj_req, rbd_obj_img_extents_bytes(obj_req));
	if (ret)
		return ret;

	obj_req->write_state = RBD_OBJ_WRITE_COPYUP;
	return rbd_obj_read_from_parent(obj_req);
}
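/*
 * Editorial sketch of the copyup state machine driven by this helper
 * and rbd_obj_handle_write() below (simplified):
 *
 *	WRITE_GUARD --(-ENOENT, overlap)--> read parent, WRITE_COPYUP
 *	WRITE_COPYUP --(copyup + original ops)--> WRITE_GUARD --> done
 *	WRITE_GUARD --(-ENOENT, overlap == 0)--> WRITE_FLAT (null copyup)
 *	WRITE_FLAT --> done
 */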
static bool rbd_obj_handle_write(struct rbd_obj_request *obj_req)
{
	int ret;

again:
	switch (obj_req->write_state) {
	case RBD_OBJ_WRITE_GUARD:
		rbd_assert(!obj_req->xferred);
		if (obj_req->result == -ENOENT) {
			/*
			 * The target object doesn't exist.  Read the data for
			 * the entire target object up to the overlap point (if
			 * any) from the parent, so we can use it for a copyup.
			 */
			ret = rbd_obj_handle_write_guard(obj_req);
			if (ret) {
				obj_req->result = ret;
				return true;
			}
			return false;
		}
		/* fall through */
	case RBD_OBJ_WRITE_FLAT:
		if (!obj_req->result)
			/*
			 * There is no such thing as a successful short
			 * write -- indicate the whole request was satisfied.
			 */
			obj_req->xferred = obj_req->ex.oe_len;
		return true;
	case RBD_OBJ_WRITE_COPYUP:
		obj_req->write_state = RBD_OBJ_WRITE_GUARD;
		if (obj_req->result)
			goto again;

		rbd_assert(obj_req->xferred);
		ret = rbd_obj_issue_copyup(obj_req, obj_req->xferred);
		if (ret) {
			obj_req->result = ret;
			return true;
		}
		return false;
	default:
		BUG();
	}
}
/*
 * Returns true if @obj_req is completed, or false otherwise.
 */
static bool __rbd_obj_handle_request(struct rbd_obj_request *obj_req)
{
	switch (obj_req->img_request->op_type) {
	case OBJ_OP_READ:
		return rbd_obj_handle_read(obj_req);
	case OBJ_OP_WRITE:
		return rbd_obj_handle_write(obj_req);
	case OBJ_OP_DISCARD:
		if (rbd_obj_handle_write(obj_req)) {
			/*
			 * Hide -ENOENT from delete/truncate/zero -- discarding
			 * a non-existent object is not a problem.
			 */
			if (obj_req->result == -ENOENT) {
				obj_req->result = 0;
				obj_req->xferred = obj_req->ex.oe_len;
			}
			return true;
		}
		return false;
	default:
		BUG();
	}
}
2548 static void rbd_obj_end_request(struct rbd_obj_request *obj_req)
{
	struct rbd_img_request *img_req = obj_req->img_request;

	rbd_assert((!obj_req->result &&
		    obj_req->xferred == obj_req->ex.oe_len) ||
		   (obj_req->result < 0 && !obj_req->xferred));
	if (!obj_req->result) {
		img_req->xferred += obj_req->xferred;
		return;
	}

	rbd_warn(img_req->rbd_dev,
		 "%s at objno %llu %llu~%llu result %d xferred %llu",
		 obj_op_name(img_req->op_type), obj_req->ex.oe_objno,
		 obj_req->ex.oe_off, obj_req->ex.oe_len, obj_req->result,
		 obj_req->xferred);
	if (!img_req->result) {
		img_req->result = obj_req->result;
		img_req->xferred = 0;
	}
}
2571 static void rbd_img_end_child_request(struct rbd_img_request *img_req)
2573 struct rbd_obj_request *obj_req = img_req->obj_request;
2575 rbd_assert(test_bit(IMG_REQ_CHILD, &img_req->flags));
2576 rbd_assert((!img_req->result &&
2577 img_req->xferred == rbd_obj_img_extents_bytes(obj_req)) ||
2578 (img_req->result < 0 && !img_req->xferred));
2580 obj_req->result = img_req->result;
2581 obj_req->xferred = img_req->xferred;
2582 rbd_img_request_put(img_req);
2585 static void rbd_img_end_request(struct rbd_img_request *img_req)
2587 rbd_assert(!test_bit(IMG_REQ_CHILD, &img_req->flags));
2588 rbd_assert((!img_req->result &&
2589 img_req->xferred == blk_rq_bytes(img_req->rq)) ||
2590 (img_req->result < 0 && !img_req->xferred));
2592 blk_mq_end_request(img_req->rq,
2593 errno_to_blk_status(img_req->result));
2594 rbd_img_request_put(img_req);
2597 static void rbd_obj_handle_request(struct rbd_obj_request *obj_req)
{
	struct rbd_img_request *img_req;

again:
	if (!__rbd_obj_handle_request(obj_req))
		return;

	img_req = obj_req->img_request;
	spin_lock(&img_req->completion_lock);
	rbd_obj_end_request(obj_req);
	rbd_assert(img_req->pending_count);
	if (--img_req->pending_count) {
		spin_unlock(&img_req->completion_lock);
		return;
	}

	spin_unlock(&img_req->completion_lock);
	if (test_bit(IMG_REQ_CHILD, &img_req->flags)) {
		obj_req = img_req->obj_request;
		rbd_img_end_child_request(img_req);
		goto again;
	}
	rbd_img_end_request(img_req);
}
2623 static const struct rbd_client_id rbd_empty_cid;
2625 static bool rbd_cid_equal(const struct rbd_client_id *lhs,
2626 const struct rbd_client_id *rhs)
2628 return lhs->gid == rhs->gid && lhs->handle == rhs->handle;
2631 static struct rbd_client_id rbd_get_cid(struct rbd_device *rbd_dev)
2633 struct rbd_client_id cid;
2635 mutex_lock(&rbd_dev->watch_mutex);
2636 cid.gid = ceph_client_gid(rbd_dev->rbd_client->client);
2637 cid.handle = rbd_dev->watch_cookie;
2638 mutex_unlock(&rbd_dev->watch_mutex);
2643 * lock_rwsem must be held for write
2645 static void rbd_set_owner_cid(struct rbd_device *rbd_dev,
2646 const struct rbd_client_id *cid)
2648 dout("%s rbd_dev %p %llu-%llu -> %llu-%llu\n", __func__, rbd_dev,
2649 rbd_dev->owner_cid.gid, rbd_dev->owner_cid.handle,
2650 cid->gid, cid->handle);
2651 rbd_dev->owner_cid = *cid; /* struct */
2654 static void format_lock_cookie(struct rbd_device *rbd_dev, char *buf)
2656 mutex_lock(&rbd_dev->watch_mutex);
2657 sprintf(buf, "%s %llu", RBD_LOCK_COOKIE_PREFIX, rbd_dev->watch_cookie);
	mutex_unlock(&rbd_dev->watch_mutex);
}
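/*
 * Example (editorial): assuming RBD_LOCK_COOKIE_PREFIX (defined
 * earlier in this file) is "auto", a watch cookie of 94223 produces
 * the lock cookie string "auto 94223".  find_watcher() below parses
 * the same format back with sscanf() to match a locker against a
 * live watch.
 */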
2661 static void __rbd_lock(struct rbd_device *rbd_dev, const char *cookie)
2663 struct rbd_client_id cid = rbd_get_cid(rbd_dev);
2665 strcpy(rbd_dev->lock_cookie, cookie);
2666 rbd_set_owner_cid(rbd_dev, &cid);
2667 queue_work(rbd_dev->task_wq, &rbd_dev->acquired_lock_work);
2671 * lock_rwsem must be held for write
2673 static int rbd_lock(struct rbd_device *rbd_dev)
2675 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2679 WARN_ON(__rbd_is_lock_owner(rbd_dev) ||
2680 rbd_dev->lock_cookie[0] != '\0');
2682 format_lock_cookie(rbd_dev, cookie);
2683 ret = ceph_cls_lock(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
2684 RBD_LOCK_NAME, CEPH_CLS_LOCK_EXCLUSIVE, cookie,
2685 RBD_LOCK_TAG, "", 0);
2689 rbd_dev->lock_state = RBD_LOCK_STATE_LOCKED;
2690 __rbd_lock(rbd_dev, cookie);
2695 * lock_rwsem must be held for write
2697 static void rbd_unlock(struct rbd_device *rbd_dev)
2699 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2702 WARN_ON(!__rbd_is_lock_owner(rbd_dev) ||
2703 rbd_dev->lock_cookie[0] == '\0');
2705 ret = ceph_cls_unlock(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
2706 RBD_LOCK_NAME, rbd_dev->lock_cookie);
2707 if (ret && ret != -ENOENT)
2708 rbd_warn(rbd_dev, "failed to unlock: %d", ret);
2710 /* treat errors as the image is unlocked */
2711 rbd_dev->lock_state = RBD_LOCK_STATE_UNLOCKED;
2712 rbd_dev->lock_cookie[0] = '\0';
2713 rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
2714 queue_work(rbd_dev->task_wq, &rbd_dev->released_lock_work);
2717 static int __rbd_notify_op_lock(struct rbd_device *rbd_dev,
2718 enum rbd_notify_op notify_op,
2719 struct page ***preply_pages,
2722 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2723 struct rbd_client_id cid = rbd_get_cid(rbd_dev);
2724 char buf[4 + 8 + 8 + CEPH_ENCODING_START_BLK_LEN];
2725 int buf_size = sizeof(buf);
2728 dout("%s rbd_dev %p notify_op %d\n", __func__, rbd_dev, notify_op);
2730 /* encode *LockPayload NotifyMessage (op + ClientId) */
2731 ceph_start_encoding(&p, 2, 1, buf_size - CEPH_ENCODING_START_BLK_LEN);
2732 ceph_encode_32(&p, notify_op);
2733 ceph_encode_64(&p, cid.gid);
2734 ceph_encode_64(&p, cid.handle);
2736 return ceph_osdc_notify(osdc, &rbd_dev->header_oid,
2737 &rbd_dev->header_oloc, buf, buf_size,
				RBD_NOTIFY_TIMEOUT, preply_pages, preply_len);
}
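/*
 * Wire sketch of the notify payload built above (editorial note;
 * field order follows the ceph_encode_* calls):
 *
 *	start-encoding block header (struct_v 2, compat 1, length)
 *	__le32 notify_op
 *	__le64 gid     \ ClientId
 *	__le64 handle  /
 *
 * hence buf sized as 4 + 8 + 8 plus CEPH_ENCODING_START_BLK_LEN.
 */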
2741 static void rbd_notify_op_lock(struct rbd_device *rbd_dev,
2742 enum rbd_notify_op notify_op)
2744 struct page **reply_pages;
2747 __rbd_notify_op_lock(rbd_dev, notify_op, &reply_pages, &reply_len);
2748 ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len));
2751 static void rbd_notify_acquired_lock(struct work_struct *work)
2753 struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
2754 acquired_lock_work);
2756 rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_ACQUIRED_LOCK);
2759 static void rbd_notify_released_lock(struct work_struct *work)
2761 struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
2762 released_lock_work);
2764 rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_RELEASED_LOCK);
2767 static int rbd_request_lock(struct rbd_device *rbd_dev)
2769 struct page **reply_pages;
2771 bool lock_owner_responded = false;
2774 dout("%s rbd_dev %p\n", __func__, rbd_dev);
2776 ret = __rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_REQUEST_LOCK,
2777 &reply_pages, &reply_len);
2778 if (ret && ret != -ETIMEDOUT) {
		rbd_warn(rbd_dev, "failed to request lock: %d", ret);
		return ret;
	}
2783 if (reply_len > 0 && reply_len <= PAGE_SIZE) {
2784 void *p = page_address(reply_pages[0]);
2785 void *const end = p + reply_len;
		ceph_decode_32_safe(&p, end, n, e_inval); /* num_acks */
		while (n--) {
			u8 struct_v;
			u32 len;
2793 ceph_decode_need(&p, end, 8 + 8, e_inval);
2794 p += 8 + 8; /* skip gid and cookie */
			ceph_decode_32_safe(&p, end, len, e_inval);
			if (!len)
				continue;
2800 if (lock_owner_responded) {
2802 "duplicate lock owners detected");
2807 lock_owner_responded = true;
			ret = ceph_start_decoding(&p, end, 1, "ResponseMessage",
						  &struct_v, &len);
			if (ret) {
				rbd_warn(rbd_dev,
					 "failed to decode ResponseMessage: %d",
					 ret);
				goto e_inval;
			}

			ret = ceph_decode_32(&p);
		}
	}
	if (!lock_owner_responded) {
		rbd_warn(rbd_dev, "no lock owners detected");
		ret = -ENXIO;
	}

out:
	ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len));
	return ret;

e_inval:
	ret = -EINVAL;
	goto out;
}
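/*
 * Editorial sketch of the notify reply decoded above:
 *
 *	__le32 num_acks
 *	num_acks * { __le64 gid, __le64 cookie, __le32 len, payload }
 *
 * Only the current lock owner replies with a non-empty payload (a
 * ResponseMessage), so more than one non-empty payload means
 * "duplicate lock owners" and none at all means "no lock owners".
 */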
2835 static void wake_requests(struct rbd_device *rbd_dev, bool wake_all)
2837 dout("%s rbd_dev %p wake_all %d\n", __func__, rbd_dev, wake_all);
2839 cancel_delayed_work(&rbd_dev->lock_dwork);
	if (wake_all)
		wake_up_all(&rbd_dev->lock_waitq);
	else
		wake_up(&rbd_dev->lock_waitq);
}
2846 static int get_lock_owner_info(struct rbd_device *rbd_dev,
2847 struct ceph_locker **lockers, u32 *num_lockers)
2849 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2854 dout("%s rbd_dev %p\n", __func__, rbd_dev);
2856 ret = ceph_cls_lock_info(osdc, &rbd_dev->header_oid,
2857 &rbd_dev->header_oloc, RBD_LOCK_NAME,
2858 &lock_type, &lock_tag, lockers, num_lockers);
2862 if (*num_lockers == 0) {
2863 dout("%s rbd_dev %p no lockers detected\n", __func__, rbd_dev);
2867 if (strcmp(lock_tag, RBD_LOCK_TAG)) {
2868 rbd_warn(rbd_dev, "locked by external mechanism, tag %s",
2874 if (lock_type == CEPH_CLS_LOCK_SHARED) {
2875 rbd_warn(rbd_dev, "shared lock type detected");
2880 if (strncmp((*lockers)[0].id.cookie, RBD_LOCK_COOKIE_PREFIX,
2881 strlen(RBD_LOCK_COOKIE_PREFIX))) {
2882 rbd_warn(rbd_dev, "locked by external mechanism, cookie %s",
2883 (*lockers)[0].id.cookie);
2893 static int find_watcher(struct rbd_device *rbd_dev,
2894 const struct ceph_locker *locker)
2896 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2897 struct ceph_watch_item *watchers;
2903 ret = ceph_osdc_list_watchers(osdc, &rbd_dev->header_oid,
2904 &rbd_dev->header_oloc, &watchers,
2909 sscanf(locker->id.cookie, RBD_LOCK_COOKIE_PREFIX " %llu", &cookie);
2910 for (i = 0; i < num_watchers; i++) {
2911 if (!memcmp(&watchers[i].addr, &locker->info.addr,
2912 sizeof(locker->info.addr)) &&
2913 watchers[i].cookie == cookie) {
2914 struct rbd_client_id cid = {
2915 .gid = le64_to_cpu(watchers[i].name.num),
2919 dout("%s rbd_dev %p found cid %llu-%llu\n", __func__,
2920 rbd_dev, cid.gid, cid.handle);
2921 rbd_set_owner_cid(rbd_dev, &cid);
2927 dout("%s rbd_dev %p no watchers\n", __func__, rbd_dev);
2935 * lock_rwsem must be held for write
2937 static int rbd_try_lock(struct rbd_device *rbd_dev)
2939 struct ceph_client *client = rbd_dev->rbd_client->client;
2940 struct ceph_locker *lockers;
again:
	ret = rbd_lock(rbd_dev);
	if (ret != -EBUSY)
		return ret;
2949 /* determine if the current lock holder is still alive */
	ret = get_lock_owner_info(rbd_dev, &lockers, &num_lockers);
	if (ret)
		return ret;
	if (num_lockers == 0)
		goto again;
2957 ret = find_watcher(rbd_dev, lockers);
	if (ret) {
		if (ret > 0)
			ret = 0; /* have to request lock */
		goto out;
	}
2964 rbd_warn(rbd_dev, "%s%llu seems dead, breaking lock",
2965 ENTITY_NAME(lockers[0].id.name));
2967 ret = ceph_monc_blacklist_add(&client->monc,
2968 &lockers[0].info.addr);
	if (ret) {
		rbd_warn(rbd_dev, "blacklist of %s%llu failed: %d",
			 ENTITY_NAME(lockers[0].id.name), ret);
		goto out;
	}
2975 ret = ceph_cls_break_lock(&client->osdc, &rbd_dev->header_oid,
2976 &rbd_dev->header_oloc, RBD_LOCK_NAME,
2977 lockers[0].id.cookie,
2978 &lockers[0].id.name);
	if (ret && ret != -ENOENT)
		goto out;
	ceph_free_lockers(lockers, num_lockers);
	goto again;

out:
	ceph_free_lockers(lockers, num_lockers);
	return ret;
}
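/*
 * Editorial summary of rbd_try_lock(): loop on rbd_lock() until it
 * stops returning -EBUSY; on -EBUSY, look up the holder and, if it
 * has no watch established (its client is presumed dead), blacklist
 * it and break its lock before retrying.  A live holder makes this
 * return 0, so the caller falls back to rbd_request_lock().
 */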
2992 * ret is set only if lock_state is RBD_LOCK_STATE_UNLOCKED
2994 static enum rbd_lock_state rbd_try_acquire_lock(struct rbd_device *rbd_dev,
2997 enum rbd_lock_state lock_state;
2999 down_read(&rbd_dev->lock_rwsem);
3000 dout("%s rbd_dev %p read lock_state %d\n", __func__, rbd_dev,
3001 rbd_dev->lock_state);
3002 if (__rbd_is_lock_owner(rbd_dev)) {
3003 lock_state = rbd_dev->lock_state;
3004 up_read(&rbd_dev->lock_rwsem);
3008 up_read(&rbd_dev->lock_rwsem);
3009 down_write(&rbd_dev->lock_rwsem);
3010 dout("%s rbd_dev %p write lock_state %d\n", __func__, rbd_dev,
3011 rbd_dev->lock_state);
3012 if (!__rbd_is_lock_owner(rbd_dev)) {
3013 *pret = rbd_try_lock(rbd_dev);
3015 rbd_warn(rbd_dev, "failed to acquire lock: %d", *pret);
3018 lock_state = rbd_dev->lock_state;
3019 up_write(&rbd_dev->lock_rwsem);
3023 static void rbd_acquire_lock(struct work_struct *work)
3025 struct rbd_device *rbd_dev = container_of(to_delayed_work(work),
3026 struct rbd_device, lock_dwork);
3027 enum rbd_lock_state lock_state;
3030 dout("%s rbd_dev %p\n", __func__, rbd_dev);
again:
	lock_state = rbd_try_acquire_lock(rbd_dev, &ret);
3033 if (lock_state != RBD_LOCK_STATE_UNLOCKED || ret == -EBLACKLISTED) {
3034 if (lock_state == RBD_LOCK_STATE_LOCKED)
3035 wake_requests(rbd_dev, true);
3036 dout("%s rbd_dev %p lock_state %d ret %d - done\n", __func__,
		     rbd_dev, lock_state, ret);
		return;
	}
3041 ret = rbd_request_lock(rbd_dev);
3042 if (ret == -ETIMEDOUT) {
3043 goto again; /* treat this as a dead client */
3044 } else if (ret == -EROFS) {
3045 rbd_warn(rbd_dev, "peer will not release lock");
		/*
		 * If this is rbd_add_acquire_lock(), we want to fail
		 * immediately -- reuse BLACKLISTED flag.  Otherwise we
		 * want to block.
		 */
3051 if (!(rbd_dev->disk->flags & GENHD_FL_UP)) {
3052 set_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags);
3053 /* wake "rbd map --exclusive" process */
			wake_requests(rbd_dev, false);
		}
3056 } else if (ret < 0) {
3057 rbd_warn(rbd_dev, "error requesting lock: %d", ret);
		mod_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork,
				 RBD_RETRY_DELAY);
	} else {
		/*
		 * lock owner acked, but resend if we don't see them
		 * release the lock
		 */
		dout("%s rbd_dev %p requeueing lock_dwork\n", __func__,
		     rbd_dev);
		mod_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork,
		    msecs_to_jiffies(2 * RBD_NOTIFY_TIMEOUT * MSEC_PER_SEC));
	}
}
3073 * lock_rwsem must be held for write
3075 static bool rbd_release_lock(struct rbd_device *rbd_dev)
3077 dout("%s rbd_dev %p read lock_state %d\n", __func__, rbd_dev,
3078 rbd_dev->lock_state);
	if (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED)
		return false;
3082 rbd_dev->lock_state = RBD_LOCK_STATE_RELEASING;
3083 downgrade_write(&rbd_dev->lock_rwsem);
3085 * Ensure that all in-flight IO is flushed.
3087 * FIXME: ceph_osdc_sync() flushes the entire OSD client, which
3088 * may be shared with other devices.
3090 ceph_osdc_sync(&rbd_dev->rbd_client->client->osdc);
3091 up_read(&rbd_dev->lock_rwsem);
3093 down_write(&rbd_dev->lock_rwsem);
3094 dout("%s rbd_dev %p write lock_state %d\n", __func__, rbd_dev,
3095 rbd_dev->lock_state);
	if (rbd_dev->lock_state != RBD_LOCK_STATE_RELEASING)
		return false;
3099 rbd_unlock(rbd_dev);
3101 * Give others a chance to grab the lock - we would re-acquire
3102 * almost immediately if we got new IO during ceph_osdc_sync()
3103 * otherwise. We need to ack our own notifications, so this
3104 * lock_dwork will be requeued from rbd_wait_state_locked()
3105 * after wake_requests() in rbd_handle_released_lock().
	cancel_delayed_work(&rbd_dev->lock_dwork);
	return true;
}
3111 static void rbd_release_lock_work(struct work_struct *work)
3113 struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
3116 down_write(&rbd_dev->lock_rwsem);
3117 rbd_release_lock(rbd_dev);
3118 up_write(&rbd_dev->lock_rwsem);
3121 static void rbd_handle_acquired_lock(struct rbd_device *rbd_dev, u8 struct_v,
3124 struct rbd_client_id cid = { 0 };
3126 if (struct_v >= 2) {
3127 cid.gid = ceph_decode_64(p);
3128 cid.handle = ceph_decode_64(p);
3131 dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
3133 if (!rbd_cid_equal(&cid, &rbd_empty_cid)) {
3134 down_write(&rbd_dev->lock_rwsem);
3135 if (rbd_cid_equal(&cid, &rbd_dev->owner_cid)) {
			/*
			 * we already know that the remote client is
			 * the owner
			 */
			up_write(&rbd_dev->lock_rwsem);
			return;
		}

		rbd_set_owner_cid(rbd_dev, &cid);
		downgrade_write(&rbd_dev->lock_rwsem);
	} else {
		down_read(&rbd_dev->lock_rwsem);
	}
3150 if (!__rbd_is_lock_owner(rbd_dev))
3151 wake_requests(rbd_dev, false);
	up_read(&rbd_dev->lock_rwsem);
}
3155 static void rbd_handle_released_lock(struct rbd_device *rbd_dev, u8 struct_v,
3158 struct rbd_client_id cid = { 0 };
3160 if (struct_v >= 2) {
3161 cid.gid = ceph_decode_64(p);
3162 cid.handle = ceph_decode_64(p);
3165 dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
3167 if (!rbd_cid_equal(&cid, &rbd_empty_cid)) {
3168 down_write(&rbd_dev->lock_rwsem);
3169 if (!rbd_cid_equal(&cid, &rbd_dev->owner_cid)) {
3170 dout("%s rbd_dev %p unexpected owner, cid %llu-%llu != owner_cid %llu-%llu\n",
3171 __func__, rbd_dev, cid.gid, cid.handle,
3172 rbd_dev->owner_cid.gid, rbd_dev->owner_cid.handle);
			up_write(&rbd_dev->lock_rwsem);
			return;
		}
3177 rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
		downgrade_write(&rbd_dev->lock_rwsem);
	} else {
		down_read(&rbd_dev->lock_rwsem);
	}
3183 if (!__rbd_is_lock_owner(rbd_dev))
3184 wake_requests(rbd_dev, false);
	up_read(&rbd_dev->lock_rwsem);
}
/*
 * Returns result for ResponseMessage to be encoded (<= 0), or 1 if no
 * ResponseMessage is needed.
 */
static int rbd_handle_request_lock(struct rbd_device *rbd_dev, u8 struct_v,
				   void **p)
{
	struct rbd_client_id my_cid = rbd_get_cid(rbd_dev);
	struct rbd_client_id cid = { 0 };
	int result = 1;

	if (struct_v >= 2) {
		cid.gid = ceph_decode_64(p);
		cid.handle = ceph_decode_64(p);
	}

	dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
	     cid.handle);
	if (rbd_cid_equal(&cid, &my_cid))
		return result;

	down_read(&rbd_dev->lock_rwsem);
	if (__rbd_is_lock_owner(rbd_dev)) {
		if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED &&
		    rbd_cid_equal(&rbd_dev->owner_cid, &rbd_empty_cid))
			goto out_unlock;

		/*
		 * encode ResponseMessage(0) so the peer can detect
		 * a missing owner
		 */
		result = 0;

		if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED) {
			if (!rbd_dev->opts->exclusive) {
				dout("%s rbd_dev %p queueing unlock_work\n",
				     __func__, rbd_dev);
				queue_work(rbd_dev->task_wq,
					   &rbd_dev->unlock_work);
			} else {
				/* refuse to release the lock */
				result = -EROFS;
			}
		}
	}

out_unlock:
	up_read(&rbd_dev->lock_rwsem);
	return result;
}
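/*
 * Example exchange (illustrative): client B wants the lock that
 * client A holds.  B sends NotifyMessage(REQUEST_LOCK) from
 * rbd_request_lock(); A's watch callback routes it here.  If A wasn't
 * mapped with the exclusive option, A queues unlock_work and acks
 * with ResponseMessage(0); B then waits for A's RELEASED_LOCK
 * notification and retries rbd_try_lock().  If A was mapped
 * exclusively, B gets ResponseMessage(-EROFS) and gives up.
 */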
3239 static void __rbd_acknowledge_notify(struct rbd_device *rbd_dev,
3240 u64 notify_id, u64 cookie, s32 *result)
3242 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3243 char buf[4 + CEPH_ENCODING_START_BLK_LEN];
3244 int buf_size = sizeof(buf);
3250 /* encode ResponseMessage */
3251 ceph_start_encoding(&p, 1, 1,
3252 buf_size - CEPH_ENCODING_START_BLK_LEN);
3253 ceph_encode_32(&p, *result);
3258 ret = ceph_osdc_notify_ack(osdc, &rbd_dev->header_oid,
3259 &rbd_dev->header_oloc, notify_id, cookie,
3262 rbd_warn(rbd_dev, "acknowledge_notify failed: %d", ret);
3265 static void rbd_acknowledge_notify(struct rbd_device *rbd_dev, u64 notify_id,
3268 dout("%s rbd_dev %p\n", __func__, rbd_dev);
3269 __rbd_acknowledge_notify(rbd_dev, notify_id, cookie, NULL);
3272 static void rbd_acknowledge_notify_result(struct rbd_device *rbd_dev,
3273 u64 notify_id, u64 cookie, s32 result)
3275 dout("%s rbd_dev %p result %d\n", __func__, rbd_dev, result);
3276 __rbd_acknowledge_notify(rbd_dev, notify_id, cookie, &result);
3279 static void rbd_watch_cb(void *arg, u64 notify_id, u64 cookie,
3280 u64 notifier_id, void *data, size_t data_len)
3282 struct rbd_device *rbd_dev = arg;
	void *p = data;
	void *const end = p + data_len;
	u8 struct_v = 0;
	u32 len = 0;
	u32 notify_op;
	int ret;
3290 dout("%s rbd_dev %p cookie %llu notify_id %llu data_len %zu\n",
3291 __func__, rbd_dev, cookie, notify_id, data_len);
	if (data_len) {
		ret = ceph_start_decoding(&p, end, 1, "NotifyMessage",
					  &struct_v, &len);
		if (ret) {
			rbd_warn(rbd_dev, "failed to decode NotifyMessage: %d",
				 ret);
			return;
		}

		notify_op = ceph_decode_32(&p);
	} else {
		/* legacy notification for header updates */
		notify_op = RBD_NOTIFY_OP_HEADER_UPDATE;
		len = 0;
	}
3308 dout("%s rbd_dev %p notify_op %u\n", __func__, rbd_dev, notify_op);
3309 switch (notify_op) {
3310 case RBD_NOTIFY_OP_ACQUIRED_LOCK:
3311 rbd_handle_acquired_lock(rbd_dev, struct_v, &p);
		rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
		break;
3314 case RBD_NOTIFY_OP_RELEASED_LOCK:
3315 rbd_handle_released_lock(rbd_dev, struct_v, &p);
		rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
		break;
3318 case RBD_NOTIFY_OP_REQUEST_LOCK:
3319 ret = rbd_handle_request_lock(rbd_dev, struct_v, &p);
		if (ret <= 0)
			rbd_acknowledge_notify_result(rbd_dev, notify_id,
						      cookie, ret);
		else
			rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
		break;
3326 case RBD_NOTIFY_OP_HEADER_UPDATE:
3327 ret = rbd_dev_refresh(rbd_dev);
		if (ret)
			rbd_warn(rbd_dev, "refresh failed: %d", ret);

		rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
		break;
	default:
3334 if (rbd_is_lock_owner(rbd_dev))
3335 rbd_acknowledge_notify_result(rbd_dev, notify_id,
3336 cookie, -EOPNOTSUPP);
		else
			rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
		break;
	}
}
3343 static void __rbd_unregister_watch(struct rbd_device *rbd_dev);
3345 static void rbd_watch_errcb(void *arg, u64 cookie, int err)
3347 struct rbd_device *rbd_dev = arg;
3349 rbd_warn(rbd_dev, "encountered watch error: %d", err);
3351 down_write(&rbd_dev->lock_rwsem);
3352 rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
3353 up_write(&rbd_dev->lock_rwsem);
3355 mutex_lock(&rbd_dev->watch_mutex);
3356 if (rbd_dev->watch_state == RBD_WATCH_STATE_REGISTERED) {
3357 __rbd_unregister_watch(rbd_dev);
3358 rbd_dev->watch_state = RBD_WATCH_STATE_ERROR;
3360 queue_delayed_work(rbd_dev->task_wq, &rbd_dev->watch_dwork, 0);
3362 mutex_unlock(&rbd_dev->watch_mutex);
3366 * watch_mutex must be locked
3368 static int __rbd_register_watch(struct rbd_device *rbd_dev)
3370 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3371 struct ceph_osd_linger_request *handle;
3373 rbd_assert(!rbd_dev->watch_handle);
3374 dout("%s rbd_dev %p\n", __func__, rbd_dev);
3376 handle = ceph_osdc_watch(osdc, &rbd_dev->header_oid,
3377 &rbd_dev->header_oloc, rbd_watch_cb,
3378 rbd_watch_errcb, rbd_dev);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	rbd_dev->watch_handle = handle;
	return 0;
}
3387 * watch_mutex must be locked
3389 static void __rbd_unregister_watch(struct rbd_device *rbd_dev)
3391 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3394 rbd_assert(rbd_dev->watch_handle);
3395 dout("%s rbd_dev %p\n", __func__, rbd_dev);
3397 ret = ceph_osdc_unwatch(osdc, rbd_dev->watch_handle);
	if (ret)
		rbd_warn(rbd_dev, "failed to unwatch: %d", ret);
3401 rbd_dev->watch_handle = NULL;
3404 static int rbd_register_watch(struct rbd_device *rbd_dev)
3408 mutex_lock(&rbd_dev->watch_mutex);
3409 rbd_assert(rbd_dev->watch_state == RBD_WATCH_STATE_UNREGISTERED);
	ret = __rbd_register_watch(rbd_dev);
	if (ret)
		goto out;

	rbd_dev->watch_state = RBD_WATCH_STATE_REGISTERED;
	rbd_dev->watch_cookie = rbd_dev->watch_handle->linger_id;

out:
	mutex_unlock(&rbd_dev->watch_mutex);
	return ret;
}
3422 static void cancel_tasks_sync(struct rbd_device *rbd_dev)
3424 dout("%s rbd_dev %p\n", __func__, rbd_dev);
3426 cancel_work_sync(&rbd_dev->acquired_lock_work);
3427 cancel_work_sync(&rbd_dev->released_lock_work);
3428 cancel_delayed_work_sync(&rbd_dev->lock_dwork);
3429 cancel_work_sync(&rbd_dev->unlock_work);
3432 static void rbd_unregister_watch(struct rbd_device *rbd_dev)
3434 WARN_ON(waitqueue_active(&rbd_dev->lock_waitq));
3435 cancel_tasks_sync(rbd_dev);
3437 mutex_lock(&rbd_dev->watch_mutex);
3438 if (rbd_dev->watch_state == RBD_WATCH_STATE_REGISTERED)
3439 __rbd_unregister_watch(rbd_dev);
3440 rbd_dev->watch_state = RBD_WATCH_STATE_UNREGISTERED;
3441 mutex_unlock(&rbd_dev->watch_mutex);
3443 cancel_delayed_work_sync(&rbd_dev->watch_dwork);
3444 ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);
3448 * lock_rwsem must be held for write
3450 static void rbd_reacquire_lock(struct rbd_device *rbd_dev)
3452 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3456 WARN_ON(rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED);
3458 format_lock_cookie(rbd_dev, cookie);
3459 ret = ceph_cls_set_cookie(osdc, &rbd_dev->header_oid,
3460 &rbd_dev->header_oloc, RBD_LOCK_NAME,
3461 CEPH_CLS_LOCK_EXCLUSIVE, rbd_dev->lock_cookie,
3462 RBD_LOCK_TAG, cookie);
	if (ret) {
		if (ret != -EOPNOTSUPP)
			rbd_warn(rbd_dev, "failed to update lock cookie: %d",
				 ret);

		/*
		 * Lock cookie cannot be updated on older OSDs, so do
		 * a manual release and queue an acquire.
		 */
		if (rbd_release_lock(rbd_dev))
			queue_delayed_work(rbd_dev->task_wq,
					   &rbd_dev->lock_dwork, 0);
	} else {
		__rbd_lock(rbd_dev, cookie);
	}
}
3480 static void rbd_reregister_watch(struct work_struct *work)
3482 struct rbd_device *rbd_dev = container_of(to_delayed_work(work),
3483 struct rbd_device, watch_dwork);
3486 dout("%s rbd_dev %p\n", __func__, rbd_dev);
3488 mutex_lock(&rbd_dev->watch_mutex);
3489 if (rbd_dev->watch_state != RBD_WATCH_STATE_ERROR) {
		mutex_unlock(&rbd_dev->watch_mutex);
		return;
	}
3494 ret = __rbd_register_watch(rbd_dev);
	if (ret) {
		rbd_warn(rbd_dev, "failed to reregister watch: %d", ret);
3497 if (ret == -EBLACKLISTED || ret == -ENOENT) {
3498 set_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags);
			wake_requests(rbd_dev, true);
		} else {
			queue_delayed_work(rbd_dev->task_wq,
					   &rbd_dev->watch_dwork,
					   RBD_RETRY_DELAY);
		}
		mutex_unlock(&rbd_dev->watch_mutex);
		return;
	}
3509 rbd_dev->watch_state = RBD_WATCH_STATE_REGISTERED;
3510 rbd_dev->watch_cookie = rbd_dev->watch_handle->linger_id;
3511 mutex_unlock(&rbd_dev->watch_mutex);
3513 down_write(&rbd_dev->lock_rwsem);
3514 if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED)
3515 rbd_reacquire_lock(rbd_dev);
3516 up_write(&rbd_dev->lock_rwsem);
3518 ret = rbd_dev_refresh(rbd_dev);
	if (ret)
		rbd_warn(rbd_dev, "reregistration refresh failed: %d", ret);
}
3524 * Synchronous osd object method call. Returns the number of bytes
3525 * returned in the outbound buffer, or a negative error code.
3527 static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
3528 struct ceph_object_id *oid,
3529 struct ceph_object_locator *oloc,
3530 const char *method_name,
3531 const void *outbound,
3532 size_t outbound_size,
3534 size_t inbound_size)
3536 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3537 struct page *req_page = NULL;
3538 struct page *reply_page;
3542 * Method calls are ultimately read operations. The result
 * should be placed into the inbound buffer provided.  They
3544 * also supply outbound data--parameters for the object
3545 * method. Currently if this is present it will be a
3549 if (outbound_size > PAGE_SIZE)
3552 req_page = alloc_page(GFP_KERNEL);
3556 memcpy(page_address(req_page), outbound, outbound_size);
3559 reply_page = alloc_page(GFP_KERNEL);
3562 __free_page(req_page);
3566 ret = ceph_osdc_call(osdc, oid, oloc, RBD_DRV_NAME, method_name,
3567 CEPH_OSD_FLAG_READ, req_page, outbound_size,
3568 reply_page, &inbound_size);
	if (!ret) {
		memcpy(inbound, page_address(reply_page), inbound_size);
		ret = inbound_size;
	}

	if (req_page)
		__free_page(req_page);
	__free_page(reply_page);
	return ret;
}
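/*
 * Usage sketch (editorial, modelled on the get_size caller in
 * _rbd_dev_v2_snap_size() further down; the buffer layout belongs to
 * that caller, not to this helper):
 *
 *	__le64 snapid = cpu_to_le64(CEPH_NOSNAP);
 *	struct { u8 order; __le64 size; } __attribute__ ((packed)) buf;
 *
 *	ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
 *				  &rbd_dev->header_oloc, "get_size",
 *				  &snapid, sizeof(snapid),
 *				  &buf, sizeof(buf));
 *
 * On success ret is the number of reply bytes copied into buf.
 */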
3581 * lock_rwsem must be held for read
3583 static int rbd_wait_state_locked(struct rbd_device *rbd_dev, bool may_acquire)
	DEFINE_WAIT(wait);
	unsigned long timeout;
	int ret = 0;
3589 if (test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags))
3590 return -EBLACKLISTED;
	if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED)
		return 0;

	if (!may_acquire) {
		rbd_warn(rbd_dev, "exclusive lock required");
		return -EROFS;
	}

	do {
		/*
		 * Note the use of mod_delayed_work() in rbd_acquire_lock()
		 * and cancel_delayed_work() in wake_requests().
		 */
3605 dout("%s rbd_dev %p queueing lock_dwork\n", __func__, rbd_dev);
3606 queue_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0);
3607 prepare_to_wait_exclusive(&rbd_dev->lock_waitq, &wait,
3608 TASK_UNINTERRUPTIBLE);
3609 up_read(&rbd_dev->lock_rwsem);
3610 timeout = schedule_timeout(ceph_timeout_jiffies(
3611 rbd_dev->opts->lock_timeout));
3612 down_read(&rbd_dev->lock_rwsem);
3613 if (test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)) {
			ret = -EBLACKLISTED;
			break;
		}
		if (!timeout) {
			rbd_warn(rbd_dev, "timed out waiting for lock");
			ret = -ETIMEDOUT;
			break;
		}
3622 } while (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED);
	finish_wait(&rbd_dev->lock_waitq, &wait);
	return ret;
}
3628 static void rbd_queue_workfn(struct work_struct *work)
3630 struct request *rq = blk_mq_rq_from_pdu(work);
3631 struct rbd_device *rbd_dev = rq->q->queuedata;
3632 struct rbd_img_request *img_request;
3633 struct ceph_snap_context *snapc = NULL;
3634 u64 offset = (u64)blk_rq_pos(rq) << SECTOR_SHIFT;
3635 u64 length = blk_rq_bytes(rq);
3636 enum obj_operation_type op_type;
	u64 mapping_size;
	bool must_be_locked;
	int result;
	switch (req_op(rq)) {
	case REQ_OP_DISCARD:
	case REQ_OP_WRITE_ZEROES:
		op_type = OBJ_OP_DISCARD;
		break;
	case REQ_OP_WRITE:
		op_type = OBJ_OP_WRITE;
		break;
	case REQ_OP_READ:
		op_type = OBJ_OP_READ;
		break;
	default:
		dout("%s: non-fs request type %d\n", __func__, req_op(rq));
		result = -EIO;
		goto err;
	}
3658 /* Ignore/skip any zero-length requests */
3661 dout("%s: zero-length request\n", __func__);
3666 rbd_assert(op_type == OBJ_OP_READ ||
3667 rbd_dev->spec->snap_id == CEPH_NOSNAP);
3670 * Quit early if the mapped snapshot no longer exists. It's
3671 * still possible the snapshot will have disappeared by the
3672 * time our request arrives at the osd, but there's no sense in
3673 * sending it if we already know.
3675 if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags)) {
3676 dout("request for non-existent snapshot");
		rbd_assert(rbd_dev->spec->snap_id != CEPH_NOSNAP);
		result = -ENXIO;
		goto err_rq;
	}
3682 if (offset && length > U64_MAX - offset + 1) {
3683 rbd_warn(rbd_dev, "bad request range (%llu~%llu)", offset,
			 length);
		result = -EINVAL;
		goto err_rq;	/* Shouldn't happen */
	}
3689 blk_mq_start_request(rq);
3691 down_read(&rbd_dev->header_rwsem);
3692 mapping_size = rbd_dev->mapping.size;
3693 if (op_type != OBJ_OP_READ) {
3694 snapc = rbd_dev->header.snapc;
3695 ceph_get_snap_context(snapc);
3697 up_read(&rbd_dev->header_rwsem);
3699 if (offset + length > mapping_size) {
3700 rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)", offset,
			 length, mapping_size);
		result = -EIO;
		goto err_rq;
	}
	must_be_locked =
	    (rbd_dev->header.features & RBD_FEATURE_EXCLUSIVE_LOCK) &&
3708 (op_type != OBJ_OP_READ || rbd_dev->opts->lock_on_read);
3709 if (must_be_locked) {
3710 down_read(&rbd_dev->lock_rwsem);
3711 result = rbd_wait_state_locked(rbd_dev,
						!rbd_dev->opts->exclusive);
		if (result)
			goto err_unlock;
	}
	img_request = rbd_img_request_create(rbd_dev, op_type, snapc);
	if (!img_request) {
		result = -ENOMEM;
		goto err_unlock;
	}
3722 img_request->rq = rq;
3723 snapc = NULL; /* img_request consumes a ref */
3725 if (op_type == OBJ_OP_DISCARD)
3726 result = rbd_img_fill_nodata(img_request, offset, length);
		result = rbd_img_fill_from_bio(img_request, offset, length,
					       rq->bio);
	if (result)
		goto err_img_request;
3733 rbd_img_request_submit(img_request);
	if (must_be_locked)
		up_read(&rbd_dev->lock_rwsem);
	return;

err_img_request:
	rbd_img_request_put(img_request);
err_unlock:
	if (must_be_locked)
		up_read(&rbd_dev->lock_rwsem);
err_rq:
	if (result)
		rbd_warn(rbd_dev, "%s %llx at %llx result %d",
			 obj_op_name(op_type), length, offset, result);
	ceph_put_snap_context(snapc);
err:
	blk_mq_end_request(rq, errno_to_blk_status(result));
}
3752 static blk_status_t rbd_queue_rq(struct blk_mq_hw_ctx *hctx,
3753 const struct blk_mq_queue_data *bd)
3755 struct request *rq = bd->rq;
3756 struct work_struct *work = blk_mq_rq_to_pdu(rq);
	queue_work(rbd_wq, work);
	return BLK_STS_OK;
}
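/*
 * Editorial note on the blk-mq plumbing: the per-request pdu (sized
 * via tag_set.cmd_size below) is a work_struct that rbd_init_request()
 * points at rbd_queue_workfn(), so rbd_queue_rq() only bounces the
 * request to the rbd workqueue, where sleeping (lock waits, snap
 * context handling) is allowed.
 */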
3762 static void rbd_free_disk(struct rbd_device *rbd_dev)
3764 blk_cleanup_queue(rbd_dev->disk->queue);
3765 blk_mq_free_tag_set(&rbd_dev->tag_set);
3766 put_disk(rbd_dev->disk);
3767 rbd_dev->disk = NULL;
3770 static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
3771 struct ceph_object_id *oid,
3772 struct ceph_object_locator *oloc,
3773 void *buf, int buf_len)
3776 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3777 struct ceph_osd_request *req;
3778 struct page **pages;
3779 int num_pages = calc_pages_for(0, buf_len);
	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_KERNEL);
	if (!req)
		return -ENOMEM;
3786 ceph_oid_copy(&req->r_base_oid, oid);
3787 ceph_oloc_copy(&req->r_base_oloc, oloc);
3788 req->r_flags = CEPH_OSD_FLAG_READ;
3790 pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
3791 if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out_req;
	}
3796 osd_req_op_extent_init(req, 0, CEPH_OSD_OP_READ, 0, buf_len, 0, 0);
3797 osd_req_op_extent_osd_data_pages(req, 0, pages, buf_len, 0, false,
	ret = ceph_osdc_alloc_messages(req, GFP_KERNEL);
	if (ret)
		goto out_req;
3804 ceph_osdc_start_request(osdc, req, false);
3805 ret = ceph_osdc_wait_request(osdc, req);
3807 ceph_copy_from_page_vector(pages, buf, 0, ret);
out_req:
	ceph_osdc_put_request(req);
	return ret;
}
3815 * Read the complete header for the given rbd device. On successful
3816 * return, the rbd_dev->header field will contain up-to-date
3817 * information about the image.
3819 static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev)
	struct rbd_image_header_ondisk *ondisk = NULL;
	u32 snap_count = 0;
	u64 names_size = 0;
	u32 want_count;
	int ret;
3828 * The complete header will include an array of its 64-bit
3829 * snapshot ids, followed by the names of those snapshots as
3830 * a contiguous block of NUL-terminated strings. Note that
3831 * the number of snapshots could change by the time we read
3832 * it in, in which case we re-read it.
	 */
	do {
		size_t size;

		kfree(ondisk);

		size = sizeof (*ondisk);
3840 size += snap_count * sizeof (struct rbd_image_snap_ondisk);
		size += names_size;
		ondisk = kmalloc(size, GFP_KERNEL);
		if (!ondisk)
			return -ENOMEM;
3846 ret = rbd_obj_read_sync(rbd_dev, &rbd_dev->header_oid,
					&rbd_dev->header_oloc, ondisk, size);
		if (ret < 0)
			goto out;
3850 if ((size_t)ret < size) {
			ret = -ENXIO;
			rbd_warn(rbd_dev, "short header read (want %zd got %d)",
				 size, ret);
			goto out;
		}
3856 if (!rbd_dev_ondisk_valid(ondisk)) {
			ret = -ENXIO;
			rbd_warn(rbd_dev, "invalid header");
			goto out;
		}
3862 names_size = le64_to_cpu(ondisk->snap_names_len);
3863 want_count = snap_count;
3864 snap_count = le32_to_cpu(ondisk->snap_count);
3865 } while (snap_count != want_count);
	ret = rbd_header_from_disk(rbd_dev, ondisk);
out:
	kfree(ondisk);
	return ret;
}
3875 * Clear the rbd device's EXISTS flag if the snapshot it's mapped to
3876 * has disappeared from the (just updated) snapshot context.
 */
static void rbd_exists_validate(struct rbd_device *rbd_dev)
{
	u64 snap_id;

	if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags))
		return;
3885 snap_id = rbd_dev->spec->snap_id;
	if (snap_id == CEPH_NOSNAP)
		return;
3889 if (rbd_dev_snap_index(rbd_dev, snap_id) == BAD_SNAP_INDEX)
3890 clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
3893 static void rbd_dev_update_size(struct rbd_device *rbd_dev)
3898 * If EXISTS is not set, rbd_dev->disk may be NULL, so don't
3899 * try to update its size. If REMOVING is set, updating size
3900 * is just useless work since the device can't be opened.
3902 if (test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags) &&
3903 !test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags)) {
3904 size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
3905 dout("setting size to %llu sectors", (unsigned long long)size);
3906 set_capacity(rbd_dev->disk, size);
3907 revalidate_disk(rbd_dev->disk);
3911 static int rbd_dev_refresh(struct rbd_device *rbd_dev)
3916 down_write(&rbd_dev->header_rwsem);
3917 mapping_size = rbd_dev->mapping.size;
	ret = rbd_dev_header_info(rbd_dev);
	if (ret)
		goto out;
3924 * If there is a parent, see if it has disappeared due to the
3925 * mapped image getting flattened.
3927 if (rbd_dev->parent) {
		ret = rbd_dev_v2_parent_info(rbd_dev);
		if (ret)
			goto out;
	}
3933 if (rbd_dev->spec->snap_id == CEPH_NOSNAP) {
		rbd_dev->mapping.size = rbd_dev->header.image_size;
	} else {
3936 /* validate mapped snapshot's EXISTS flag */
		rbd_exists_validate(rbd_dev);
	}

out:
3941 up_write(&rbd_dev->header_rwsem);
3942 if (!ret && mapping_size != rbd_dev->mapping.size)
3943 rbd_dev_update_size(rbd_dev);
3948 static int rbd_init_request(struct blk_mq_tag_set *set, struct request *rq,
3949 unsigned int hctx_idx, unsigned int numa_node)
3951 struct work_struct *work = blk_mq_rq_to_pdu(rq);
	INIT_WORK(work, rbd_queue_workfn);
	return 0;
}
3957 static const struct blk_mq_ops rbd_mq_ops = {
3958 .queue_rq = rbd_queue_rq,
3959 .init_request = rbd_init_request,
3962 static int rbd_init_disk(struct rbd_device *rbd_dev)
3964 struct gendisk *disk;
3965 struct request_queue *q;
3966 unsigned int objset_bytes =
3967 rbd_dev->layout.object_size * rbd_dev->layout.stripe_count;
3970 /* create gendisk info */
3971 disk = alloc_disk(single_major ?
3972 (1 << RBD_SINGLE_MAJOR_PART_SHIFT) :
			  RBD_MINORS_PER_MAJOR);
	if (!disk)
		return -ENOMEM;
	snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
		 rbd_dev->dev_id);
3979 disk->major = rbd_dev->major;
3980 disk->first_minor = rbd_dev->minor;
	if (single_major)
		disk->flags |= GENHD_FL_EXT_DEVT;
3983 disk->fops = &rbd_bd_ops;
3984 disk->private_data = rbd_dev;
3986 memset(&rbd_dev->tag_set, 0, sizeof(rbd_dev->tag_set));
3987 rbd_dev->tag_set.ops = &rbd_mq_ops;
3988 rbd_dev->tag_set.queue_depth = rbd_dev->opts->queue_depth;
3989 rbd_dev->tag_set.numa_node = NUMA_NO_NODE;
3990 rbd_dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
3991 rbd_dev->tag_set.nr_hw_queues = 1;
3992 rbd_dev->tag_set.cmd_size = sizeof(struct work_struct);
	err = blk_mq_alloc_tag_set(&rbd_dev->tag_set);
	if (err)
		goto out_disk;
	q = blk_mq_init_queue(&rbd_dev->tag_set);
	if (IS_ERR(q)) {
		err = PTR_ERR(q);
		goto out_tag_set;
	}
4004 blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
4005 /* QUEUE_FLAG_ADD_RANDOM is off by default for blk-mq */
4007 blk_queue_max_hw_sectors(q, objset_bytes >> SECTOR_SHIFT);
4008 q->limits.max_sectors = queue_max_hw_sectors(q);
4009 blk_queue_max_segments(q, USHRT_MAX);
4010 blk_queue_max_segment_size(q, UINT_MAX);
4011 blk_queue_io_min(q, objset_bytes);
4012 blk_queue_io_opt(q, objset_bytes);
4014 if (rbd_dev->opts->trim) {
4015 blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
4016 q->limits.discard_granularity = objset_bytes;
4017 blk_queue_max_discard_sectors(q, objset_bytes >> SECTOR_SHIFT);
4018 blk_queue_max_write_zeroes_sectors(q, objset_bytes >> SECTOR_SHIFT);
4021 if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC))
4022 q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES;
4025 * disk_release() expects a queue ref from add_disk() and will
4026 * put it. Hold an extra ref until add_disk() is called.
4028 WARN_ON(!blk_get_queue(q));
4030 q->queuedata = rbd_dev;
	rbd_dev->disk = disk;

	return 0;
out_tag_set:
	blk_mq_free_tag_set(&rbd_dev->tag_set);
out_disk:
	put_disk(disk);
	return err;
}
4046 static struct rbd_device *dev_to_rbd_dev(struct device *dev)
4048 return container_of(dev, struct rbd_device, dev);
4051 static ssize_t rbd_size_show(struct device *dev,
4052 struct device_attribute *attr, char *buf)
4054 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4056 return sprintf(buf, "%llu\n",
4057 (unsigned long long)rbd_dev->mapping.size);
4061 * Note this shows the features for whatever's mapped, which is not
4062 * necessarily the base image.
4064 static ssize_t rbd_features_show(struct device *dev,
4065 struct device_attribute *attr, char *buf)
4067 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4069 return sprintf(buf, "0x%016llx\n",
4070 (unsigned long long)rbd_dev->mapping.features);
4073 static ssize_t rbd_major_show(struct device *dev,
4074 struct device_attribute *attr, char *buf)
4076 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
	if (rbd_dev->major)
		return sprintf(buf, "%d\n", rbd_dev->major);

	return sprintf(buf, "(none)\n");
}
4084 static ssize_t rbd_minor_show(struct device *dev,
4085 struct device_attribute *attr, char *buf)
4087 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4089 return sprintf(buf, "%d\n", rbd_dev->minor);
4092 static ssize_t rbd_client_addr_show(struct device *dev,
4093 struct device_attribute *attr, char *buf)
4095 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4096 struct ceph_entity_addr *client_addr =
4097 ceph_client_addr(rbd_dev->rbd_client->client);
4099 return sprintf(buf, "%pISpc/%u\n", &client_addr->in_addr,
4100 le32_to_cpu(client_addr->nonce));
4103 static ssize_t rbd_client_id_show(struct device *dev,
4104 struct device_attribute *attr, char *buf)
4106 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4108 return sprintf(buf, "client%lld\n",
4109 ceph_client_gid(rbd_dev->rbd_client->client));
4112 static ssize_t rbd_cluster_fsid_show(struct device *dev,
4113 struct device_attribute *attr, char *buf)
4115 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4117 return sprintf(buf, "%pU\n", &rbd_dev->rbd_client->client->fsid);
4120 static ssize_t rbd_config_info_show(struct device *dev,
4121 struct device_attribute *attr, char *buf)
4123 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4125 return sprintf(buf, "%s\n", rbd_dev->config_info);
4128 static ssize_t rbd_pool_show(struct device *dev,
4129 struct device_attribute *attr, char *buf)
4131 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4133 return sprintf(buf, "%s\n", rbd_dev->spec->pool_name);
4136 static ssize_t rbd_pool_id_show(struct device *dev,
4137 struct device_attribute *attr, char *buf)
4139 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4141 return sprintf(buf, "%llu\n",
4142 (unsigned long long) rbd_dev->spec->pool_id);
4145 static ssize_t rbd_pool_ns_show(struct device *dev,
4146 struct device_attribute *attr, char *buf)
4148 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4150 return sprintf(buf, "%s\n", rbd_dev->spec->pool_ns ?: "");
4153 static ssize_t rbd_name_show(struct device *dev,
4154 struct device_attribute *attr, char *buf)
4156 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4158 if (rbd_dev->spec->image_name)
4159 return sprintf(buf, "%s\n", rbd_dev->spec->image_name);
4161 return sprintf(buf, "(unknown)\n");
4164 static ssize_t rbd_image_id_show(struct device *dev,
4165 struct device_attribute *attr, char *buf)
4167 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4169 return sprintf(buf, "%s\n", rbd_dev->spec->image_id);
4173 * Shows the name of the currently-mapped snapshot (or
4174 * RBD_SNAP_HEAD_NAME for the base image).
4176 static ssize_t rbd_snap_show(struct device *dev,
4177 struct device_attribute *attr,
4180 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4182 return sprintf(buf, "%s\n", rbd_dev->spec->snap_name);
4185 static ssize_t rbd_snap_id_show(struct device *dev,
4186 struct device_attribute *attr, char *buf)
4188 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4190 return sprintf(buf, "%llu\n", rbd_dev->spec->snap_id);
4194 * For a v2 image, shows the chain of parent images, separated by empty
4195 * lines. For v1 images or if there is no parent, shows "(no parent
4198 static ssize_t rbd_parent_show(struct device *dev,
4199 struct device_attribute *attr,
4202 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4205 if (!rbd_dev->parent)
4206 return sprintf(buf, "(no parent image)\n");
4208 for ( ; rbd_dev->parent; rbd_dev = rbd_dev->parent) {
4209 struct rbd_spec *spec = rbd_dev->parent_spec;
4211 count += sprintf(&buf[count], "%s"
4212 "pool_id %llu\npool_name %s\n"
4214 "image_id %s\nimage_name %s\n"
4215 "snap_id %llu\nsnap_name %s\n"
4217 !count ? "" : "\n", /* first? */
4218 spec->pool_id, spec->pool_name,
4219 spec->pool_ns ?: "",
4220 spec->image_id, spec->image_name ?: "(unknown)",
4221 spec->snap_id, spec->snap_name,
				 rbd_dev->parent_overlap);
	}

	return count;
}
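/*
 * Example "parent" attribute output for a clone (editorial; values
 * invented to match the format string above):
 *
 *	pool_id 2
 *	pool_name rbd
 *	pool_ns
 *	image_id 1bbd9b95a4c7
 *	image_name parent-image
 *	snap_id 4
 *	snap_name snap1
 *	overlap 1073741824
 *
 * For a longer chain the same block repeats, separated by blank lines.
 */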
4228 static ssize_t rbd_image_refresh(struct device *dev,
4229 struct device_attribute *attr,
4233 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
	int ret;

	ret = rbd_dev_refresh(rbd_dev);
	if (ret)
		return ret;

	return size;
}
4243 static DEVICE_ATTR(size, 0444, rbd_size_show, NULL);
4244 static DEVICE_ATTR(features, 0444, rbd_features_show, NULL);
4245 static DEVICE_ATTR(major, 0444, rbd_major_show, NULL);
4246 static DEVICE_ATTR(minor, 0444, rbd_minor_show, NULL);
4247 static DEVICE_ATTR(client_addr, 0444, rbd_client_addr_show, NULL);
4248 static DEVICE_ATTR(client_id, 0444, rbd_client_id_show, NULL);
4249 static DEVICE_ATTR(cluster_fsid, 0444, rbd_cluster_fsid_show, NULL);
4250 static DEVICE_ATTR(config_info, 0400, rbd_config_info_show, NULL);
4251 static DEVICE_ATTR(pool, 0444, rbd_pool_show, NULL);
4252 static DEVICE_ATTR(pool_id, 0444, rbd_pool_id_show, NULL);
4253 static DEVICE_ATTR(pool_ns, 0444, rbd_pool_ns_show, NULL);
4254 static DEVICE_ATTR(name, 0444, rbd_name_show, NULL);
4255 static DEVICE_ATTR(image_id, 0444, rbd_image_id_show, NULL);
4256 static DEVICE_ATTR(refresh, 0200, NULL, rbd_image_refresh);
4257 static DEVICE_ATTR(current_snap, 0444, rbd_snap_show, NULL);
4258 static DEVICE_ATTR(snap_id, 0444, rbd_snap_id_show, NULL);
4259 static DEVICE_ATTR(parent, 0444, rbd_parent_show, NULL);
4261 static struct attribute *rbd_attrs[] = {
4262 &dev_attr_size.attr,
4263 &dev_attr_features.attr,
4264 &dev_attr_major.attr,
4265 &dev_attr_minor.attr,
4266 &dev_attr_client_addr.attr,
4267 &dev_attr_client_id.attr,
4268 &dev_attr_cluster_fsid.attr,
4269 &dev_attr_config_info.attr,
4270 &dev_attr_pool.attr,
4271 &dev_attr_pool_id.attr,
4272 &dev_attr_pool_ns.attr,
4273 &dev_attr_name.attr,
4274 &dev_attr_image_id.attr,
4275 &dev_attr_current_snap.attr,
4276 &dev_attr_snap_id.attr,
4277 &dev_attr_parent.attr,
	&dev_attr_refresh.attr,
	NULL
};
static struct attribute_group rbd_attr_group = {
	.attrs = rbd_attrs,
};

static const struct attribute_group *rbd_attr_groups[] = {
	&rbd_attr_group,
	NULL
};
4291 static void rbd_dev_release(struct device *dev);
static const struct device_type rbd_device_type = {
	.name		= "rbd",
	.groups		= rbd_attr_groups,
	.release	= rbd_dev_release,
};
4299 static struct rbd_spec *rbd_spec_get(struct rbd_spec *spec)
	kref_get(&spec->kref);

	return spec;
}
4306 static void rbd_spec_free(struct kref *kref);
4307 static void rbd_spec_put(struct rbd_spec *spec)
	if (spec)
		kref_put(&spec->kref, rbd_spec_free);
}
4313 static struct rbd_spec *rbd_spec_alloc(void)
4315 struct rbd_spec *spec;
4317 spec = kzalloc(sizeof (*spec), GFP_KERNEL);
4321 spec->pool_id = CEPH_NOPOOL;
4322 spec->snap_id = CEPH_NOSNAP;
4323 kref_init(&spec->kref);
4328 static void rbd_spec_free(struct kref *kref)
4330 struct rbd_spec *spec = container_of(kref, struct rbd_spec, kref);
4332 kfree(spec->pool_name);
4333 kfree(spec->pool_ns);
4334 kfree(spec->image_id);
4335 kfree(spec->image_name);
	kfree(spec->snap_name);
	kfree(spec);
}
4340 static void rbd_dev_free(struct rbd_device *rbd_dev)
4342 WARN_ON(rbd_dev->watch_state != RBD_WATCH_STATE_UNREGISTERED);
4343 WARN_ON(rbd_dev->lock_state != RBD_LOCK_STATE_UNLOCKED);
4345 ceph_oid_destroy(&rbd_dev->header_oid);
4346 ceph_oloc_destroy(&rbd_dev->header_oloc);
4347 kfree(rbd_dev->config_info);
4349 rbd_put_client(rbd_dev->rbd_client);
4350 rbd_spec_put(rbd_dev->spec);
4351 kfree(rbd_dev->opts);
4355 static void rbd_dev_release(struct device *dev)
4357 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4358 bool need_put = !!rbd_dev->opts;
	if (need_put) {
		destroy_workqueue(rbd_dev->task_wq);
		ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
	}
4365 rbd_dev_free(rbd_dev);
	/*
	 * This is racy, but way better than doing the module_put()
	 * outside of the release callback.  The race window is pretty
	 * small, so doing something similar to dm (dm-builtin.c) is
	 * overkill.
	 */
	if (need_put)
		module_put(THIS_MODULE);
}
4376 static struct rbd_device *__rbd_dev_create(struct rbd_client *rbdc,
4377 struct rbd_spec *spec)
4379 struct rbd_device *rbd_dev;
4381 rbd_dev = kzalloc(sizeof(*rbd_dev), GFP_KERNEL);
4385 spin_lock_init(&rbd_dev->lock);
4386 INIT_LIST_HEAD(&rbd_dev->node);
4387 init_rwsem(&rbd_dev->header_rwsem);
4389 rbd_dev->header.data_pool_id = CEPH_NOPOOL;
4390 ceph_oid_init(&rbd_dev->header_oid);
4391 rbd_dev->header_oloc.pool = spec->pool_id;
4392 if (spec->pool_ns) {
4393 WARN_ON(!*spec->pool_ns);
4394 rbd_dev->header_oloc.pool_ns =
4395 ceph_find_or_create_string(spec->pool_ns,
4396 strlen(spec->pool_ns));
4399 mutex_init(&rbd_dev->watch_mutex);
4400 rbd_dev->watch_state = RBD_WATCH_STATE_UNREGISTERED;
4401 INIT_DELAYED_WORK(&rbd_dev->watch_dwork, rbd_reregister_watch);
4403 init_rwsem(&rbd_dev->lock_rwsem);
4404 rbd_dev->lock_state = RBD_LOCK_STATE_UNLOCKED;
4405 INIT_WORK(&rbd_dev->acquired_lock_work, rbd_notify_acquired_lock);
4406 INIT_WORK(&rbd_dev->released_lock_work, rbd_notify_released_lock);
4407 INIT_DELAYED_WORK(&rbd_dev->lock_dwork, rbd_acquire_lock);
4408 INIT_WORK(&rbd_dev->unlock_work, rbd_release_lock_work);
4409 init_waitqueue_head(&rbd_dev->lock_waitq);
4411 rbd_dev->dev.bus = &rbd_bus_type;
4412 rbd_dev->dev.type = &rbd_device_type;
4413 rbd_dev->dev.parent = &rbd_root_dev;
4414 device_initialize(&rbd_dev->dev);
4416 rbd_dev->rbd_client = rbdc;
4417 rbd_dev->spec = spec;
4423 * Create a mapping rbd_dev.
4425 static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
4426 struct rbd_spec *spec,
4427 struct rbd_options *opts)
4429 struct rbd_device *rbd_dev;
4431 rbd_dev = __rbd_dev_create(rbdc, spec);
4435 rbd_dev->opts = opts;
4437 /* get an id and fill in device name */
4438 rbd_dev->dev_id = ida_simple_get(&rbd_dev_id_ida, 0,
4439 minor_to_rbd_dev_id(1 << MINORBITS),
	if (rbd_dev->dev_id < 0)
		goto fail_rbd_dev;
4444 sprintf(rbd_dev->name, RBD_DRV_NAME "%d", rbd_dev->dev_id);
4445 rbd_dev->task_wq = alloc_ordered_workqueue("%s-tasks", WQ_MEM_RECLAIM,
	if (!rbd_dev->task_wq)
		goto fail_dev_id;
4450 /* we have a ref from do_rbd_add() */
4451 __module_get(THIS_MODULE);
	dout("%s rbd_dev %p dev_id %d\n", __func__, rbd_dev, rbd_dev->dev_id);
	return rbd_dev;

fail_dev_id:
	ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
fail_rbd_dev:
	rbd_dev_free(rbd_dev);
	return NULL;
}
4463 static void rbd_dev_destroy(struct rbd_device *rbd_dev)
4466 put_device(&rbd_dev->dev);
4470 * Get the size and object order for an image snapshot, or if
4471 * snap_id is CEPH_NOSNAP, gets this information for the base
4474 static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
4475 u8 *order, u64 *snap_size)
4477 __le64 snapid = cpu_to_le64(snap_id);
4482 } __attribute__ ((packed)) size_buf = { 0 };
4484 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
4485 &rbd_dev->header_oloc, "get_size",
4486 &snapid, sizeof(snapid),
4487 &size_buf, sizeof(size_buf));
4488 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		return ret;
	if (ret < sizeof (size_buf))
		return -ERANGE;

	if (order) {
		*order = size_buf.order;
		dout(" order %u", (unsigned int)*order);
	}
4498 *snap_size = le64_to_cpu(size_buf.size);
4500 dout(" snap_id 0x%016llx snap_size = %llu\n",
4501 (unsigned long long)snap_id,
	     (unsigned long long)*snap_size);

	return 0;
}
4507 static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev)
4509 return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
4510 &rbd_dev->header.obj_order,
4511 &rbd_dev->header.image_size);
4514 static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
	reply_buf = kzalloc(RBD_OBJ_PREFIX_LEN_MAX, GFP_KERNEL);
	if (!reply_buf)
		return -ENOMEM;
4524 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
4525 &rbd_dev->header_oloc, "get_object_prefix",
4526 NULL, 0, reply_buf, RBD_OBJ_PREFIX_LEN_MAX);
4527 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4532 rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
4533 p + ret, NULL, GFP_NOIO);
4536 if (IS_ERR(rbd_dev->header.object_prefix)) {
4537 ret = PTR_ERR(rbd_dev->header.object_prefix);
4538 rbd_dev->header.object_prefix = NULL;
4540 dout(" object_prefix = %s\n", rbd_dev->header.object_prefix);
4548 static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
4551 __le64 snapid = cpu_to_le64(snap_id);
4555 } __attribute__ ((packed)) features_buf = { 0 };
4559 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
4560 &rbd_dev->header_oloc, "get_features",
4561 &snapid, sizeof(snapid),
4562 &features_buf, sizeof(features_buf));
4563 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4566 if (ret < sizeof (features_buf))
4569 unsup = le64_to_cpu(features_buf.incompat) & ~RBD_FEATURES_SUPPORTED;
4571 rbd_warn(rbd_dev, "image uses unsupported features: 0x%llx",
4576 *snap_features = le64_to_cpu(features_buf.features);
4578 dout(" snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n",
4579 (unsigned long long)snap_id,
4580 (unsigned long long)*snap_features,
4581 (unsigned long long)le64_to_cpu(features_buf.incompat));
4586 static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
4588 return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
4589 &rbd_dev->header.features);
4592 struct parent_image_info {
4594 const char *pool_ns;
4595 const char *image_id;
4603 * The caller is responsible for @pii.
4605 static int decode_parent_image_spec(void **p, void *end,
4606 struct parent_image_info *pii)
4612 ret = ceph_start_decoding(p, end, 1, "ParentImageSpec",
				  &struct_v, &struct_len);
	if (ret)
		return ret;
4617 ceph_decode_64_safe(p, end, pii->pool_id, e_inval);
4618 pii->pool_ns = ceph_extract_encoded_string(p, end, NULL, GFP_KERNEL);
4619 if (IS_ERR(pii->pool_ns)) {
4620 ret = PTR_ERR(pii->pool_ns);
4621 pii->pool_ns = NULL;
4624 pii->image_id = ceph_extract_encoded_string(p, end, NULL, GFP_KERNEL);
4625 if (IS_ERR(pii->image_id)) {
4626 ret = PTR_ERR(pii->image_id);
4627 pii->image_id = NULL;
4630 ceph_decode_64_safe(p, end, pii->snap_id, e_inval);
4637 static int __get_parent_info(struct rbd_device *rbd_dev,
4638 struct page *req_page,
4639 struct page *reply_page,
4640 struct parent_image_info *pii)
4642 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4643 size_t reply_len = PAGE_SIZE;
4647 ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
4648 "rbd", "parent_get", CEPH_OSD_FLAG_READ,
4649 req_page, sizeof(u64), reply_page, &reply_len);
	if (ret)
		return ret == -EOPNOTSUPP ? 1 : ret;
4653 p = page_address(reply_page);
4654 end = p + reply_len;
	ret = decode_parent_image_spec(&p, end, pii);
	if (ret)
		return ret;
4659 ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
4660 "rbd", "parent_overlap_get", CEPH_OSD_FLAG_READ,
4661 req_page, sizeof(u64), reply_page, &reply_len);
4665 p = page_address(reply_page);
4666 end = p + reply_len;
4667 ceph_decode_8_safe(&p, end, pii->has_overlap, e_inval);
4668 if (pii->has_overlap)
4669 ceph_decode_64_safe(&p, end, pii->overlap, e_inval);
4678 * The caller is responsible for @pii.
4680 static int __get_parent_info_legacy(struct rbd_device *rbd_dev,
4681 struct page *req_page,
4682 struct page *reply_page,
4683 struct parent_image_info *pii)
4685 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4686 size_t reply_len = PAGE_SIZE;
4690 ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
4691 "rbd", "get_parent", CEPH_OSD_FLAG_READ,
4692 req_page, sizeof(u64), reply_page, &reply_len);
4696 p = page_address(reply_page);
4697 end = p + reply_len;
4698 ceph_decode_64_safe(&p, end, pii->pool_id, e_inval);
4699 pii->image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
4700 if (IS_ERR(pii->image_id)) {
4701 ret = PTR_ERR(pii->image_id);
4702 pii->image_id = NULL;
4705 ceph_decode_64_safe(&p, end, pii->snap_id, e_inval);
4706 pii->has_overlap = true;
4707 ceph_decode_64_safe(&p, end, pii->overlap, e_inval);
4715 static int get_parent_info(struct rbd_device *rbd_dev,
4716 struct parent_image_info *pii)
4718 struct page *req_page, *reply_page;
	req_page = alloc_page(GFP_KERNEL);
	if (!req_page)
		return -ENOMEM;
4726 reply_page = alloc_page(GFP_KERNEL);
	if (!reply_page) {
		__free_page(req_page);
		return -ENOMEM;
	}
4732 p = page_address(req_page);
4733 ceph_encode_64(&p, rbd_dev->spec->snap_id);
4734 ret = __get_parent_info(rbd_dev, req_page, reply_page, pii);
	if (ret > 0)
		ret = __get_parent_info_legacy(rbd_dev, req_page, reply_page,
					       pii);
4739 __free_page(req_page);
4740 __free_page(reply_page);
4744 static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
4746 struct rbd_spec *parent_spec;
4747 struct parent_image_info pii = { 0 };
	parent_spec = rbd_spec_alloc();
	if (!parent_spec)
		return -ENOMEM;
4754 ret = get_parent_info(rbd_dev, &pii);
4758 dout("%s pool_id %llu pool_ns %s image_id %s snap_id %llu has_overlap %d overlap %llu\n",
4759 __func__, pii.pool_id, pii.pool_ns, pii.image_id, pii.snap_id,
4760 pii.has_overlap, pii.overlap);
4762 if (pii.pool_id == CEPH_NOPOOL || !pii.has_overlap) {
* Either the parent never existed, or we have a record of it
* but the image got flattened, so it no longer has a parent.
* When the parent of a layered image disappears, we
* immediately set the overlap to 0.  The effect of this is
* that all new requests will be treated as if the image had
* no parent.
*
* If !pii.has_overlap, the parent image spec is not
* applicable.  It's there to avoid duplication in each
* snapshot record.
4776 if (rbd_dev->parent_overlap) {
4777 rbd_dev->parent_overlap = 0;
4778 rbd_dev_parent_put(rbd_dev);
4779 pr_info("%s: clone image has been flattened\n",
4780 rbd_dev->disk->disk_name);
4783 goto out; /* No parent? No problem. */
4786 /* The ceph file layout needs to fit pool id in 32 bits */
4789 if (pii.pool_id > (u64)U32_MAX) {
4790 rbd_warn(NULL, "parent pool id too large (%llu > %u)",
(unsigned long long)pii.pool_id, U32_MAX);
ret = -EIO;
goto out_err;
}
* The parent won't change, except when the clone is
* flattened (which is already handled above).  So we only need
* to record the parent spec if we have not already done so.
4800 if (!rbd_dev->parent_spec) {
4801 parent_spec->pool_id = pii.pool_id;
if (pii.pool_ns && *pii.pool_ns) {
parent_spec->pool_ns = pii.pool_ns;
pii.pool_ns = NULL;
}
4806 parent_spec->image_id = pii.image_id;
4807 pii.image_id = NULL;
4808 parent_spec->snap_id = pii.snap_id;
4810 rbd_dev->parent_spec = parent_spec;
4811 parent_spec = NULL; /* rbd_dev now owns this */
4815 * We always update the parent overlap. If it's zero we issue
4816 * a warning, as we will proceed as if there was no parent.
4820 /* refresh, careful to warn just once */
if (rbd_dev->parent_overlap)
rbd_warn(rbd_dev,
"clone now standalone (overlap became 0)");
4826 rbd_warn(rbd_dev, "clone is standalone (overlap 0)");
4829 rbd_dev->parent_overlap = pii.overlap;
out:
ret = 0;
out_err:
kfree(pii.pool_ns);
kfree(pii.image_id);
rbd_spec_put(parent_spec);
return ret;
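/*
 * Illustrative example of the overlap semantics: with a parent
 * overlap of 1 GiB, reads of extents below 1 GiB that the clone has
 * not yet written fall through to the parent image; beyond 1 GiB the
 * image behaves as if it had no parent at all.
 */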
4840 static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
struct {
__le64 stripe_unit;
__le64 stripe_count;
} __attribute__ ((packed)) striping_info_buf = { 0 };
4846 size_t size = sizeof (striping_info_buf);
4850 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
4851 &rbd_dev->header_oloc, "get_stripe_unit_count",
4852 NULL, 0, &striping_info_buf, size);
4853 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4859 p = &striping_info_buf;
4860 rbd_dev->header.stripe_unit = ceph_decode_64(&p);
4861 rbd_dev->header.stripe_count = ceph_decode_64(&p);
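/*
 * Illustrative STRIPINGV2 example: with a stripe_unit of 64 KiB and a
 * stripe_count of 8, consecutive 64 KiB chunks of the image are fanned
 * out across 8 successive objects before advancing to the next object
 * set.  Without fancy striping this degenerates to stripe_unit ==
 * object size and stripe_count == 1.
 */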
4865 static int rbd_dev_v2_data_pool(struct rbd_device *rbd_dev)
4867 __le64 data_pool_id;
4870 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
4871 &rbd_dev->header_oloc, "get_data_pool",
4872 NULL, 0, &data_pool_id, sizeof(data_pool_id));
4875 if (ret < sizeof(data_pool_id))
4878 rbd_dev->header.data_pool_id = le64_to_cpu(data_pool_id);
4879 WARN_ON(rbd_dev->header.data_pool_id == CEPH_NOPOOL);
4883 static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
4885 CEPH_DEFINE_OID_ONSTACK(oid);
4886 size_t image_id_size;
4891 void *reply_buf = NULL;
4893 char *image_name = NULL;
4896 rbd_assert(!rbd_dev->spec->image_name);
4898 len = strlen(rbd_dev->spec->image_id);
4899 image_id_size = sizeof (__le32) + len;
image_id = kmalloc(image_id_size, GFP_KERNEL);
if (!image_id)
return NULL;

p = image_id;
end = image_id + image_id_size;
ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32)len);
4908 size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX;
4909 reply_buf = kmalloc(size, GFP_KERNEL);
4913 ceph_oid_printf(&oid, "%s", RBD_DIRECTORY);
4914 ret = rbd_obj_method_sync(rbd_dev, &oid, &rbd_dev->header_oloc,
"dir_get_name", image_id, image_id_size,
reply_buf, size);
if (ret < 0)
goto out;
p = reply_buf;
end = reply_buf + ret;
4922 image_name = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
if (IS_ERR(image_name))
image_name = NULL;
else
dout("%s: name is %s len is %zd\n", __func__, image_name, len);
out:
kfree(reply_buf);
kfree(image_id);
return image_name;
4934 static u64 rbd_v1_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
4936 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
4937 const char *snap_name;
4940 /* Skip over names until we find the one we are looking for */
4942 snap_name = rbd_dev->header.snap_names;
4943 while (which < snapc->num_snaps) {
4944 if (!strcmp(name, snap_name))
4945 return snapc->snaps[which];
4946 snap_name += strlen(snap_name) + 1;
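/*
 * The format 1 snapshot names are kept in one contiguous buffer of
 * consecutive NUL-terminated strings (e.g. "snap1\0snap2\0snap3\0"),
 * indexed in step with snapc->snaps[]; hence the strlen() + 1 walk
 * above.
 */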
4952 static u64 rbd_v2_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
4954 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
4959 for (which = 0; !found && which < snapc->num_snaps; which++) {
4960 const char *snap_name;
4962 snap_id = snapc->snaps[which];
4963 snap_name = rbd_dev_v2_snap_name(rbd_dev, snap_id);
4964 if (IS_ERR(snap_name)) {
4965 /* ignore no-longer existing snapshots */
if (PTR_ERR(snap_name) == -ENOENT)
continue;
return CEPH_NOSNAP;
}

found = !strcmp(name, snap_name);
kfree(snap_name);
}
4974 return found ? snap_id : CEPH_NOSNAP;
4978 * Assumes name is never RBD_SNAP_HEAD_NAME; returns CEPH_NOSNAP if
4979 * no snapshot by that name is found, or if an error occurs.
4981 static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
4983 if (rbd_dev->image_format == 1)
4984 return rbd_v1_snap_id_by_name(rbd_dev, name);
4986 return rbd_v2_snap_id_by_name(rbd_dev, name);
4990 * An image being mapped will have everything but the snap id.
4992 static int rbd_spec_fill_snap_id(struct rbd_device *rbd_dev)
4994 struct rbd_spec *spec = rbd_dev->spec;
4996 rbd_assert(spec->pool_id != CEPH_NOPOOL && spec->pool_name);
4997 rbd_assert(spec->image_id && spec->image_name);
4998 rbd_assert(spec->snap_name);
5000 if (strcmp(spec->snap_name, RBD_SNAP_HEAD_NAME)) {
5003 snap_id = rbd_snap_id_by_name(rbd_dev, spec->snap_name);
if (snap_id == CEPH_NOSNAP)
return -ENOENT;

spec->snap_id = snap_id;
} else {
spec->snap_id = CEPH_NOSNAP;
}

return 0;
5016 * A parent image will have all ids but none of the names.
5018 * All names in an rbd spec are dynamically allocated. It's OK if we
5019 * can't figure out the name for an image id.
5021 static int rbd_spec_fill_names(struct rbd_device *rbd_dev)
5023 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
5024 struct rbd_spec *spec = rbd_dev->spec;
5025 const char *pool_name;
5026 const char *image_name;
5027 const char *snap_name;
5030 rbd_assert(spec->pool_id != CEPH_NOPOOL);
5031 rbd_assert(spec->image_id);
5032 rbd_assert(spec->snap_id != CEPH_NOSNAP);
5034 /* Get the pool name; we have to make our own copy of this */
pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, spec->pool_id);
if (!pool_name) {
rbd_warn(rbd_dev, "no pool with id %llu", spec->pool_id);
return -EIO;
}
pool_name = kstrdup(pool_name, GFP_KERNEL);
if (!pool_name)
return -ENOMEM;
5045 /* Fetch the image name; tolerate failure here */
image_name = rbd_dev_image_name(rbd_dev);
if (!image_name)
rbd_warn(rbd_dev, "unable to get image name");
5051 /* Fetch the snapshot name */
5053 snap_name = rbd_snap_name(rbd_dev, spec->snap_id);
5054 if (IS_ERR(snap_name)) {
ret = PTR_ERR(snap_name);
goto out_err;
}

spec->pool_name = pool_name;
spec->image_name = image_name;
spec->snap_name = snap_name;
return 0;

out_err:
kfree(image_name);
kfree(pool_name);
return ret;
5071 static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
5080 struct ceph_snap_context *snapc;
5084 * We'll need room for the seq value (maximum snapshot id),
5085 * snapshot count, and array of that many snapshot ids.
5086 * For now we have a fixed upper limit on the number we're
5087 * prepared to receive.
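* (With RBD_MAX_SNAP_COUNT of 510, the buffer below works out to
* sizeof(__le64) + sizeof(__le32) + 510 * sizeof(__le64) =
* 8 + 4 + 4080 = 4092 bytes, which fits in a single 4 KiB page.)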
5089 size = sizeof (__le64) + sizeof (__le32) +
5090 RBD_MAX_SNAP_COUNT * sizeof (__le64);
5091 reply_buf = kzalloc(size, GFP_KERNEL);
5095 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
5096 &rbd_dev->header_oloc, "get_snapcontext",
5097 NULL, 0, reply_buf, size);
5098 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
5103 end = reply_buf + ret;
5105 ceph_decode_64_safe(&p, end, seq, out);
5106 ceph_decode_32_safe(&p, end, snap_count, out);
5109 * Make sure the reported number of snapshot ids wouldn't go
5110 * beyond the end of our buffer. But before checking that,
5111 * make sure the computed size of the snapshot context we
5112 * allocate is representable in a size_t.
if (snap_count > (SIZE_MAX - sizeof (struct ceph_snap_context))
/ sizeof (u64))
goto out;
if (!ceph_has_room(&p, end, snap_count * sizeof (__le64)))
goto out;
snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
if (!snapc) {
ret = -ENOMEM;
goto out;
}
snapc->seq = seq;
for (i = 0; i < snap_count; i++)
5130 snapc->snaps[i] = ceph_decode_64(&p);
5132 ceph_put_snap_context(rbd_dev->header.snapc);
5133 rbd_dev->header.snapc = snapc;
5135 dout(" snap context seq = %llu, snap_count = %u\n",
5136 (unsigned long long)seq, (unsigned int)snap_count);
static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
u64 snap_id)
5154 size = sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN;
reply_buf = kmalloc(size, GFP_KERNEL);
if (!reply_buf)
return ERR_PTR(-ENOMEM);
5159 snapid = cpu_to_le64(snap_id);
5160 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
5161 &rbd_dev->header_oloc, "get_snapshot_name",
5162 &snapid, sizeof(snapid), reply_buf, size);
5163 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
if (ret < 0) {
snap_name = ERR_PTR(ret);
goto out;
}

p = reply_buf;
5170 end = reply_buf + ret;
5171 snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
if (IS_ERR(snap_name))
goto out;
dout(" snap_id 0x%016llx snap_name = %s\n",
(unsigned long long)snap_id, snap_name);
out:
kfree(reply_buf);
return snap_name;
5183 static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev)
5185 bool first_time = rbd_dev->header.object_prefix == NULL;
ret = rbd_dev_v2_image_size(rbd_dev);
if (ret)
return ret;

if (first_time) {
ret = rbd_dev_v2_header_onetime(rbd_dev);
if (ret)
return ret;
}

ret = rbd_dev_v2_snap_context(rbd_dev);
5199 if (ret && first_time) {
5200 kfree(rbd_dev->header.object_prefix);
5201 rbd_dev->header.object_prefix = NULL;
5207 static int rbd_dev_header_info(struct rbd_device *rbd_dev)
5209 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
5211 if (rbd_dev->image_format == 1)
5212 return rbd_dev_v1_header_info(rbd_dev);
5214 return rbd_dev_v2_header_info(rbd_dev);
5218 * Skips over white space at *buf, and updates *buf to point to the
5219 * first found non-space character (if any). Returns the length of
5220 * the token (string of non-white space characters) found. Note
5221 * that *buf must be terminated with '\0'.
5223 static inline size_t next_token(const char **buf)
5226 * These are the characters that produce nonzero for
5227 * isspace() in the "C" and "POSIX" locales.
5229 const char *spaces = " \f\n\r\t\v";
5231 *buf += strspn(*buf, spaces); /* Find start of token */
5233 return strcspn(*buf, spaces); /* Return token length */
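/*
 * For example, with *buf pointing at "  pool image", next_token()
 * advances *buf past the leading spaces to "pool image" and
 * returns 4, the length of "pool".
 */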
5237 * Finds the next token in *buf, dynamically allocates a buffer big
5238 * enough to hold a copy of it, and copies the token into the new
5239 * buffer. The copy is guaranteed to be terminated with '\0'. Note
5240 * that a duplicate buffer is created even for a zero-length token.
5242 * Returns a pointer to the newly-allocated duplicate, or a null
5243 * pointer if memory for the duplicate was not available. If
5244 * the lenp argument is a non-null pointer, the length of the token
5245 * (not including the '\0') is returned in *lenp.
5247 * If successful, the *buf pointer will be updated to point beyond
5248 * the end of the found token.
5250 * Note: uses GFP_KERNEL for allocation.
5252 static inline char *dup_token(const char **buf, size_t *lenp)
5257 len = next_token(buf);
dup = kmemdup(*buf, len + 1, GFP_KERNEL);
if (!dup)
return NULL;
*(dup + len) = '\0';
*buf += len;

if (lenp)
*lenp = len;

return dup;
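/*
 * E.g. dup_token() on "pool image" returns a freshly allocated "pool"
 * and leaves *buf pointing at " image".
 */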
5271 * Parse the options provided for an "rbd add" (i.e., rbd image
5272 * mapping) request. These arrive via a write to /sys/bus/rbd/add,
5273 * and the data written is passed here via a NUL-terminated buffer.
5274 * Returns 0 if successful or an error code otherwise.
5276 * The information extracted from these options is recorded in
* the other parameters which return dynamically-allocated
* structures:
*  ceph_opts
*      The address of a pointer that will refer to a ceph options
*      structure.  Caller must release the returned pointer using
*      ceph_destroy_options() when it is no longer needed.
*  opts
*      Address of an rbd options pointer.  Fully initialized by
*      this function; caller must release with kfree().
*  rbd_spec
*      Address of an rbd image specification pointer.  Fully
*      initialized by this function based on parsed options.
*      Caller must release with rbd_spec_put().
5291 * The options passed take this form:
*  <mon_addrs> <options> <pool_name> <image_name> [<snap_name>]
* where:
*  <mon_addrs>
*      A comma-separated list of one or more monitor addresses.
*      A monitor address is an ip address, optionally followed
*      by a port number (separated by a colon).
*        I.e.:  ip1[:port1][,ip2[:port2]...]
*  <options>
*      A comma-separated list of ceph and/or rbd options.
*  <pool_name>
*      The name of the rados pool containing the rbd image.
*  <image_name>
*      The name of the image in that pool to map.
*  <snap_name>
*      An optional snapshot name.  If provided, the mapping will
*      present data from the image at the time that snapshot was
*      created.  The image head is used if no snapshot name is
*      provided.  Snapshot mappings are always read-only.
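*
* For example (illustrative only; the monitor address and cephx
* credentials are made up -- see Documentation/ABI/testing/sysfs-bus-rbd):
*
*   $ echo "1.2.3.4:6789 name=admin,secret=AQB... mypool myimage" \
*         > /sys/bus/rbd/add
*
* Appending a snapshot name (e.g. "... mypool myimage mysnap") maps
* that snapshot read-only instead of the image head.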
5311 static int rbd_add_parse_args(const char *buf,
5312 struct ceph_options **ceph_opts,
5313 struct rbd_options **opts,
5314 struct rbd_spec **rbd_spec)
5318 const char *mon_addrs;
5320 size_t mon_addrs_size;
5321 struct parse_rbd_opts_ctx pctx = { 0 };
5322 struct ceph_options *copts;
5325 /* The first four tokens are required */
len = next_token(&buf);
if (!len) {
rbd_warn(NULL, "no monitor address(es) provided");
return -EINVAL;
}
mon_addrs = buf;
mon_addrs_size = len + 1;
buf += len;
options = dup_token(&buf, NULL);
if (!options)
return -ENOMEM;
if (!*options) {
rbd_warn(NULL, "no options provided");
goto out_err;
}
5345 pctx.spec = rbd_spec_alloc();
5349 pctx.spec->pool_name = dup_token(&buf, NULL);
if (!pctx.spec->pool_name)
goto out_mem;
5352 if (!*pctx.spec->pool_name) {
5353 rbd_warn(NULL, "no pool name provided");
5357 pctx.spec->image_name = dup_token(&buf, NULL);
if (!pctx.spec->image_name)
goto out_mem;
5360 if (!*pctx.spec->image_name) {
5361 rbd_warn(NULL, "no image name provided");
5366 * Snapshot name is optional; default is to use "-"
5367 * (indicating the head/no snapshot).
len = next_token(&buf);
if (!len) {
buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */
len = sizeof (RBD_SNAP_HEAD_NAME) - 1;
5373 } else if (len > RBD_MAX_SNAP_NAME_LEN) {
ret = -ENAMETOOLONG;
goto out_err;
}
snap_name = kmemdup(buf, len + 1, GFP_KERNEL);
if (!snap_name)
goto out_mem;
*(snap_name + len) = '\0';
5381 pctx.spec->snap_name = snap_name;
5383 /* Initialize all rbd options to the defaults */
5385 pctx.opts = kzalloc(sizeof(*pctx.opts), GFP_KERNEL);
5389 pctx.opts->read_only = RBD_READ_ONLY_DEFAULT;
5390 pctx.opts->queue_depth = RBD_QUEUE_DEPTH_DEFAULT;
5391 pctx.opts->lock_timeout = RBD_LOCK_TIMEOUT_DEFAULT;
5392 pctx.opts->lock_on_read = RBD_LOCK_ON_READ_DEFAULT;
5393 pctx.opts->exclusive = RBD_EXCLUSIVE_DEFAULT;
5394 pctx.opts->trim = RBD_TRIM_DEFAULT;
5396 copts = ceph_parse_options(options, mon_addrs,
5397 mon_addrs + mon_addrs_size - 1,
5398 parse_rbd_opts_token, &pctx);
5399 if (IS_ERR(copts)) {
5400 ret = PTR_ERR(copts);
goto out_err;
}
kfree(options);

*ceph_opts = copts;
*opts = pctx.opts;
*rbd_spec = pctx.spec;

return 0;

out_mem:
ret = -ENOMEM;
out_err:
kfree(pctx.opts);
rbd_spec_put(pctx.spec);
kfree(options);
return ret;
5420 static void rbd_dev_image_unlock(struct rbd_device *rbd_dev)
5422 down_write(&rbd_dev->lock_rwsem);
5423 if (__rbd_is_lock_owner(rbd_dev))
5424 rbd_unlock(rbd_dev);
5425 up_write(&rbd_dev->lock_rwsem);
5428 static int rbd_add_acquire_lock(struct rbd_device *rbd_dev)
if (!(rbd_dev->header.features & RBD_FEATURE_EXCLUSIVE_LOCK)) {
rbd_warn(rbd_dev, "exclusive-lock feature is not enabled");
return -EINVAL;
}

/* FIXME: "rbd map --exclusive" should be interruptible */
5438 down_read(&rbd_dev->lock_rwsem);
5439 ret = rbd_wait_state_locked(rbd_dev, true);
5440 up_read(&rbd_dev->lock_rwsem);
if (ret) {
rbd_warn(rbd_dev, "failed to acquire exclusive lock");
return ret;
}

return 0;
5450 * An rbd format 2 image has a unique identifier, distinct from the
5451 * name given to it by the user. Internally, that identifier is
5452 * what's used to specify the names of objects related to the image.
5454 * A special "rbd id" object is used to map an rbd image name to its
5455 * id. If that object doesn't exist, then there is no v2 rbd image
5456 * with the supplied name.
5458 * This function will record the given rbd_dev's image_id field if
5459 * it can be determined, and in that case will return 0. If any
5460 * errors occur a negative errno will be returned and the rbd_dev's
5461 * image_id field will be unchanged (and should be NULL).
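*
* For example, assuming the usual object name prefixes from
* rbd_types.h ("rbd_id." and "rbd_header."), the id of a format 2
* image named "foo" is stored in an object called "rbd_id.foo", and
* the image's header object is then "rbd_header.<image_id>".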
5463 static int rbd_dev_image_id(struct rbd_device *rbd_dev)
5467 CEPH_DEFINE_OID_ONSTACK(oid);
5472 * When probing a parent image, the image id is already
5473 * known (and the image name likely is not). There's no
5474 * need to fetch the image id again in this case. We
5475 * do still need to set the image format though.
5477 if (rbd_dev->spec->image_id) {
rbd_dev->image_format = *rbd_dev->spec->image_id ? 2 : 1;
return 0;
}
5484 * First, see if the format 2 image id file exists, and if
5485 * so, get the image's persistent id from it.
5487 ret = ceph_oid_aprintf(&oid, GFP_KERNEL, "%s%s", RBD_ID_PREFIX,
5488 rbd_dev->spec->image_name);
5492 dout("rbd id object name is %s\n", oid.name);
5494 /* Response will be an encoded string, which includes a length */
5496 size = sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX;
5497 response = kzalloc(size, GFP_NOIO);
5503 /* If it doesn't exist we'll assume it's a format 1 image */
ret = rbd_obj_method_sync(rbd_dev, &oid, &rbd_dev->header_oloc,
"get_id", NULL, 0,
response, RBD_IMAGE_ID_LEN_MAX);
5508 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
5509 if (ret == -ENOENT) {
5510 image_id = kstrdup("", GFP_KERNEL);
5511 ret = image_id ? 0 : -ENOMEM;
if (!ret)
rbd_dev->image_format = 1;
} else if (ret >= 0) {
void *p = response;

image_id = ceph_extract_encoded_string(&p, p + ret,
NULL, GFP_NOIO);
ret = PTR_ERR_OR_ZERO(image_id);
if (!ret)
rbd_dev->image_format = 2;
}
5525 rbd_dev->spec->image_id = image_id;
5526 dout("image_id is %s\n", image_id);
5530 ceph_oid_destroy(&oid);
* Undo whatever state changes are made by a v1 or v2 header info call.
5538 static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
5540 struct rbd_image_header *header;
5542 rbd_dev_parent_put(rbd_dev);
5544 /* Free dynamic fields from the header, then zero it out */
5546 header = &rbd_dev->header;
5547 ceph_put_snap_context(header->snapc);
5548 kfree(header->snap_sizes);
5549 kfree(header->snap_names);
5550 kfree(header->object_prefix);
5551 memset(header, 0, sizeof (*header));
5554 static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev)
5558 ret = rbd_dev_v2_object_prefix(rbd_dev);
* Get and check features for the image.  Currently the
* features are assumed to never change.
5566 ret = rbd_dev_v2_features(rbd_dev);
5570 /* If the image supports fancy striping, get its parameters */
5572 if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) {
5573 ret = rbd_dev_v2_striping_info(rbd_dev);
5578 if (rbd_dev->header.features & RBD_FEATURE_DATA_POOL) {
5579 ret = rbd_dev_v2_data_pool(rbd_dev);
rbd_init_layout(rbd_dev);
return 0;

out_err:
rbd_dev->header.features = 0;
5589 kfree(rbd_dev->header.object_prefix);
5590 rbd_dev->header.object_prefix = NULL;
5595 * @depth is rbd_dev_image_probe() -> rbd_dev_probe_parent() ->
5596 * rbd_dev_image_probe() recursion depth, which means it's also the
5597 * length of the already discovered part of the parent chain.
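*
* For example, mapping image C in a clone chain base <- A <- B <- C
* probes C at depth 0, then B at 1, A at 2 and base at 3; the probe
* fails once depth would exceed RBD_MAX_PARENT_CHAIN_LEN (16).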
5599 static int rbd_dev_probe_parent(struct rbd_device *rbd_dev, int depth)
5601 struct rbd_device *parent = NULL;
if (!rbd_dev->parent_spec)
return 0;

if (++depth > RBD_MAX_PARENT_CHAIN_LEN) {
pr_info("parent chain is too long (%d)\n", depth);
ret = -EINVAL;
goto out_err;
}
5613 parent = __rbd_dev_create(rbd_dev->rbd_client, rbd_dev->parent_spec);
5620 * Images related by parent/child relationships always share
5621 * rbd_client and spec/parent_spec, so bump their refcounts.
5623 __rbd_get_client(rbd_dev->rbd_client);
5624 rbd_spec_get(rbd_dev->parent_spec);
ret = rbd_dev_image_probe(parent, depth);
if (ret < 0)
goto out_err;

rbd_dev->parent = parent;
atomic_set(&rbd_dev->parent_ref, 1);
return 0;

out_err:
rbd_dev_unparent(rbd_dev);
rbd_dev_destroy(parent);
return ret;
5640 static void rbd_dev_device_release(struct rbd_device *rbd_dev)
5642 clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
5643 rbd_dev_mapping_clear(rbd_dev);
5644 rbd_free_disk(rbd_dev);
5646 unregister_blkdev(rbd_dev->major, rbd_dev->name);
5650 * rbd_dev->header_rwsem must be locked for write and will be unlocked
5653 static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
5657 /* Record our major and minor device numbers. */
5659 if (!single_major) {
ret = register_blkdev(0, rbd_dev->name);
if (ret < 0)
goto err_out_unlock;

rbd_dev->major = ret;
rbd_dev->minor = 0;
} else {
rbd_dev->major = rbd_major;
rbd_dev->minor = rbd_dev_id_to_minor(rbd_dev->dev_id);
}
5671 /* Set up the blkdev mapping. */
ret = rbd_init_disk(rbd_dev);
if (ret)
goto err_out_blkdev;

ret = rbd_dev_mapping_set(rbd_dev);
if (ret)
goto err_out_disk;
5681 set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
5682 set_disk_ro(rbd_dev->disk, rbd_dev->opts->read_only);
ret = dev_set_name(&rbd_dev->dev, "%d", rbd_dev->dev_id);
if (ret)
goto err_out_mapping;
5688 set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
5689 up_write(&rbd_dev->header_rwsem);
return 0;

err_out_mapping:
rbd_dev_mapping_clear(rbd_dev);
err_out_disk:
rbd_free_disk(rbd_dev);
err_out_blkdev:
if (!single_major)
unregister_blkdev(rbd_dev->major, rbd_dev->name);
err_out_unlock:
up_write(&rbd_dev->header_rwsem);
return ret;
5704 static int rbd_dev_header_name(struct rbd_device *rbd_dev)
5706 struct rbd_spec *spec = rbd_dev->spec;
5709 /* Record the header object name for this rbd image. */
5711 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
5712 if (rbd_dev->image_format == 1)
5713 ret = ceph_oid_aprintf(&rbd_dev->header_oid, GFP_KERNEL, "%s%s",
5714 spec->image_name, RBD_SUFFIX);
else
ret = ceph_oid_aprintf(&rbd_dev->header_oid, GFP_KERNEL, "%s%s",
RBD_HEADER_PREFIX, spec->image_id);

return ret;
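/*
 * E.g. a format 1 image named "foo" gets header object "foo.rbd"
 * (assuming RBD_SUFFIX of ".rbd"), while a format 2 image with id
 * "1234" gets "rbd_header.1234" (assuming RBD_HEADER_PREFIX of
 * "rbd_header.").
 */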
5722 static void rbd_dev_image_release(struct rbd_device *rbd_dev)
5724 rbd_dev_unprobe(rbd_dev);
5726 rbd_unregister_watch(rbd_dev);
5727 rbd_dev->image_format = 0;
5728 kfree(rbd_dev->spec->image_id);
5729 rbd_dev->spec->image_id = NULL;
5733 * Probe for the existence of the header object for the given rbd
5734 * device. If this image is the one being mapped (i.e., not a
5735 * parent), initiate a watch on its header object before using that
5736 * object to get detailed information about the rbd image.
5738 static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
5743 * Get the id from the image id object. Unless there's an
5744 * error, rbd_dev->spec->image_id will be filled in with
5745 * a dynamically-allocated string, and rbd_dev->image_format
5746 * will be set to either 1 or 2.
ret = rbd_dev_image_id(rbd_dev);
if (ret)
return ret;

ret = rbd_dev_header_name(rbd_dev);
if (ret)
goto err_out_format;
if (!depth) {
ret = rbd_register_watch(rbd_dev);
if (ret) {
if (ret == -ENOENT)
pr_info("image %s/%s%s%s does not exist\n",
5761 rbd_dev->spec->pool_name,
5762 rbd_dev->spec->pool_ns ?: "",
5763 rbd_dev->spec->pool_ns ? "/" : "",
5764 rbd_dev->spec->image_name);
goto err_out_format;
}
}
ret = rbd_dev_header_info(rbd_dev);
if (ret)
goto err_out_watch;
5774 * If this image is the one being mapped, we have pool name and
5775 * id, image name and id, and snap name - need to fill snap id.
5776 * Otherwise this is a parent image, identified by pool, image
5777 * and snap ids - need to fill in names for those ids.
if (!depth)
ret = rbd_spec_fill_snap_id(rbd_dev);
else
ret = rbd_spec_fill_names(rbd_dev);
if (ret) {
if (ret == -ENOENT)
pr_info("snap %s/%s%s%s@%s does not exist\n",
5786 rbd_dev->spec->pool_name,
5787 rbd_dev->spec->pool_ns ?: "",
5788 rbd_dev->spec->pool_ns ? "/" : "",
5789 rbd_dev->spec->image_name,
rbd_dev->spec->snap_name);
goto err_out_probe;
}
5794 if (rbd_dev->header.features & RBD_FEATURE_LAYERING) {
ret = rbd_dev_v2_parent_info(rbd_dev);
if (ret)
goto err_out_probe;
5800 * Need to warn users if this image is the one being
5801 * mapped and has a parent.
if (!depth && rbd_dev->parent_spec)
rbd_warn(rbd_dev,
"WARNING: kernel layering is EXPERIMENTAL!");
}
ret = rbd_dev_probe_parent(rbd_dev, depth);
if (ret)
goto err_out_probe;
5812 dout("discovered format %u image, header name is %s\n",
5813 rbd_dev->image_format, rbd_dev->header_oid.name);
return 0;

err_out_probe:
rbd_dev_unprobe(rbd_dev);
err_out_watch:
if (!depth)
rbd_unregister_watch(rbd_dev);
err_out_format:
rbd_dev->image_format = 0;
kfree(rbd_dev->spec->image_id);
rbd_dev->spec->image_id = NULL;
return ret;
static ssize_t do_rbd_add(struct bus_type *bus,
const char *buf,
size_t count)
5832 struct rbd_device *rbd_dev = NULL;
5833 struct ceph_options *ceph_opts = NULL;
5834 struct rbd_options *rbd_opts = NULL;
5835 struct rbd_spec *spec = NULL;
5836 struct rbd_client *rbdc;
5839 if (!try_module_get(THIS_MODULE))
5842 /* parse add command */
5843 rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
5847 rbdc = rbd_get_client(ceph_opts);
rc = ceph_pg_poolid_by_name(rbdc->client->osdc.osdmap, spec->pool_name);
if (rc < 0) {
if (rc == -ENOENT)
pr_info("pool %s does not exist\n", spec->pool_name);
goto err_out_client;
}
5860 spec->pool_id = (u64)rc;
rbd_dev = rbd_dev_create(rbdc, spec, rbd_opts);
if (!rbd_dev) {
rc = -ENOMEM;
goto err_out_client;
}
5867 rbdc = NULL; /* rbd_dev now owns this */
5868 spec = NULL; /* rbd_dev now owns this */
5869 rbd_opts = NULL; /* rbd_dev now owns this */
5871 rbd_dev->config_info = kstrdup(buf, GFP_KERNEL);
5872 if (!rbd_dev->config_info) {
5874 goto err_out_rbd_dev;
5877 down_write(&rbd_dev->header_rwsem);
rc = rbd_dev_image_probe(rbd_dev, 0);
if (rc < 0) {
up_write(&rbd_dev->header_rwsem);
goto err_out_rbd_dev;
}
/* If we are mapping a snapshot, it must be marked read-only */
5885 if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
5886 rbd_dev->opts->read_only = true;
rc = rbd_dev_device_setup(rbd_dev);
if (rc)
goto err_out_image_probe;
5892 if (rbd_dev->opts->exclusive) {
rc = rbd_add_acquire_lock(rbd_dev);
if (rc)
goto err_out_device_setup;
5898 /* Everything's ready. Announce the disk to the world. */
rc = device_add(&rbd_dev->dev);
if (rc)
goto err_out_image_lock;
5904 add_disk(rbd_dev->disk);
5905 /* see rbd_init_disk() */
5906 blk_put_queue(rbd_dev->disk->queue);
5908 spin_lock(&rbd_dev_list_lock);
5909 list_add_tail(&rbd_dev->node, &rbd_dev_list);
5910 spin_unlock(&rbd_dev_list_lock);
5912 pr_info("%s: capacity %llu features 0x%llx\n", rbd_dev->disk->disk_name,
5913 (unsigned long long)get_capacity(rbd_dev->disk) << SECTOR_SHIFT,
5914 rbd_dev->header.features);
rc = count;
out:
module_put(THIS_MODULE);
return rc;
err_out_image_lock:
rbd_dev_image_unlock(rbd_dev);
5922 err_out_device_setup:
5923 rbd_dev_device_release(rbd_dev);
5924 err_out_image_probe:
5925 rbd_dev_image_release(rbd_dev);
err_out_rbd_dev:
rbd_dev_destroy(rbd_dev);
err_out_client:
rbd_put_client(rbdc);
5936 static ssize_t add_store(struct bus_type *bus, const char *buf, size_t count)
5941 return do_rbd_add(bus, buf, count);
5944 static ssize_t add_single_major_store(struct bus_type *bus, const char *buf,
5947 return do_rbd_add(bus, buf, count);
5950 static void rbd_dev_remove_parent(struct rbd_device *rbd_dev)
5952 while (rbd_dev->parent) {
5953 struct rbd_device *first = rbd_dev;
5954 struct rbd_device *second = first->parent;
5955 struct rbd_device *third;
* Follow to the parent with no grandparent and remove it.
while (second && (third = second->parent)) {
first = second;
second = third;
}

rbd_dev_image_release(second);
5967 rbd_dev_destroy(second);
5968 first->parent = NULL;
5969 first->parent_overlap = 0;
5971 rbd_assert(first->parent_spec);
5972 rbd_spec_put(first->parent_spec);
5973 first->parent_spec = NULL;
static ssize_t do_rbd_remove(struct bus_type *bus,
const char *buf,
size_t count)
5981 struct rbd_device *rbd_dev = NULL;
5982 struct list_head *tmp;
sscanf(buf, "%d %5s", &dev_id, opt_buf);
if (dev_id < 0) {
pr_err("dev_id out of range\n");
return -EINVAL;
}
5995 if (opt_buf[0] != '\0') {
if (!strcmp(opt_buf, "force")) {
force = true;
} else {
pr_err("bad remove option at '%s'\n", opt_buf);
return -EINVAL;
}
6005 spin_lock(&rbd_dev_list_lock);
6006 list_for_each(tmp, &rbd_dev_list) {
6007 rbd_dev = list_entry(tmp, struct rbd_device, node);
6008 if (rbd_dev->dev_id == dev_id) {
6014 spin_lock_irq(&rbd_dev->lock);
if (rbd_dev->open_count && !force)
ret = -EBUSY;
else if (test_and_set_bit(RBD_DEV_FLAG_REMOVING,
&rbd_dev->flags))
ret = -EINPROGRESS;
6020 spin_unlock_irq(&rbd_dev->lock);
6022 spin_unlock(&rbd_dev_list_lock);
6028 * Prevent new IO from being queued and wait for existing
6029 * IO to complete/fail.
6031 blk_mq_freeze_queue(rbd_dev->disk->queue);
6032 blk_set_queue_dying(rbd_dev->disk->queue);
6035 del_gendisk(rbd_dev->disk);
6036 spin_lock(&rbd_dev_list_lock);
6037 list_del_init(&rbd_dev->node);
6038 spin_unlock(&rbd_dev_list_lock);
6039 device_del(&rbd_dev->dev);
6041 rbd_dev_image_unlock(rbd_dev);
6042 rbd_dev_device_release(rbd_dev);
6043 rbd_dev_image_release(rbd_dev);
6044 rbd_dev_destroy(rbd_dev);
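/*
 * Example (illustrative): detach the mapped device with dev_id 0,
 * even if it is still open, by writing its id plus the optional
 * "force" flag to the bus control file:
 *
 *   $ echo "0 force" > /sys/bus/rbd/remove
 */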
6048 static ssize_t remove_store(struct bus_type *bus, const char *buf, size_t count)
6053 return do_rbd_remove(bus, buf, count);
6056 static ssize_t remove_single_major_store(struct bus_type *bus, const char *buf,
6059 return do_rbd_remove(bus, buf, count);
6063 * create control files in sysfs
6066 static int __init rbd_sysfs_init(void)
ret = device_register(&rbd_root_dev);
if (ret < 0)
return ret;

ret = bus_register(&rbd_bus_type);
if (ret < 0)
device_unregister(&rbd_root_dev);

return ret;
6081 static void __exit rbd_sysfs_cleanup(void)
6083 bus_unregister(&rbd_bus_type);
6084 device_unregister(&rbd_root_dev);
6087 static int __init rbd_slab_init(void)
6089 rbd_assert(!rbd_img_request_cache);
6090 rbd_img_request_cache = KMEM_CACHE(rbd_img_request, 0);
if (!rbd_img_request_cache)
return -ENOMEM;
6094 rbd_assert(!rbd_obj_request_cache);
6095 rbd_obj_request_cache = KMEM_CACHE(rbd_obj_request, 0);
if (!rbd_obj_request_cache)
goto out_err;

return 0;

out_err:
kmem_cache_destroy(rbd_img_request_cache);
rbd_img_request_cache = NULL;
return -ENOMEM;
6107 static void rbd_slab_exit(void)
6109 rbd_assert(rbd_obj_request_cache);
6110 kmem_cache_destroy(rbd_obj_request_cache);
6111 rbd_obj_request_cache = NULL;
6113 rbd_assert(rbd_img_request_cache);
6114 kmem_cache_destroy(rbd_img_request_cache);
6115 rbd_img_request_cache = NULL;
6118 static int __init rbd_init(void)
if (!libceph_compatible(NULL)) {
rbd_warn(NULL, "libceph incompatibility (quitting)");
return -EINVAL;
}

rc = rbd_slab_init();
if (rc)
return rc;
6132 * The number of active work items is limited by the number of
6133 * rbd devices * queue depth, so leave @max_active at default.
rbd_wq = alloc_workqueue(RBD_DRV_NAME, WQ_MEM_RECLAIM, 0);
if (!rbd_wq) {
rc = -ENOMEM;
goto err_out_slab;
}

if (single_major) {
rbd_major = register_blkdev(0, RBD_DRV_NAME);
if (rbd_major < 0) {
rc = rbd_major;
goto err_out_wq;
}
}

rc = rbd_sysfs_init();
if (rc)
goto err_out_blkdev;
if (single_major)
pr_info("loaded (major %d)\n", rbd_major);
else
pr_info("loaded\n");

return 0;
err_out_blkdev:
if (single_major)
unregister_blkdev(rbd_major, RBD_DRV_NAME);
err_out_wq:
destroy_workqueue(rbd_wq);
err_out_slab:
rbd_slab_exit();
return rc;
6170 static void __exit rbd_exit(void)
6172 ida_destroy(&rbd_dev_id_ida);
6173 rbd_sysfs_cleanup();
if (single_major)
unregister_blkdev(rbd_major, RBD_DRV_NAME);
destroy_workqueue(rbd_wq);
rbd_slab_exit();
6180 module_init(rbd_init);
6181 module_exit(rbd_exit);
6183 MODULE_AUTHOR("Alex Elder <elder@inktank.com>");
6184 MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
6185 MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
6186 /* following authorship retained from original osdblk.c */
6187 MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>");
6189 MODULE_DESCRIPTION("RADOS Block Device (RBD) driver");
6190 MODULE_LICENSE("GPL");