/*
   rbd.c -- Export ceph rados objects as a Linux block device

   based on drivers/block/osdblk.c:

   Copyright 2009 Red Hat, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

   For usage instructions, please refer to:

                 Documentation/ABI/testing/sysfs-bus-rbd

 */
#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/mon_client.h>
#include <linux/ceph/cls_lock_client.h>
#include <linux/ceph/striper.h>
#include <linux/ceph/decode.h>
#include <linux/parser.h>
#include <linux/bsearch.h>

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/blk-mq.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/workqueue.h>

#include "rbd_types.h"

#define RBD_DEBUG	/* Activate rbd_assert() calls */
/*
 * Increment the given counter and return its updated value.
 * If the counter is already 0 it will not be incremented.
 * If the counter is already at its maximum value returns
 * -EINVAL without updating it.
 */
static int atomic_inc_return_safe(atomic_t *v)
{
        unsigned int counter;

        counter = (unsigned int)atomic_fetch_add_unless(v, 1, 0);
        if (counter <= (unsigned int)INT_MAX)
                return (int)counter;

        atomic_dec(v);

        return -EINVAL;
}

/* Decrement the counter.  Return the resulting value, or -EINVAL */
static int atomic_dec_return_safe(atomic_t *v)
{
        int counter;

        counter = atomic_dec_return(v);
        if (counter >= 0)
                return counter;

        atomic_inc(v);

        return -EINVAL;
}
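
/*
 * Usage sketch (illustrative, not part of the driver): together these
 * helpers give a counter that saturates at 0 and reports under/overflow
 * with -EINVAL.  rbd uses the pattern for parent image references (see
 * rbd_dev_parent_get/put() below), where a counter stuck at 0 means
 * "parent torn down, don't resurrect it":
 *
 *	atomic_t ref = ATOMIC_INIT(0);
 *
 *	atomic_inc_return_safe(&ref);	// 0 is sticky -> returns 0
 *	atomic_dec_return_safe(&ref);	// underflow -> returns -EINVAL
 */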

#define RBD_DRV_NAME "rbd"

#define RBD_MINORS_PER_MAJOR		256
#define RBD_SINGLE_MAJOR_PART_SHIFT	4

#define RBD_MAX_PARENT_CHAIN_LEN	16

#define RBD_SNAP_DEV_NAME_PREFIX	"snap_"
#define RBD_MAX_SNAP_NAME_LEN	\
			(NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))

#define RBD_MAX_SNAP_COUNT	510	/* allows max snapc to fit in 4KB */
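
/*
 * Worked example for the 510 limit (illustrative): each snapshot
 * contributes one __le64 id, so 510 ids take 510 * 8 = 4080 bytes,
 * which together with the small fixed snapshot-context header still
 * fits in a single 4 KiB page.
 */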

#define RBD_SNAP_HEAD_NAME	"-"

#define	BAD_SNAP_INDEX	U32_MAX	/* invalid index into snap array */

/* This allows a single page to hold an image name sent by OSD */
#define RBD_IMAGE_NAME_LEN_MAX	(PAGE_SIZE - sizeof (__le32) - 1)
#define RBD_IMAGE_ID_LEN_MAX	64

#define RBD_OBJ_PREFIX_LEN_MAX	64

#define RBD_NOTIFY_TIMEOUT	5	/* seconds */
#define RBD_RETRY_DELAY		msecs_to_jiffies(1000)

/* Feature bits */

#define RBD_FEATURE_LAYERING		(1ULL<<0)
#define RBD_FEATURE_STRIPINGV2		(1ULL<<1)
#define RBD_FEATURE_EXCLUSIVE_LOCK	(1ULL<<2)
#define RBD_FEATURE_DATA_POOL		(1ULL<<7)
#define RBD_FEATURE_OPERATIONS		(1ULL<<8)

#define RBD_FEATURES_ALL	(RBD_FEATURE_LAYERING |		\
				 RBD_FEATURE_STRIPINGV2 |	\
				 RBD_FEATURE_EXCLUSIVE_LOCK |	\
				 RBD_FEATURE_DATA_POOL |	\
				 RBD_FEATURE_OPERATIONS)

/* Features supported by this (client software) implementation. */

#define RBD_FEATURES_SUPPORTED	(RBD_FEATURES_ALL)

/*
 * An RBD device name will be "rbd#", where the "rbd" comes from
 * RBD_DRV_NAME above, and # is a unique integer identifier.
 */
#define DEV_NAME_LEN		32

/*
 * block device image metadata (in-memory version)
 */
struct rbd_image_header {
	/* These six fields never change for a given rbd image */
	char *object_prefix;
	__u8 obj_order;
	u64 stripe_unit;
	u64 stripe_count;
	s64 data_pool_id;
	u64 features;		/* Might be changeable someday? */

	/* The remaining fields need to be updated occasionally */
	u64 image_size;
	struct ceph_snap_context *snapc;
	char *snap_names;	/* format 1 only */
	u64 *snap_sizes;	/* format 1 only */
};

/*
 * An rbd image specification.
 *
 * The tuple (pool_id, image_id, snap_id) is sufficient to uniquely
 * identify an image.  Each rbd_dev structure includes a pointer to
 * an rbd_spec structure that encapsulates this identity.
 *
 * Each of the id's in an rbd_spec has an associated name.  For a
 * user-mapped image, the names are supplied and the id's associated
 * with them are looked up.  For a layered image, a parent image is
 * defined by the tuple, and the names are looked up.
 *
 * An rbd_dev structure contains a parent_spec pointer which is
 * non-null if the image it represents is a child in a layered
 * image.  This pointer will refer to the rbd_spec structure used
 * by the parent rbd_dev for its own identity (i.e., the structure
 * is shared between the parent and child).
 *
 * Since these structures are populated once, during the discovery
 * phase of image construction, they are effectively immutable so
 * we make no effort to synchronize access to them.
 *
 * Note that code herein does not assume the image name is known (it
 * could be a null pointer).
 */
struct rbd_spec {
	u64		pool_id;
	const char	*pool_name;
	const char	*pool_ns;	/* NULL if default, never "" */

	const char	*image_id;
	const char	*image_name;

	u64		snap_id;
	const char	*snap_name;

	struct kref	kref;
};

/*
 * an instance of the client.  multiple devices may share an rbd client.
 */
struct rbd_client {
	struct ceph_client	*client;
	struct kref		kref;
	struct list_head	node;
};

struct rbd_img_request;

enum obj_request_type {
	OBJ_REQUEST_NODATA = 1,
	OBJ_REQUEST_BIO,	/* pointer into provided bio (list) */
	OBJ_REQUEST_BVECS,	/* pointer into provided bio_vec array */
	OBJ_REQUEST_OWN_BVECS,	/* private bio_vec array, doesn't own pages */
};

enum obj_operation_type {
	OBJ_OP_READ = 1,
	OBJ_OP_WRITE,
	OBJ_OP_DISCARD,
	OBJ_OP_ZEROOUT,
};

/*
 * Writes go through the following state machine to deal with
 * layering:
 *
 *                       need copyup
 * RBD_OBJ_WRITE_GUARD ---------------> RBD_OBJ_WRITE_COPYUP
 *         |     ^                              |
 *         v     \------------------------------/
 *       done
 *         ^
 *         |
 * RBD_OBJ_WRITE_FLAT
 *
 * Writes start in RBD_OBJ_WRITE_GUARD or _FLAT, depending on whether
 * there is a parent or not.
 */
enum rbd_obj_write_state {
	RBD_OBJ_WRITE_FLAT = 1,
	RBD_OBJ_WRITE_GUARD,
	RBD_OBJ_WRITE_COPYUP,
};

struct rbd_obj_request {
	struct ceph_object_extent ex;
	union {
		bool			tried_parent;	/* for reads */
		enum rbd_obj_write_state write_state;	/* for writes */
	};

	struct rbd_img_request	*img_request;
	struct ceph_file_extent	*img_extents;
	u32			num_img_extents;

	union {
		struct ceph_bio_iter	bio_pos;
		struct {
			struct ceph_bvec_iter	bvec_pos;
			u32			bvec_count;
			u32			bvec_idx;
		};
	};
	struct bio_vec		*copyup_bvecs;
	u32			copyup_bvec_count;

	struct ceph_osd_request	*osd_req;

	u64			xferred;	/* bytes transferred */
	int			result;

	struct kref		kref;
};

enum img_req_flags {
	IMG_REQ_CHILD,		/* initiator: block = 0, child image = 1 */
	IMG_REQ_LAYERED,	/* ENOENT handling: normal = 0, layered = 1 */
};

struct rbd_img_request {
	struct rbd_device	*rbd_dev;
	enum obj_operation_type	op_type;
	enum obj_request_type	data_type;
	unsigned long		flags;
	union {
		u64			snap_id;	/* for reads */
		struct ceph_snap_context *snapc;	/* for writes */
	};
	union {
		struct request		*rq;		/* block request */
		struct rbd_obj_request	*obj_request;	/* obj req initiator */
	};
	spinlock_t		completion_lock;
	u64			xferred;	/* aggregate bytes transferred */
	int			result;		/* first nonzero obj_request result */

	struct list_head	object_extents;	/* obj_req.ex structs */
	u32			pending_count;

	struct kref		kref;
};

#define for_each_obj_request(ireq, oreq) \
	list_for_each_entry(oreq, &(ireq)->object_extents, ex.oe_item)
#define for_each_obj_request_safe(ireq, oreq, n) \
	list_for_each_entry_safe(oreq, n, &(ireq)->object_extents, ex.oe_item)

enum rbd_watch_state {
	RBD_WATCH_STATE_UNREGISTERED,
	RBD_WATCH_STATE_REGISTERED,
	RBD_WATCH_STATE_ERROR,
};

enum rbd_lock_state {
	RBD_LOCK_STATE_UNLOCKED,
	RBD_LOCK_STATE_LOCKED,
	RBD_LOCK_STATE_RELEASING,
};

/* WatchNotify::ClientId */
struct rbd_client_id {
	u64 gid;
	u64 handle;
};

struct rbd_mapping {
	u64                     size;
	u64                     features;
};

/*
 * a single device
 */
struct rbd_device {
	int			dev_id;		/* blkdev unique id */

	int			major;		/* blkdev assigned major */
	int			minor;
	struct gendisk		*disk;		/* blkdev's gendisk and rq */

	u32			image_format;	/* Either 1 or 2 */
	struct rbd_client	*rbd_client;

	char			name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */

	spinlock_t		lock;		/* queue, flags, open_count */

	struct rbd_image_header	header;
	unsigned long		flags;		/* possibly lock protected */
	struct rbd_spec		*spec;
	struct rbd_options	*opts;
	char			*config_info;	/* add{,_single_major} string */

	struct ceph_object_id	header_oid;
	struct ceph_object_locator header_oloc;

	struct ceph_file_layout	layout;		/* used for all rbd requests */

	struct mutex		watch_mutex;
	enum rbd_watch_state	watch_state;
	struct ceph_osd_linger_request *watch_handle;
	u64			watch_cookie;
	struct delayed_work	watch_dwork;

	struct rw_semaphore	lock_rwsem;
	enum rbd_lock_state	lock_state;
	char			lock_cookie[32];
	struct rbd_client_id	owner_cid;
	struct work_struct	acquired_lock_work;
	struct work_struct	released_lock_work;
	struct delayed_work	lock_dwork;
	struct work_struct	unlock_work;
	wait_queue_head_t	lock_waitq;

	struct workqueue_struct	*task_wq;

	struct rbd_spec		*parent_spec;
	u64			parent_overlap;
	atomic_t		parent_ref;
	struct rbd_device	*parent;

	/* Block layer tags. */
	struct blk_mq_tag_set	tag_set;

	/* protects updating the header */
	struct rw_semaphore     header_rwsem;

	struct rbd_mapping	mapping;

	struct list_head	node;

	/* sysfs related */
	struct device		dev;
	unsigned long		open_count;	/* protected by lock */
};

/*
 * Flag bits for rbd_dev->flags:
 * - REMOVING (which is coupled with rbd_dev->open_count) is protected
 *   by rbd_dev->lock
 * - BLACKLISTED is protected by rbd_dev->lock_rwsem
 */
enum rbd_dev_flags {
	RBD_DEV_FLAG_EXISTS,	/* mapped snapshot has not been deleted */
	RBD_DEV_FLAG_REMOVING,	/* this mapping is being removed */
	RBD_DEV_FLAG_BLACKLISTED, /* our ceph_client is blacklisted */
};

static DEFINE_MUTEX(client_mutex);	/* Serialize client creation */

static LIST_HEAD(rbd_dev_list);		/* devices */
static DEFINE_SPINLOCK(rbd_dev_list_lock);

static LIST_HEAD(rbd_client_list);	/* clients */
static DEFINE_SPINLOCK(rbd_client_list_lock);

/* Slab caches for frequently-allocated structures */

static struct kmem_cache	*rbd_img_request_cache;
static struct kmem_cache	*rbd_obj_request_cache;

static int rbd_major;
static DEFINE_IDA(rbd_dev_id_ida);

static struct workqueue_struct *rbd_wq;

/*
 * single-major requires >= 0.75 version of userspace rbd utility.
 */
static bool single_major = true;
module_param(single_major, bool, 0444);
MODULE_PARM_DESC(single_major, "Use a single major number for all rbd devices (default: true)");

static ssize_t rbd_add(struct bus_type *bus, const char *buf,
		       size_t count);
static ssize_t rbd_remove(struct bus_type *bus, const char *buf,
			  size_t count);
static ssize_t rbd_add_single_major(struct bus_type *bus, const char *buf,
				    size_t count);
static ssize_t rbd_remove_single_major(struct bus_type *bus, const char *buf,
				       size_t count);
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth);

static int rbd_dev_id_to_minor(int dev_id)
{
	return dev_id << RBD_SINGLE_MAJOR_PART_SHIFT;
}

static int minor_to_rbd_dev_id(int minor)
{
	return minor >> RBD_SINGLE_MAJOR_PART_SHIFT;
}
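
/*
 * Example (illustrative): with RBD_SINGLE_MAJOR_PART_SHIFT == 4, each
 * device gets 2^4 = 16 minors -- the whole disk plus 15 partitions.
 * So dev_id 3 maps to first minor 48, and any minor in 48..63 (rbd3,
 * rbd3p1, ...) maps back to dev_id 3.
 */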

static bool __rbd_is_lock_owner(struct rbd_device *rbd_dev)
{
	return rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED ||
	       rbd_dev->lock_state == RBD_LOCK_STATE_RELEASING;
}

static bool rbd_is_lock_owner(struct rbd_device *rbd_dev)
{
	bool is_lock_owner;

	down_read(&rbd_dev->lock_rwsem);
	is_lock_owner = __rbd_is_lock_owner(rbd_dev);
	up_read(&rbd_dev->lock_rwsem);
	return is_lock_owner;
}

static ssize_t rbd_supported_features_show(struct bus_type *bus, char *buf)
{
	return sprintf(buf, "0x%llx\n", RBD_FEATURES_SUPPORTED);
}

static BUS_ATTR(add, 0200, NULL, rbd_add);
static BUS_ATTR(remove, 0200, NULL, rbd_remove);
static BUS_ATTR(add_single_major, 0200, NULL, rbd_add_single_major);
static BUS_ATTR(remove_single_major, 0200, NULL, rbd_remove_single_major);
static BUS_ATTR(supported_features, 0444, rbd_supported_features_show, NULL);

static struct attribute *rbd_bus_attrs[] = {
	&bus_attr_add.attr,
	&bus_attr_remove.attr,
	&bus_attr_add_single_major.attr,
	&bus_attr_remove_single_major.attr,
	&bus_attr_supported_features.attr,
	NULL,
};

static umode_t rbd_bus_is_visible(struct kobject *kobj,
				  struct attribute *attr, int index)
{
	if (!single_major &&
	    (attr == &bus_attr_add_single_major.attr ||
	     attr == &bus_attr_remove_single_major.attr))
		return 0;

	return attr->mode;
}

static const struct attribute_group rbd_bus_group = {
	.attrs = rbd_bus_attrs,
	.is_visible = rbd_bus_is_visible,
};
__ATTRIBUTE_GROUPS(rbd_bus);

static struct bus_type rbd_bus_type = {
	.name		= "rbd",
	.bus_groups	= rbd_bus_groups,
};

static void rbd_root_dev_release(struct device *dev)
{
}

static struct device rbd_root_dev = {
	.init_name =    "rbd",
	.release =      rbd_root_dev_release,
};

static __printf(2, 3)
void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;

	if (!rbd_dev)
		printk(KERN_WARNING "%s: %pV\n", RBD_DRV_NAME, &vaf);
	else if (rbd_dev->disk)
		printk(KERN_WARNING "%s: %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->disk->disk_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_name)
		printk(KERN_WARNING "%s: image %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_id)
		printk(KERN_WARNING "%s: id %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_id, &vaf);
	else	/* punt if we have no useful info */
		printk(KERN_WARNING "%s: rbd_dev %p: %pV\n",
			RBD_DRV_NAME, rbd_dev, &vaf);
	va_end(args);
}

#ifdef RBD_DEBUG
#define rbd_assert(expr)						\
		if (unlikely(!(expr))) {				\
			printk(KERN_ERR "\nAssertion failure in %s() "	\
						"at line %d:\n\n"	\
					"\trbd_assert(%s);\n\n",	\
					__func__, __LINE__, #expr);	\
			BUG();						\
		}
#else /* !RBD_DEBUG */
#  define rbd_assert(expr)	((void) 0)
#endif /* !RBD_DEBUG */

static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);

static int rbd_dev_refresh(struct rbd_device *rbd_dev);
static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev);
static int rbd_dev_header_info(struct rbd_device *rbd_dev);
static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev);
static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id);
static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
		u8 *order, u64 *snap_size);
static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
		u64 *snap_features);

static int rbd_open(struct block_device *bdev, fmode_t mode)
{
	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
	bool removing = false;

	spin_lock_irq(&rbd_dev->lock);
	if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
		removing = true;
	else
		rbd_dev->open_count++;
	spin_unlock_irq(&rbd_dev->lock);
	if (removing)
		return -ENOENT;

	(void) get_device(&rbd_dev->dev);

	return 0;
}

static void rbd_release(struct gendisk *disk, fmode_t mode)
{
	struct rbd_device *rbd_dev = disk->private_data;
	unsigned long open_count_before;

	spin_lock_irq(&rbd_dev->lock);
	open_count_before = rbd_dev->open_count--;
	spin_unlock_irq(&rbd_dev->lock);
	rbd_assert(open_count_before > 0);

	put_device(&rbd_dev->dev);
}

static int rbd_ioctl_set_ro(struct rbd_device *rbd_dev, unsigned long arg)
{
	int ro;

	if (get_user(ro, (int __user *)arg))
		return -EFAULT;

	/* Snapshots can't be marked read-write */
	if (rbd_dev->spec->snap_id != CEPH_NOSNAP && !ro)
		return -EROFS;

	/* Let blkdev_roset() handle it */
	return -ENOTTY;
}

static int rbd_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
	int ret;

	switch (cmd) {
	case BLKROSET:
		ret = rbd_ioctl_set_ro(rbd_dev, arg);
		break;
	default:
		ret = -ENOTTY;
	}

	return ret;
}

#ifdef CONFIG_COMPAT
static int rbd_compat_ioctl(struct block_device *bdev, fmode_t mode,
				unsigned int cmd, unsigned long arg)
{
	return rbd_ioctl(bdev, mode, cmd, arg);
}
#endif /* CONFIG_COMPAT */

static const struct block_device_operations rbd_bd_ops = {
	.owner			= THIS_MODULE,
	.open			= rbd_open,
	.release		= rbd_release,
	.ioctl			= rbd_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= rbd_compat_ioctl,
#endif
};

/*
 * Initialize an rbd client instance.  Success or not, this function
 * consumes ceph_opts.  Caller holds client_mutex.
 */
static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;
	int ret = -ENOMEM;

	dout("%s:\n", __func__);
	rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL);
	if (!rbdc)
		goto out_opt;

	kref_init(&rbdc->kref);
	INIT_LIST_HEAD(&rbdc->node);

	rbdc->client = ceph_create_client(ceph_opts, rbdc);
	if (IS_ERR(rbdc->client))
		goto out_rbdc;
	ceph_opts = NULL; /* Now rbdc->client is responsible for ceph_opts */

	ret = ceph_open_session(rbdc->client);
	if (ret < 0)
		goto out_client;

	spin_lock(&rbd_client_list_lock);
	list_add_tail(&rbdc->node, &rbd_client_list);
	spin_unlock(&rbd_client_list_lock);

	dout("%s: rbdc %p\n", __func__, rbdc);

	return rbdc;
out_client:
	ceph_destroy_client(rbdc->client);
out_rbdc:
	kfree(rbdc);
out_opt:
	if (ceph_opts)
		ceph_destroy_options(ceph_opts);
	dout("%s: error %d\n", __func__, ret);

	return ERR_PTR(ret);
}

static struct rbd_client *__rbd_get_client(struct rbd_client *rbdc)
{
	kref_get(&rbdc->kref);

	return rbdc;
}

/*
 * Find a ceph client with specific addr and configuration.  If
 * found, bump its reference count.
 */
static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
{
	struct rbd_client *client_node;
	bool found = false;

	if (ceph_opts->flags & CEPH_OPT_NOSHARE)
		return NULL;

	spin_lock(&rbd_client_list_lock);
	list_for_each_entry(client_node, &rbd_client_list, node) {
		if (!ceph_compare_options(ceph_opts, client_node->client)) {
			__rbd_get_client(client_node);

			found = true;
			break;
		}
	}
	spin_unlock(&rbd_client_list_lock);

	return found ? client_node : NULL;
}

/*
 * (Per device) rbd map options
 */
enum {
	Opt_queue_depth,
	Opt_alloc_size,
	Opt_lock_timeout,
	Opt_last_int,
	/* int args above */
	Opt_pool_ns,
	Opt_last_string,
	/* string args above */
	Opt_read_only,
	Opt_read_write,
	Opt_lock_on_read,
	Opt_exclusive,
	Opt_notrim,
	Opt_err
};

static match_table_t rbd_opts_tokens = {
	{Opt_queue_depth, "queue_depth=%d"},
	{Opt_alloc_size, "alloc_size=%d"},
	{Opt_lock_timeout, "lock_timeout=%d"},
	/* int args above */
	{Opt_pool_ns, "_pool_ns=%s"},
	/* string args above */
	{Opt_read_only, "read_only"},
	{Opt_read_only, "ro"},		/* Alternate spelling */
	{Opt_read_write, "read_write"},
	{Opt_read_write, "rw"},		/* Alternate spelling */
	{Opt_lock_on_read, "lock_on_read"},
	{Opt_exclusive, "exclusive"},
	{Opt_notrim, "notrim"},
	{Opt_err, NULL}
};

struct rbd_options {
	int	queue_depth;
	int	alloc_size;
	unsigned long	lock_timeout;
	bool	read_only;
	bool	lock_on_read;
	bool	exclusive;
	bool	trim;
};

#define RBD_QUEUE_DEPTH_DEFAULT	BLKDEV_MAX_RQ
#define RBD_ALLOC_SIZE_DEFAULT	(64 * 1024)
#define RBD_LOCK_TIMEOUT_DEFAULT 0  /* no timeout */
#define RBD_READ_ONLY_DEFAULT	false
#define RBD_LOCK_ON_READ_DEFAULT false
#define RBD_EXCLUSIVE_DEFAULT	false
#define RBD_TRIM_DEFAULT	true
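
/*
 * Example map option string (illustrative), as matched by the token
 * table above when passed along with the monitor addresses and image
 * spec through /sys/bus/rbd/add:
 *
 *	queue_depth=128,alloc_size=65536,lock_on_read,notrim
 *
 * Integer-valued tokens are range-checked in parse_rbd_opts_token()
 * below; bare tokens simply flip the corresponding rbd_options field.
 */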

struct parse_rbd_opts_ctx {
	struct rbd_spec		*spec;
	struct rbd_options	*opts;
};

static int parse_rbd_opts_token(char *c, void *private)
{
	struct parse_rbd_opts_ctx *pctx = private;
	substring_t argstr[MAX_OPT_ARGS];
	int token, intval, ret;

	token = match_token(c, rbd_opts_tokens, argstr);
	if (token < Opt_last_int) {
		ret = match_int(&argstr[0], &intval);
		if (ret < 0) {
			pr_err("bad option arg (not int) at '%s'\n", c);
			return ret;
		}
		dout("got int token %d val %d\n", token, intval);
	} else if (token > Opt_last_int && token < Opt_last_string) {
		dout("got string token %d val %s\n", token, argstr[0].from);
	} else {
		dout("got token %d\n", token);
	}

	switch (token) {
	case Opt_queue_depth:
		if (intval < 1) {
			pr_err("queue_depth out of range\n");
			return -EINVAL;
		}
		pctx->opts->queue_depth = intval;
		break;
	case Opt_alloc_size:
		if (intval < SECTOR_SIZE) {
			pr_err("alloc_size out of range\n");
			return -EINVAL;
		}
		if (!is_power_of_2(intval)) {
			pr_err("alloc_size must be a power of 2\n");
			return -EINVAL;
		}
		pctx->opts->alloc_size = intval;
		break;
	case Opt_lock_timeout:
		/* 0 is "wait forever" (i.e. infinite timeout) */
		if (intval < 0 || intval > INT_MAX / 1000) {
			pr_err("lock_timeout out of range\n");
			return -EINVAL;
		}
		pctx->opts->lock_timeout = msecs_to_jiffies(intval * 1000);
		break;
	case Opt_pool_ns:
		kfree(pctx->spec->pool_ns);
		pctx->spec->pool_ns = match_strdup(argstr);
		if (!pctx->spec->pool_ns)
			return -ENOMEM;
		break;
	case Opt_read_only:
		pctx->opts->read_only = true;
		break;
	case Opt_read_write:
		pctx->opts->read_only = false;
		break;
	case Opt_lock_on_read:
		pctx->opts->lock_on_read = true;
		break;
	case Opt_exclusive:
		pctx->opts->exclusive = true;
		break;
	case Opt_notrim:
		pctx->opts->trim = false;
		break;
	default:
		/* libceph prints "bad option" msg */
		return -EINVAL;
	}

	return 0;
}

static char* obj_op_name(enum obj_operation_type op_type)
{
	switch (op_type) {
	case OBJ_OP_READ:
		return "read";
	case OBJ_OP_WRITE:
		return "write";
	case OBJ_OP_DISCARD:
		return "discard";
	case OBJ_OP_ZEROOUT:
		return "zeroout";
	default:
		return "???";
	}
}

/*
 * Destroy ceph client
 *
 * rbd_client_list_lock is taken here to unlink the client from
 * rbd_client_list; callers must not already hold it.
 */
static void rbd_client_release(struct kref *kref)
{
	struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);

	dout("%s: rbdc %p\n", __func__, rbdc);
	spin_lock(&rbd_client_list_lock);
	list_del(&rbdc->node);
	spin_unlock(&rbd_client_list_lock);

	ceph_destroy_client(rbdc->client);
	kfree(rbdc);
}

/*
 * Drop reference to ceph client node. If it's not referenced anymore, release
 * it.
 */
static void rbd_put_client(struct rbd_client *rbdc)
{
	if (rbdc)
		kref_put(&rbdc->kref, rbd_client_release);
}

static int wait_for_latest_osdmap(struct ceph_client *client)
{
	u64 newest_epoch;
	int ret;

	ret = ceph_monc_get_version(&client->monc, "osdmap", &newest_epoch);
	if (ret)
		return ret;

	if (client->osdc.osdmap->epoch >= newest_epoch)
		return 0;

	ceph_osdc_maybe_request_map(&client->osdc);
	return ceph_monc_wait_osdmap(&client->monc, newest_epoch,
				     client->options->mount_timeout);
}

/*
 * Get a ceph client with specific addr and configuration, if one does
 * not exist create it.  Either way, ceph_opts is consumed by this
 * function.
 */
static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;
	int ret;

	mutex_lock_nested(&client_mutex, SINGLE_DEPTH_NESTING);
	rbdc = rbd_client_find(ceph_opts);
	if (rbdc) {
		ceph_destroy_options(ceph_opts);

		/*
		 * Using an existing client.  Make sure ->pg_pools is up to
		 * date before we look up the pool id in do_rbd_add().
		 */
		ret = wait_for_latest_osdmap(rbdc->client);
		if (ret) {
			rbd_warn(NULL, "failed to get latest osdmap: %d", ret);
			rbd_put_client(rbdc);
			rbdc = ERR_PTR(ret);
		}
	} else {
		rbdc = rbd_client_create(ceph_opts);
	}
	mutex_unlock(&client_mutex);

	return rbdc;
}

static bool rbd_image_format_valid(u32 image_format)
{
	return image_format == 1 || image_format == 2;
}

static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
{
	size_t size;
	u32 snap_count;

	/* The header has to start with the magic rbd header text */
	if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)))
		return false;

	/* The bio layer requires at least sector-sized I/O */

	if (ondisk->options.order < SECTOR_SHIFT)
		return false;

	/* If we use u64 in a few spots we may be able to loosen this */

	if (ondisk->options.order > 8 * sizeof (int) - 1)
		return false;

	/*
	 * The size of a snapshot header has to fit in a size_t, and
	 * that limits the number of snapshots.
	 */
	snap_count = le32_to_cpu(ondisk->snap_count);
	size = SIZE_MAX - sizeof (struct ceph_snap_context);
	if (snap_count > size / sizeof (__le64))
		return false;

	/*
	 * Not only that, but the size of the entire snapshot
	 * header must also be representable in a size_t.
	 */
	size -= snap_count * sizeof (__le64);
	if ((u64) size < le64_to_cpu(ondisk->snap_names_len))
		return false;

	return true;
}

/*
 * returns the size of an object in the image
 */
static u32 rbd_obj_bytes(struct rbd_image_header *header)
{
	return 1U << header->obj_order;
}
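
/*
 * For example (illustrative): the common rbd object order of 22 gives
 * 1U << 22 = 4 MiB objects.
 */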

static void rbd_init_layout(struct rbd_device *rbd_dev)
{
	if (rbd_dev->header.stripe_unit == 0 ||
	    rbd_dev->header.stripe_count == 0) {
		rbd_dev->header.stripe_unit = rbd_obj_bytes(&rbd_dev->header);
		rbd_dev->header.stripe_count = 1;
	}

	rbd_dev->layout.stripe_unit = rbd_dev->header.stripe_unit;
	rbd_dev->layout.stripe_count = rbd_dev->header.stripe_count;
	rbd_dev->layout.object_size = rbd_obj_bytes(&rbd_dev->header);
	rbd_dev->layout.pool_id = rbd_dev->header.data_pool_id == CEPH_NOPOOL ?
			  rbd_dev->spec->pool_id : rbd_dev->header.data_pool_id;
	RCU_INIT_POINTER(rbd_dev->layout.pool_ns, NULL);
}

/*
 * Fill an rbd image header with information from the given format 1
 * on-disk header.
 */
static int rbd_header_from_disk(struct rbd_device *rbd_dev,
				 struct rbd_image_header_ondisk *ondisk)
{
	struct rbd_image_header *header = &rbd_dev->header;
	bool first_time = header->object_prefix == NULL;
	struct ceph_snap_context *snapc;
	char *object_prefix = NULL;
	char *snap_names = NULL;
	u64 *snap_sizes = NULL;
	u32 snap_count;
	int ret = -ENOMEM;
	u32 i;

	/* Allocate this now to avoid having to handle failure below */

	if (first_time) {
		object_prefix = kstrndup(ondisk->object_prefix,
					 sizeof(ondisk->object_prefix),
					 GFP_KERNEL);
		if (!object_prefix)
			return -ENOMEM;
	}

	/* Allocate the snapshot context and fill it in */

	snap_count = le32_to_cpu(ondisk->snap_count);
	snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
	if (!snapc)
		goto out_err;
	snapc->seq = le64_to_cpu(ondisk->snap_seq);
	if (snap_count) {
		struct rbd_image_snap_ondisk *snaps;
		u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);

		/* We'll keep a copy of the snapshot names... */

		if (snap_names_len > (u64)SIZE_MAX)
			goto out_2big;
		snap_names = kmalloc(snap_names_len, GFP_KERNEL);
		if (!snap_names)
			goto out_err;

		/* ...as well as the array of their sizes. */
		snap_sizes = kmalloc_array(snap_count,
					   sizeof(*header->snap_sizes),
					   GFP_KERNEL);
		if (!snap_sizes)
			goto out_err;

		/*
		 * Copy the names, and fill in each snapshot's id
		 * and size.
		 *
		 * Note that rbd_dev_v1_header_info() guarantees the
		 * ondisk buffer we're working with has
		 * snap_names_len bytes beyond the end of the
		 * snapshot id array, so this memcpy() is safe.
		 */
		memcpy(snap_names, &ondisk->snaps[snap_count], snap_names_len);
		snaps = ondisk->snaps;
		for (i = 0; i < snap_count; i++) {
			snapc->snaps[i] = le64_to_cpu(snaps[i].id);
			snap_sizes[i] = le64_to_cpu(snaps[i].image_size);
		}
	}

	/* We won't fail any more, fill in the header */

	if (first_time) {
		header->object_prefix = object_prefix;
		header->obj_order = ondisk->options.order;
		rbd_init_layout(rbd_dev);
	} else {
		ceph_put_snap_context(header->snapc);
		kfree(header->snap_names);
		kfree(header->snap_sizes);
	}

	/* The remaining fields always get updated (when we refresh) */

	header->image_size = le64_to_cpu(ondisk->image_size);
	header->snapc = snapc;
	header->snap_names = snap_names;
	header->snap_sizes = snap_sizes;

	return 0;
out_2big:
	ret = -EIO;
out_err:
	kfree(snap_sizes);
	kfree(snap_names);
	ceph_put_snap_context(snapc);
	kfree(object_prefix);

	return ret;
}

static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which)
{
	const char *snap_name;

	rbd_assert(which < rbd_dev->header.snapc->num_snaps);

	/* Skip over names until we find the one we are looking for */

	snap_name = rbd_dev->header.snap_names;
	while (which--)
		snap_name += strlen(snap_name) + 1;

	return kstrdup(snap_name, GFP_KERNEL);
}

/*
 * Snapshot id comparison function for use with qsort()/bsearch().
 * Note that result is for snapshots in *descending* order.
 */
static int snapid_compare_reverse(const void *s1, const void *s2)
{
	u64 snap_id1 = *(u64 *)s1;
	u64 snap_id2 = *(u64 *)s2;

	if (snap_id1 < snap_id2)
		return 1;
	return snap_id1 == snap_id2 ? 0 : -1;
}

/*
 * Search a snapshot context to see if the given snapshot id is
 * present.
 *
 * Returns the position of the snapshot id in the array if it's found,
 * or BAD_SNAP_INDEX otherwise.
 *
 * Note: The snapshot array is kept sorted (by the osd) in
 * reverse order, highest snapshot id first.
 */
static u32 rbd_dev_snap_index(struct rbd_device *rbd_dev, u64 snap_id)
{
	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
	u64 *found;

	found = bsearch(&snap_id, &snapc->snaps, snapc->num_snaps,
				sizeof (snap_id), snapid_compare_reverse);

	return found ? (u32)(found - &snapc->snaps[0]) : BAD_SNAP_INDEX;
}
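
/*
 * Worked example (illustrative): with snapc->snaps = { 12, 7, 3 }
 * (descending, as kept by the OSD), looking up snap_id 7 returns
 * index 1, while looking up snap_id 5 returns BAD_SNAP_INDEX --
 * snapid_compare_reverse() inverts the usual ordering so bsearch()
 * works on the reversed array.
 */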

static const char *rbd_dev_v1_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id)
{
	u32 which;
	const char *snap_name;

	which = rbd_dev_snap_index(rbd_dev, snap_id);
	if (which == BAD_SNAP_INDEX)
		return ERR_PTR(-ENOENT);

	snap_name = _rbd_dev_v1_snap_name(rbd_dev, which);
	return snap_name ? snap_name : ERR_PTR(-ENOMEM);
}

static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
{
	if (snap_id == CEPH_NOSNAP)
		return RBD_SNAP_HEAD_NAME;

	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (rbd_dev->image_format == 1)
		return rbd_dev_v1_snap_name(rbd_dev, snap_id);

	return rbd_dev_v2_snap_name(rbd_dev, snap_id);
}

static int rbd_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
				u64 *snap_size)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (snap_id == CEPH_NOSNAP) {
		*snap_size = rbd_dev->header.image_size;
	} else if (rbd_dev->image_format == 1) {
		u32 which;

		which = rbd_dev_snap_index(rbd_dev, snap_id);
		if (which == BAD_SNAP_INDEX)
			return -ENOENT;

		*snap_size = rbd_dev->header.snap_sizes[which];
	} else {
		u64 size = 0;
		int ret;

		ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, NULL, &size);
		if (ret)
			return ret;

		*snap_size = size;
	}
	return 0;
}

static int rbd_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
			u64 *snap_features)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (snap_id == CEPH_NOSNAP) {
		*snap_features = rbd_dev->header.features;
	} else if (rbd_dev->image_format == 1) {
		*snap_features = 0;	/* No features for format 1 */
	} else {
		u64 features = 0;
		int ret;

		ret = _rbd_dev_v2_snap_features(rbd_dev, snap_id, &features);
		if (ret)
			return ret;

		*snap_features = features;
	}
	return 0;
}

static int rbd_dev_mapping_set(struct rbd_device *rbd_dev)
{
	u64 snap_id = rbd_dev->spec->snap_id;
	u64 size = 0;
	u64 features = 0;
	int ret;

	ret = rbd_snap_size(rbd_dev, snap_id, &size);
	if (ret)
		return ret;
	ret = rbd_snap_features(rbd_dev, snap_id, &features);
	if (ret)
		return ret;

	rbd_dev->mapping.size = size;
	rbd_dev->mapping.features = features;

	return 0;
}

static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev)
{
	rbd_dev->mapping.size = 0;
	rbd_dev->mapping.features = 0;
}

static void zero_bvec(struct bio_vec *bv)
{
	void *buf;
	unsigned long flags;

	buf = bvec_kmap_irq(bv, &flags);
	memset(buf, 0, bv->bv_len);
	flush_dcache_page(bv->bv_page);
	bvec_kunmap_irq(buf, &flags);
}

static void zero_bios(struct ceph_bio_iter *bio_pos, u32 off, u32 bytes)
{
	struct ceph_bio_iter it = *bio_pos;

	ceph_bio_iter_advance(&it, off);
	ceph_bio_iter_advance_step(&it, bytes, ({
		zero_bvec(&bv);
	}));
}

static void zero_bvecs(struct ceph_bvec_iter *bvec_pos, u32 off, u32 bytes)
{
	struct ceph_bvec_iter it = *bvec_pos;

	ceph_bvec_iter_advance(&it, off);
	ceph_bvec_iter_advance_step(&it, bytes, ({
		zero_bvec(&bv);
	}));
}

/*
 * Zero a range in @obj_req data buffer defined by a bio (list) or
 * (private) bio_vec array.
 *
 * @off is relative to the start of the data buffer.
 */
static void rbd_obj_zero_range(struct rbd_obj_request *obj_req, u32 off,
			       u32 bytes)
{
	switch (obj_req->img_request->data_type) {
	case OBJ_REQUEST_BIO:
		zero_bios(&obj_req->bio_pos, off, bytes);
		break;
	case OBJ_REQUEST_BVECS:
	case OBJ_REQUEST_OWN_BVECS:
		zero_bvecs(&obj_req->bvec_pos, off, bytes);
		break;
	default:
		rbd_assert(0);
	}
}

static void rbd_obj_request_destroy(struct kref *kref);
static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request != NULL);
	dout("%s: obj %p (was %d)\n", __func__, obj_request,
		kref_read(&obj_request->kref));
	kref_put(&obj_request->kref, rbd_obj_request_destroy);
}

static void rbd_img_request_get(struct rbd_img_request *img_request)
{
	dout("%s: img %p (was %d)\n", __func__, img_request,
	     kref_read(&img_request->kref));
	kref_get(&img_request->kref);
}

static void rbd_img_request_destroy(struct kref *kref);
static void rbd_img_request_put(struct rbd_img_request *img_request)
{
	rbd_assert(img_request != NULL);
	dout("%s: img %p (was %d)\n", __func__, img_request,
		kref_read(&img_request->kref));
	kref_put(&img_request->kref, rbd_img_request_destroy);
}

static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request->img_request == NULL);

	/* Image request now owns object's original reference */
	obj_request->img_request = img_request;
	img_request->pending_count++;
	dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
}

static inline void rbd_img_obj_request_del(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
	list_del(&obj_request->ex.oe_item);
	rbd_assert(obj_request->img_request == img_request);
	rbd_obj_request_put(obj_request);
}

static void rbd_obj_request_submit(struct rbd_obj_request *obj_request)
{
	struct ceph_osd_request *osd_req = obj_request->osd_req;

	dout("%s %p object_no %016llx %llu~%llu osd_req %p\n", __func__,
	     obj_request, obj_request->ex.oe_objno, obj_request->ex.oe_off,
	     obj_request->ex.oe_len, osd_req);
	ceph_osdc_start_request(osd_req->r_osdc, osd_req, false);
}

/*
 * The default/initial value for all image request flags is 0.  Each
 * is conditionally set to 1 at image request initialization time
 * and currently never changes thereafter.
 */
static void img_request_layered_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_LAYERED, &img_request->flags);
	smp_mb();
}

static void img_request_layered_clear(struct rbd_img_request *img_request)
{
	clear_bit(IMG_REQ_LAYERED, &img_request->flags);
	smp_mb();
}

static bool img_request_layered_test(struct rbd_img_request *img_request)
{
	smp_mb();
	return test_bit(IMG_REQ_LAYERED, &img_request->flags) != 0;
}

static bool rbd_obj_is_entire(struct rbd_obj_request *obj_req)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;

	return !obj_req->ex.oe_off &&
	       obj_req->ex.oe_len == rbd_dev->layout.object_size;
}

static bool rbd_obj_is_tail(struct rbd_obj_request *obj_req)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;

	return obj_req->ex.oe_off + obj_req->ex.oe_len ==
					rbd_dev->layout.object_size;
}

/*
 * Must be called after rbd_obj_calc_img_extents().
 */
static bool rbd_obj_copyup_enabled(struct rbd_obj_request *obj_req)
{
	if (!obj_req->num_img_extents ||
	    rbd_obj_is_entire(obj_req))
		return false;

	return true;
}

static u64 rbd_obj_img_extents_bytes(struct rbd_obj_request *obj_req)
{
	return ceph_file_extents_bytes(obj_req->img_extents,
				       obj_req->num_img_extents);
}

static bool rbd_img_is_write(struct rbd_img_request *img_req)
{
	switch (img_req->op_type) {
	case OBJ_OP_READ:
		return false;
	case OBJ_OP_WRITE:
	case OBJ_OP_DISCARD:
	case OBJ_OP_ZEROOUT:
		return true;
	default:
		BUG();
	}
}

static void rbd_obj_handle_request(struct rbd_obj_request *obj_req);

static void rbd_osd_req_callback(struct ceph_osd_request *osd_req)
{
	struct rbd_obj_request *obj_req = osd_req->r_priv;

	dout("%s osd_req %p result %d for obj_req %p\n", __func__, osd_req,
	     osd_req->r_result, obj_req);
	rbd_assert(osd_req == obj_req->osd_req);

	obj_req->result = osd_req->r_result < 0 ? osd_req->r_result : 0;
	if (!obj_req->result && !rbd_img_is_write(obj_req->img_request))
		obj_req->xferred = osd_req->r_result;
	else
		/*
		 * Writes aren't allowed to return a data payload.  In some
		 * guarded write cases (e.g. stat + zero on an empty object)
		 * a stat response makes it through, but we don't care.
		 */
		obj_req->xferred = 0;

	rbd_obj_handle_request(obj_req);
}

static void rbd_osd_req_format_read(struct rbd_obj_request *obj_request)
{
	struct ceph_osd_request *osd_req = obj_request->osd_req;

	osd_req->r_flags = CEPH_OSD_FLAG_READ;
	osd_req->r_snapid = obj_request->img_request->snap_id;
}

static void rbd_osd_req_format_write(struct rbd_obj_request *obj_request)
{
	struct ceph_osd_request *osd_req = obj_request->osd_req;

	osd_req->r_flags = CEPH_OSD_FLAG_WRITE;
	ktime_get_real_ts64(&osd_req->r_mtime);
	osd_req->r_data_offset = obj_request->ex.oe_off;
}

static struct ceph_osd_request *
__rbd_osd_req_create(struct rbd_obj_request *obj_req,
		     struct ceph_snap_context *snapc, unsigned int num_ops)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct ceph_osd_request *req;
	const char *name_format = rbd_dev->image_format == 1 ?
				      RBD_V1_DATA_FORMAT : RBD_V2_DATA_FORMAT;

	req = ceph_osdc_alloc_request(osdc, snapc, num_ops, false, GFP_NOIO);
	if (!req)
		return NULL;

	req->r_callback = rbd_osd_req_callback;
	req->r_priv = obj_req;

	/*
	 * Data objects may be stored in a separate pool, but always in
	 * the same namespace in that pool as the header in its pool.
	 */
	ceph_oloc_copy(&req->r_base_oloc, &rbd_dev->header_oloc);
	req->r_base_oloc.pool = rbd_dev->layout.pool_id;

	if (ceph_oid_aprintf(&req->r_base_oid, GFP_NOIO, name_format,
			rbd_dev->header.object_prefix, obj_req->ex.oe_objno))
		goto err_req;

	return req;

err_req:
	ceph_osdc_put_request(req);
	return NULL;
}

static struct ceph_osd_request *
rbd_osd_req_create(struct rbd_obj_request *obj_req, unsigned int num_ops)
{
	return __rbd_osd_req_create(obj_req, obj_req->img_request->snapc,
				    num_ops);
}

static void rbd_osd_req_destroy(struct ceph_osd_request *osd_req)
{
	ceph_osdc_put_request(osd_req);
}

static struct rbd_obj_request *rbd_obj_request_create(void)
{
	struct rbd_obj_request *obj_request;

	obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_NOIO);
	if (!obj_request)
		return NULL;

	ceph_object_extent_init(&obj_request->ex);
	kref_init(&obj_request->kref);

	dout("%s %p\n", __func__, obj_request);
	return obj_request;
}

static void rbd_obj_request_destroy(struct kref *kref)
{
	struct rbd_obj_request *obj_request;
	u32 i;

	obj_request = container_of(kref, struct rbd_obj_request, kref);

	dout("%s: obj %p\n", __func__, obj_request);

	if (obj_request->osd_req)
		rbd_osd_req_destroy(obj_request->osd_req);

	switch (obj_request->img_request->data_type) {
	case OBJ_REQUEST_NODATA:
	case OBJ_REQUEST_BIO:
	case OBJ_REQUEST_BVECS:
		break;		/* Nothing to do */
	case OBJ_REQUEST_OWN_BVECS:
		kfree(obj_request->bvec_pos.bvecs);
		break;
	default:
		rbd_assert(0);
	}

	kfree(obj_request->img_extents);
	if (obj_request->copyup_bvecs) {
		for (i = 0; i < obj_request->copyup_bvec_count; i++) {
			if (obj_request->copyup_bvecs[i].bv_page)
				__free_page(obj_request->copyup_bvecs[i].bv_page);
		}
		kfree(obj_request->copyup_bvecs);
	}

	kmem_cache_free(rbd_obj_request_cache, obj_request);
}

/* It's OK to call this for a device with no parent */

static void rbd_spec_put(struct rbd_spec *spec);
static void rbd_dev_unparent(struct rbd_device *rbd_dev)
{
	rbd_dev_remove_parent(rbd_dev);
	rbd_spec_put(rbd_dev->parent_spec);
	rbd_dev->parent_spec = NULL;
	rbd_dev->parent_overlap = 0;
}

/*
 * Parent image reference counting is used to determine when an
 * image's parent fields can be safely torn down--after there are no
 * more in-flight requests to the parent image.  When the last
 * reference is dropped, cleaning them up is safe.
 */
static void rbd_dev_parent_put(struct rbd_device *rbd_dev)
{
	int counter;

	if (!rbd_dev->parent_spec)
		return;

	counter = atomic_dec_return_safe(&rbd_dev->parent_ref);
	if (counter > 0)
		return;

	/* Last reference; clean up parent data structures */

	if (!counter)
		rbd_dev_unparent(rbd_dev);
	else
		rbd_warn(rbd_dev, "parent reference underflow");
}

/*
 * If an image has a non-zero parent overlap, get a reference to its
 * parent.
 *
 * Returns true if the rbd device has a parent with a non-zero
 * overlap and a reference for it was successfully taken, or
 * false otherwise.
 */
static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
{
	int counter = 0;

	if (!rbd_dev->parent_spec)
		return false;

	down_read(&rbd_dev->header_rwsem);
	if (rbd_dev->parent_overlap)
		counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
	up_read(&rbd_dev->header_rwsem);

	if (counter < 0)
		rbd_warn(rbd_dev, "parent reference overflow");

	return counter > 0;
}

/*
 * Caller is responsible for filling in the list of object requests
 * that comprises the image request, and the Linux request pointer
 * (if there is one).
 */
static struct rbd_img_request *rbd_img_request_create(
					struct rbd_device *rbd_dev,
					enum obj_operation_type op_type,
					struct ceph_snap_context *snapc)
{
	struct rbd_img_request *img_request;

	img_request = kmem_cache_zalloc(rbd_img_request_cache, GFP_NOIO);
	if (!img_request)
		return NULL;

	img_request->rbd_dev = rbd_dev;
	img_request->op_type = op_type;
	if (!rbd_img_is_write(img_request))
		img_request->snap_id = rbd_dev->spec->snap_id;
	else
		img_request->snapc = snapc;

	if (rbd_dev_parent_get(rbd_dev))
		img_request_layered_set(img_request);

	spin_lock_init(&img_request->completion_lock);
	INIT_LIST_HEAD(&img_request->object_extents);
	kref_init(&img_request->kref);

	dout("%s: rbd_dev %p %s -> img %p\n", __func__, rbd_dev,
	     obj_op_name(op_type), img_request);
	return img_request;
}

static void rbd_img_request_destroy(struct kref *kref)
{
	struct rbd_img_request *img_request;
	struct rbd_obj_request *obj_request;
	struct rbd_obj_request *next_obj_request;

	img_request = container_of(kref, struct rbd_img_request, kref);

	dout("%s: img %p\n", __func__, img_request);

	for_each_obj_request_safe(img_request, obj_request, next_obj_request)
		rbd_img_obj_request_del(img_request, obj_request);

	if (img_request_layered_test(img_request)) {
		img_request_layered_clear(img_request);
		rbd_dev_parent_put(img_request->rbd_dev);
	}

	if (rbd_img_is_write(img_request))
		ceph_put_snap_context(img_request->snapc);

	kmem_cache_free(rbd_img_request_cache, img_request);
}

static void prune_extents(struct ceph_file_extent *img_extents,
			  u32 *num_img_extents, u64 overlap)
{
	u32 cnt = *num_img_extents;

	/* drop extents completely beyond the overlap */
	while (cnt && img_extents[cnt - 1].fe_off >= overlap)
		cnt--;

	if (cnt) {
		struct ceph_file_extent *ex = &img_extents[cnt - 1];

		/* trim final overlapping extent */
		if (ex->fe_off + ex->fe_len > overlap)
			ex->fe_len = overlap - ex->fe_off;
	}

	*num_img_extents = cnt;
}
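
/*
 * Worked example (illustrative): with overlap == 8192 and extents
 * { 0~4096, 6144~4096 }, the second extent is trimmed to 6144~2048,
 * while an extent starting at or beyond 8192 would be dropped
 * entirely -- only the part of the image that overlaps the parent
 * needs to be read from it.
 */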

/*
 * Determine the byte range(s) covered by either just the object extent
 * or the entire object in the parent image.
 */
static int rbd_obj_calc_img_extents(struct rbd_obj_request *obj_req,
				    bool entire)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
	int ret;

	if (!rbd_dev->parent_overlap)
		return 0;

	ret = ceph_extent_to_file(&rbd_dev->layout, obj_req->ex.oe_objno,
				  entire ? 0 : obj_req->ex.oe_off,
				  entire ? rbd_dev->layout.object_size :
							obj_req->ex.oe_len,
				  &obj_req->img_extents,
				  &obj_req->num_img_extents);
	if (ret)
		return ret;

	prune_extents(obj_req->img_extents, &obj_req->num_img_extents,
		      rbd_dev->parent_overlap);
	return 0;
}

static void rbd_osd_req_setup_data(struct rbd_obj_request *obj_req, u32 which)
{
	switch (obj_req->img_request->data_type) {
	case OBJ_REQUEST_BIO:
		osd_req_op_extent_osd_data_bio(obj_req->osd_req, which,
					       &obj_req->bio_pos,
					       obj_req->ex.oe_len);
		break;
	case OBJ_REQUEST_BVECS:
	case OBJ_REQUEST_OWN_BVECS:
		rbd_assert(obj_req->bvec_pos.iter.bi_size ==
							obj_req->ex.oe_len);
		rbd_assert(obj_req->bvec_idx == obj_req->bvec_count);
		osd_req_op_extent_osd_data_bvec_pos(obj_req->osd_req, which,
						    &obj_req->bvec_pos);
		break;
	default:
		rbd_assert(0);
	}
}

static int rbd_obj_setup_read(struct rbd_obj_request *obj_req)
{
	obj_req->osd_req = __rbd_osd_req_create(obj_req, NULL, 1);
	if (!obj_req->osd_req)
		return -ENOMEM;

	osd_req_op_extent_init(obj_req->osd_req, 0, CEPH_OSD_OP_READ,
			       obj_req->ex.oe_off, obj_req->ex.oe_len, 0, 0);
	rbd_osd_req_setup_data(obj_req, 0);

	rbd_osd_req_format_read(obj_req);
	return 0;
}

static int __rbd_obj_setup_stat(struct rbd_obj_request *obj_req,
				unsigned int which)
{
	struct page **pages;

	/*
	 * The response data for a STAT call consists of:
	 *     le64 length;
	 *     struct {
	 *         le32 tv_sec;
	 *         le32 tv_nsec;
	 *     } mtime;
	 */
	pages = ceph_alloc_page_vector(1, GFP_NOIO);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	osd_req_op_init(obj_req->osd_req, which, CEPH_OSD_OP_STAT, 0);
	osd_req_op_raw_data_in_pages(obj_req->osd_req, which, pages,
				     8 + sizeof(struct ceph_timespec),
				     0, false, true);
	return 0;
}

static int count_write_ops(struct rbd_obj_request *obj_req)
{
	return 2; /* setallochint + write/writefull */
}

static void __rbd_obj_setup_write(struct rbd_obj_request *obj_req,
				  unsigned int which)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
	u16 opcode;

	osd_req_op_alloc_hint_init(obj_req->osd_req, which++,
				   rbd_dev->layout.object_size,
				   rbd_dev->layout.object_size);

	if (rbd_obj_is_entire(obj_req))
		opcode = CEPH_OSD_OP_WRITEFULL;
	else
		opcode = CEPH_OSD_OP_WRITE;

	osd_req_op_extent_init(obj_req->osd_req, which, opcode,
			       obj_req->ex.oe_off, obj_req->ex.oe_len, 0, 0);
	rbd_osd_req_setup_data(obj_req, which++);

	rbd_assert(which == obj_req->osd_req->r_num_ops);
	rbd_osd_req_format_write(obj_req);
}

static int rbd_obj_setup_write(struct rbd_obj_request *obj_req)
{
	unsigned int num_osd_ops, which = 0;
	bool need_guard;
	int ret;

	/* reverse map the entire object onto the parent */
	ret = rbd_obj_calc_img_extents(obj_req, true);
	if (ret)
		return ret;

	need_guard = rbd_obj_copyup_enabled(obj_req);
	num_osd_ops = need_guard + count_write_ops(obj_req);

	obj_req->osd_req = rbd_osd_req_create(obj_req, num_osd_ops);
	if (!obj_req->osd_req)
		return -ENOMEM;

	if (need_guard) {
		ret = __rbd_obj_setup_stat(obj_req, which++);
		if (ret)
			return ret;

		obj_req->write_state = RBD_OBJ_WRITE_GUARD;
	} else {
		obj_req->write_state = RBD_OBJ_WRITE_FLAT;
	}

	__rbd_obj_setup_write(obj_req, which);
	return 0;
}

static u16 truncate_or_zero_opcode(struct rbd_obj_request *obj_req)
{
	return rbd_obj_is_tail(obj_req) ? CEPH_OSD_OP_TRUNCATE :
					  CEPH_OSD_OP_ZERO;
}

static int rbd_obj_setup_discard(struct rbd_obj_request *obj_req)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
	u64 off = obj_req->ex.oe_off;
	u64 next_off = obj_req->ex.oe_off + obj_req->ex.oe_len;
	int ret;

	/*
	 * Align the range to alloc_size boundary and punt on discards
	 * that are too small to free up any space.
	 *
	 * alloc_size == object_size && is_tail() is a special case for
	 * filestore with filestore_punch_hole = false, needed to allow
	 * truncate (in addition to delete).
	 */
	if (rbd_dev->opts->alloc_size != rbd_dev->layout.object_size ||
	    !rbd_obj_is_tail(obj_req)) {
		off = round_up(off, rbd_dev->opts->alloc_size);
		next_off = round_down(next_off, rbd_dev->opts->alloc_size);
		if (off >= next_off)
			return 1;
	}

	/* reverse map the entire object onto the parent */
	ret = rbd_obj_calc_img_extents(obj_req, true);
	if (ret)
		return ret;

	obj_req->osd_req = rbd_osd_req_create(obj_req, 1);
	if (!obj_req->osd_req)
		return -ENOMEM;

	if (rbd_obj_is_entire(obj_req) && !obj_req->num_img_extents) {
		osd_req_op_init(obj_req->osd_req, 0, CEPH_OSD_OP_DELETE, 0);
	} else {
		dout("%s %p %llu~%llu -> %llu~%llu\n", __func__,
		     obj_req, obj_req->ex.oe_off, obj_req->ex.oe_len,
		     off, next_off - off);
		osd_req_op_extent_init(obj_req->osd_req, 0,
				       truncate_or_zero_opcode(obj_req),
				       off, next_off - off, 0, 0);
	}

	obj_req->write_state = RBD_OBJ_WRITE_FLAT;
	rbd_osd_req_format_write(obj_req);
	return 0;
}
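
/*
 * Alignment example (illustrative): with alloc_size == 65536, a
 * discard of 10240~92160 (i.e. [10240, 102400)) rounds to
 * [65536, 65536) -- an empty range -- and is punted (the "return 1"
 * above), since freeing less than one allocation unit can't release
 * any space.
 */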

static int count_zeroout_ops(struct rbd_obj_request *obj_req)
{
	int num_osd_ops;

	if (rbd_obj_is_entire(obj_req) && obj_req->num_img_extents)
		num_osd_ops = 2; /* create + truncate */
	else
		num_osd_ops = 1; /* delete/truncate/zero */

	return num_osd_ops;
}

static void __rbd_obj_setup_zeroout(struct rbd_obj_request *obj_req,
				    unsigned int which)
{
	u16 opcode;

	if (rbd_obj_is_entire(obj_req)) {
		if (obj_req->num_img_extents) {
			osd_req_op_init(obj_req->osd_req, which++,
					CEPH_OSD_OP_CREATE, 0);
			opcode = CEPH_OSD_OP_TRUNCATE;
		} else {
			osd_req_op_init(obj_req->osd_req, which++,
					CEPH_OSD_OP_DELETE, 0);
			opcode = 0;
		}
	} else {
		opcode = truncate_or_zero_opcode(obj_req);
	}

	if (opcode)
		osd_req_op_extent_init(obj_req->osd_req, which++, opcode,
				       obj_req->ex.oe_off, obj_req->ex.oe_len,
				       0, 0);

	rbd_assert(which == obj_req->osd_req->r_num_ops);
	rbd_osd_req_format_write(obj_req);
}

static int rbd_obj_setup_zeroout(struct rbd_obj_request *obj_req)
{
	unsigned int num_osd_ops, which = 0;
	bool need_guard;
	int ret;

	/* reverse map the entire object onto the parent */
	ret = rbd_obj_calc_img_extents(obj_req, true);
	if (ret)
		return ret;

	need_guard = rbd_obj_copyup_enabled(obj_req);
	num_osd_ops = need_guard + count_zeroout_ops(obj_req);

	obj_req->osd_req = rbd_osd_req_create(obj_req, num_osd_ops);
	if (!obj_req->osd_req)
		return -ENOMEM;

	if (need_guard) {
		ret = __rbd_obj_setup_stat(obj_req, which++);
		if (ret)
			return ret;

		obj_req->write_state = RBD_OBJ_WRITE_GUARD;
	} else {
		obj_req->write_state = RBD_OBJ_WRITE_FLAT;
	}

	__rbd_obj_setup_zeroout(obj_req, which);
	return 0;
}

/*
 * For each object request in @img_req, allocate an OSD request, add
 * individual OSD ops and prepare them for submission.  The number of
 * OSD ops depends on op_type and the overlap point (if any).
 */
static int __rbd_img_fill_request(struct rbd_img_request *img_req)
{
	struct rbd_obj_request *obj_req, *next_obj_req;
	int ret;

	for_each_obj_request_safe(img_req, obj_req, next_obj_req) {
		switch (img_req->op_type) {
		case OBJ_OP_READ:
			ret = rbd_obj_setup_read(obj_req);
			break;
		case OBJ_OP_WRITE:
			ret = rbd_obj_setup_write(obj_req);
			break;
		case OBJ_OP_DISCARD:
			ret = rbd_obj_setup_discard(obj_req);
			break;
		case OBJ_OP_ZEROOUT:
			ret = rbd_obj_setup_zeroout(obj_req);
			break;
		default:
			rbd_assert(0);
		}
		if (ret < 0)
			return ret;
		if (ret > 0) {
			img_req->xferred += obj_req->ex.oe_len;
			img_req->pending_count--;
			rbd_img_obj_request_del(img_req, obj_req);
			continue;
		}

		ret = ceph_osdc_alloc_messages(obj_req->osd_req, GFP_NOIO);
		if (ret)
			return ret;
	}

	return 0;
}

union rbd_img_fill_iter {
	struct ceph_bio_iter	bio_iter;
	struct ceph_bvec_iter	bvec_iter;
};

struct rbd_img_fill_ctx {
	enum obj_request_type	pos_type;
	union rbd_img_fill_iter	*pos;
	union rbd_img_fill_iter	iter;
	ceph_object_extent_fn_t	set_pos_fn;
	ceph_object_extent_fn_t	count_fn;
	ceph_object_extent_fn_t	copy_fn;
};

static struct ceph_object_extent *alloc_object_extent(void *arg)
{
	struct rbd_img_request *img_req = arg;
	struct rbd_obj_request *obj_req;

	obj_req = rbd_obj_request_create();
	if (!obj_req)
		return NULL;

	rbd_img_obj_request_add(img_req, obj_req);
	return &obj_req->ex;
}

/*
 * While su != os && sc == 1 is technically not fancy (it's the same
 * layout as su == os && sc == 1), we can't use the nocopy path for it
 * because ->set_pos_fn() should be called only once per object.
 * ceph_file_to_extents() invokes action_fn once per stripe unit, so
 * treat su != os && sc == 1 as fancy.
 */
static bool rbd_layout_is_fancy(struct ceph_file_layout *l)
{
	return l->stripe_unit != l->object_size;
}
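
/*
 * Example (illustrative): su == 1M, os == 4M, sc == 4 is fancy --
 * successive 1 MiB stripe units go to each of 4 objects in turn, so
 * a single object request's data may come from several discontiguous
 * chunks of the provided bio (list) or bio_vec array, which is why the
 * copy path below builds a private bio_vec array per object request.
 */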

static int rbd_img_fill_request_nocopy(struct rbd_img_request *img_req,
				       struct ceph_file_extent *img_extents,
				       u32 num_img_extents,
				       struct rbd_img_fill_ctx *fctx)
{
	u32 i;
	int ret;

	img_req->data_type = fctx->pos_type;

	/*
	 * Create object requests and set each object request's starting
	 * position in the provided bio (list) or bio_vec array.
	 */
	fctx->iter = *fctx->pos;
	for (i = 0; i < num_img_extents; i++) {
		ret = ceph_file_to_extents(&img_req->rbd_dev->layout,
					   img_extents[i].fe_off,
					   img_extents[i].fe_len,
					   &img_req->object_extents,
					   alloc_object_extent, img_req,
					   fctx->set_pos_fn, &fctx->iter);
		if (ret)
			return ret;
	}

	return __rbd_img_fill_request(img_req);
}

/*
 * Map a list of image extents to a list of object extents, create the
 * corresponding object requests (normally each to a different object,
 * but not always) and add them to @img_req.  For each object request,
 * set up its data descriptor to point to the corresponding chunk(s) of
 * @fctx->pos data buffer.
 *
 * Because ceph_file_to_extents() will merge adjacent object extents
 * together, each object request's data descriptor may point to multiple
 * different chunks of @fctx->pos data buffer.
 *
 * @fctx->pos data buffer is assumed to be large enough.
 */
static int rbd_img_fill_request(struct rbd_img_request *img_req,
				struct ceph_file_extent *img_extents,
				u32 num_img_extents,
				struct rbd_img_fill_ctx *fctx)
{
	struct rbd_device *rbd_dev = img_req->rbd_dev;
	struct rbd_obj_request *obj_req;
	u32 i;
	int ret;

	if (fctx->pos_type == OBJ_REQUEST_NODATA ||
	    !rbd_layout_is_fancy(&rbd_dev->layout))
		return rbd_img_fill_request_nocopy(img_req, img_extents,
						   num_img_extents, fctx);

	img_req->data_type = OBJ_REQUEST_OWN_BVECS;

	/*
	 * Create object requests and determine ->bvec_count for each object
	 * request.  Note that ->bvec_count sum over all object requests may
	 * be greater than the number of bio_vecs in the provided bio (list)
	 * or bio_vec array because when mapped, those bio_vecs can straddle
	 * stripe unit boundaries.
	 */
	fctx->iter = *fctx->pos;
	for (i = 0; i < num_img_extents; i++) {
		ret = ceph_file_to_extents(&rbd_dev->layout,
					   img_extents[i].fe_off,
					   img_extents[i].fe_len,
					   &img_req->object_extents,
					   alloc_object_extent, img_req,
					   fctx->count_fn, &fctx->iter);
		if (ret)
			return ret;
	}

	for_each_obj_request(img_req, obj_req) {
		obj_req->bvec_pos.bvecs = kmalloc_array(obj_req->bvec_count,
					      sizeof(*obj_req->bvec_pos.bvecs),
					      GFP_NOIO);
		if (!obj_req->bvec_pos.bvecs)
			return -ENOMEM;
	}

	/*
	 * Fill in each object request's private bio_vec array, splitting and
	 * rearranging the provided bio_vecs in stripe unit chunks as needed.
	 */
	fctx->iter = *fctx->pos;
	for (i = 0; i < num_img_extents; i++) {
		ret = ceph_iterate_extents(&rbd_dev->layout,
					   img_extents[i].fe_off,
					   img_extents[i].fe_len,
					   &img_req->object_extents,
					   fctx->copy_fn, &fctx->iter);
		if (ret)
			return ret;
	}

	return __rbd_img_fill_request(img_req);
}

static int rbd_img_fill_nodata(struct rbd_img_request *img_req,
			       u64 off, u64 len)
{
	struct ceph_file_extent ex = { off, len };
	union rbd_img_fill_iter dummy;
	struct rbd_img_fill_ctx fctx = {
		.pos_type = OBJ_REQUEST_NODATA,
		.pos = &dummy,
	};

	return rbd_img_fill_request(img_req, &ex, 1, &fctx);
}

static void set_bio_pos(struct ceph_object_extent *ex, u32 bytes, void *arg)
{
	struct rbd_obj_request *obj_req =
	    container_of(ex, struct rbd_obj_request, ex);
	struct ceph_bio_iter *it = arg;

	dout("%s objno %llu bytes %u\n", __func__, ex->oe_objno, bytes);
	obj_req->bio_pos = *it;
	ceph_bio_iter_advance(it, bytes);
}

static void count_bio_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg)
{
	struct rbd_obj_request *obj_req =
	    container_of(ex, struct rbd_obj_request, ex);
	struct ceph_bio_iter *it = arg;

	dout("%s objno %llu bytes %u\n", __func__, ex->oe_objno, bytes);
	ceph_bio_iter_advance_step(it, bytes, ({
		obj_req->bvec_count++;
	}));
}

static void copy_bio_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg)
{
	struct rbd_obj_request *obj_req =
	    container_of(ex, struct rbd_obj_request, ex);
	struct ceph_bio_iter *it = arg;

	dout("%s objno %llu bytes %u\n", __func__, ex->oe_objno, bytes);
	ceph_bio_iter_advance_step(it, bytes, ({
		obj_req->bvec_pos.bvecs[obj_req->bvec_idx++] = bv;
		obj_req->bvec_pos.iter.bi_size += bv.bv_len;
	}));
}

static int __rbd_img_fill_from_bio(struct rbd_img_request *img_req,
				   struct ceph_file_extent *img_extents,
				   u32 num_img_extents,
				   struct ceph_bio_iter *bio_pos)
{
	struct rbd_img_fill_ctx fctx = {
		.pos_type = OBJ_REQUEST_BIO,
		.pos = (union rbd_img_fill_iter *)bio_pos,
		.set_pos_fn = set_bio_pos,
		.count_fn = count_bio_bvecs,
		.copy_fn = copy_bio_bvecs,
	};

	return rbd_img_fill_request(img_req, img_extents, num_img_extents,
				    &fctx);
}

static int rbd_img_fill_from_bio(struct rbd_img_request *img_req,
				 u64 off, u64 len, struct bio *bio)
{
	struct ceph_file_extent ex = { off, len };
	struct ceph_bio_iter it = { .bio = bio, .iter = bio->bi_iter };

	return __rbd_img_fill_from_bio(img_req, &ex, 1, &it);
}

static void set_bvec_pos(struct ceph_object_extent *ex, u32 bytes, void *arg)
{
	struct rbd_obj_request *obj_req =
	    container_of(ex, struct rbd_obj_request, ex);
	struct ceph_bvec_iter *it = arg;

	obj_req->bvec_pos = *it;
	ceph_bvec_iter_shorten(&obj_req->bvec_pos, bytes);
	ceph_bvec_iter_advance(it, bytes);
}

static void count_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg)
{
	struct rbd_obj_request *obj_req =
	    container_of(ex, struct rbd_obj_request, ex);
	struct ceph_bvec_iter *it = arg;

	ceph_bvec_iter_advance_step(it, bytes, ({
		obj_req->bvec_count++;
	}));
}

static void copy_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg)
{
	struct rbd_obj_request *obj_req =
	    container_of(ex, struct rbd_obj_request, ex);
	struct ceph_bvec_iter *it = arg;

	ceph_bvec_iter_advance_step(it, bytes, ({
		obj_req->bvec_pos.bvecs[obj_req->bvec_idx++] = bv;
		obj_req->bvec_pos.iter.bi_size += bv.bv_len;
	}));
}

static int __rbd_img_fill_from_bvecs(struct rbd_img_request *img_req,
				     struct ceph_file_extent *img_extents,
				     u32 num_img_extents,
				     struct ceph_bvec_iter *bvec_pos)
{
	struct rbd_img_fill_ctx fctx = {
		.pos_type = OBJ_REQUEST_BVECS,
		.pos = (union rbd_img_fill_iter *)bvec_pos,
		.set_pos_fn = set_bvec_pos,
		.count_fn = count_bvecs,
		.copy_fn = copy_bvecs,
	};

	return rbd_img_fill_request(img_req, img_extents, num_img_extents,
				    &fctx);
}

static int rbd_img_fill_from_bvecs(struct rbd_img_request *img_req,
				   struct ceph_file_extent *img_extents,
				   u32 num_img_extents,
				   struct bio_vec *bvecs)
{
	struct ceph_bvec_iter it = {
		.bvecs = bvecs,
		.iter = { .bi_size = ceph_file_extents_bytes(img_extents,
							     num_img_extents) },
	};

	return __rbd_img_fill_from_bvecs(img_req, img_extents, num_img_extents,
					 &it);
}

static void rbd_img_request_submit(struct rbd_img_request *img_request)
{
	struct rbd_obj_request *obj_request;

	dout("%s: img %p\n", __func__, img_request);

	rbd_img_request_get(img_request);
	for_each_obj_request(img_request, obj_request)
		rbd_obj_request_submit(obj_request);

	rbd_img_request_put(img_request);
}

static int rbd_obj_read_from_parent(struct rbd_obj_request *obj_req)
{
	struct rbd_img_request *img_req = obj_req->img_request;
	struct rbd_img_request *child_img_req;
	int ret;

	child_img_req = rbd_img_request_create(img_req->rbd_dev->parent,
					       OBJ_OP_READ, NULL);
	if (!child_img_req)
		return -ENOMEM;

	__set_bit(IMG_REQ_CHILD, &child_img_req->flags);
	child_img_req->obj_request = obj_req;

	if (!rbd_img_is_write(img_req)) {
		switch (img_req->data_type) {
		case OBJ_REQUEST_BIO:
			ret = __rbd_img_fill_from_bio(child_img_req,
						      obj_req->img_extents,
						      obj_req->num_img_extents,
						      &obj_req->bio_pos);
			break;
		case OBJ_REQUEST_BVECS:
		case OBJ_REQUEST_OWN_BVECS:
			ret = __rbd_img_fill_from_bvecs(child_img_req,
						      obj_req->img_extents,
						      obj_req->num_img_extents,
						      &obj_req->bvec_pos);
			break;
		default:
			rbd_assert(0);
		}
	} else {
		ret = rbd_img_fill_from_bvecs(child_img_req,
					      obj_req->img_extents,
					      obj_req->num_img_extents,
					      obj_req->copyup_bvecs);
	}
	if (ret) {
		rbd_img_request_put(child_img_req);
		return ret;
	}

	rbd_img_request_submit(child_img_req);
	return 0;
}
static bool rbd_obj_handle_read(struct rbd_obj_request *obj_req)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
	int ret;

	if (obj_req->result == -ENOENT &&
	    rbd_dev->parent_overlap && !obj_req->tried_parent) {
		/* reverse map this object extent onto the parent */
		ret = rbd_obj_calc_img_extents(obj_req, false);
		if (ret) {
			obj_req->result = ret;
			return true;
		}

		if (obj_req->num_img_extents) {
			obj_req->tried_parent = true;
			ret = rbd_obj_read_from_parent(obj_req);
			if (ret) {
				obj_req->result = ret;
				return true;
			}
			return false;
		}
	}

	/*
	 * -ENOENT means a hole in the image -- zero-fill the entire
	 * length of the request.  A short read also implies zero-fill
	 * to the end of the request.  In both cases we update xferred
	 * count to indicate the whole request was satisfied.
	 */
	if (obj_req->result == -ENOENT ||
	    (!obj_req->result && obj_req->xferred < obj_req->ex.oe_len)) {
		rbd_assert(!obj_req->xferred || !obj_req->result);
		rbd_obj_zero_range(obj_req, obj_req->xferred,
				   obj_req->ex.oe_len - obj_req->xferred);
		obj_req->result = 0;
		obj_req->xferred = obj_req->ex.oe_len;
	}

	return true;
}
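/*
 * Copyup: before the first write to an object that so far exists only
 * in the parent image, the parent data for the object's overlap is read
 * in and written into the child object together with the original
 * request, so that subsequent I/O to that object no longer needs the
 * parent.
 */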
/* copyup_bvecs pages are never highmem pages */
static bool is_zero_bvecs(struct bio_vec *bvecs, u32 bytes)
{
	struct ceph_bvec_iter it = {
		.bvecs = bvecs,
		.iter = { .bi_size = bytes },
	};

	ceph_bvec_iter_advance_step(&it, bytes, ({
		if (memchr_inv(page_address(bv.bv_page) + bv.bv_offset, 0,
			       bv.bv_len))
			return false;
	}));
	return true;
}
static int rbd_obj_issue_copyup(struct rbd_obj_request *obj_req, u32 bytes)
{
	struct rbd_img_request *img_req = obj_req->img_request;
	unsigned int num_osd_ops = 1;
	int ret;

	dout("%s obj_req %p bytes %u\n", __func__, obj_req, bytes);
	rbd_assert(obj_req->osd_req->r_ops[0].op == CEPH_OSD_OP_STAT);
	rbd_osd_req_destroy(obj_req->osd_req);

	switch (img_req->op_type) {
	case OBJ_OP_WRITE:
		num_osd_ops += count_write_ops(obj_req);
		break;
	case OBJ_OP_ZEROOUT:
		num_osd_ops += count_zeroout_ops(obj_req);
		break;
	default:
		rbd_assert(0);
	}

	obj_req->osd_req = rbd_osd_req_create(obj_req, num_osd_ops);
	if (!obj_req->osd_req)
		return -ENOMEM;

	ret = osd_req_op_cls_init(obj_req->osd_req, 0, "rbd", "copyup");
	if (ret)
		return ret;

	/*
	 * Only send non-zero copyup data to save some I/O and network
	 * bandwidth -- zero copyup data is equivalent to the object not
	 * existing.
	 */
	if (is_zero_bvecs(obj_req->copyup_bvecs, bytes)) {
		dout("%s obj_req %p detected zeroes\n", __func__, obj_req);
		bytes = 0;
	}
	osd_req_op_cls_request_data_bvecs(obj_req->osd_req, 0,
					  obj_req->copyup_bvecs,
					  obj_req->copyup_bvec_count,
					  bytes);

	switch (img_req->op_type) {
	case OBJ_OP_WRITE:
		__rbd_obj_setup_write(obj_req, 1);
		break;
	case OBJ_OP_ZEROOUT:
		rbd_assert(!rbd_obj_is_entire(obj_req));
		__rbd_obj_setup_zeroout(obj_req, 1);
		break;
	default:
		rbd_assert(0);
	}

	ret = ceph_osdc_alloc_messages(obj_req->osd_req, GFP_NOIO);
	if (ret)
		return ret;

	rbd_obj_request_submit(obj_req);
	return 0;
}
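/*
 * Allocate a page vector big enough to cover the parent overlap of an
 * object request; it is filled by the read from the parent and later
 * handed to the copyup operation.
 */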
static int setup_copyup_bvecs(struct rbd_obj_request *obj_req, u64 obj_overlap)
{
	u32 i;

	rbd_assert(!obj_req->copyup_bvecs);
	obj_req->copyup_bvec_count = calc_pages_for(0, obj_overlap);
	obj_req->copyup_bvecs = kcalloc(obj_req->copyup_bvec_count,
					sizeof(*obj_req->copyup_bvecs),
					GFP_NOIO);
	if (!obj_req->copyup_bvecs)
		return -ENOMEM;

	for (i = 0; i < obj_req->copyup_bvec_count; i++) {
		unsigned int len = min(obj_overlap, (u64)PAGE_SIZE);

		obj_req->copyup_bvecs[i].bv_page = alloc_page(GFP_NOIO);
		if (!obj_req->copyup_bvecs[i].bv_page)
			return -ENOMEM;

		obj_req->copyup_bvecs[i].bv_offset = 0;
		obj_req->copyup_bvecs[i].bv_len = len;
		obj_overlap -= len;
	}

	rbd_assert(!obj_overlap);
	return 0;
}
static int rbd_obj_handle_write_guard(struct rbd_obj_request *obj_req)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
	int ret;

	rbd_assert(obj_req->num_img_extents);
	prune_extents(obj_req->img_extents, &obj_req->num_img_extents,
		      rbd_dev->parent_overlap);
	if (!obj_req->num_img_extents) {
		/*
		 * The overlap has become 0 (most likely because the
		 * image has been flattened).  Use rbd_obj_issue_copyup()
		 * to re-submit the original write request -- the copyup
		 * operation itself will be a no-op, since someone must
		 * have populated the child object while we weren't
		 * looking.  Move to WRITE_FLAT state as we'll be done
		 * with the operation once the null copyup completes.
		 */
		obj_req->write_state = RBD_OBJ_WRITE_FLAT;
		return rbd_obj_issue_copyup(obj_req, 0);
	}

	ret = setup_copyup_bvecs(obj_req, rbd_obj_img_extents_bytes(obj_req));
	if (ret)
		return ret;

	obj_req->write_state = RBD_OBJ_WRITE_COPYUP;
	return rbd_obj_read_from_parent(obj_req);
}
static bool rbd_obj_handle_write(struct rbd_obj_request *obj_req)
{
	int ret;

again:
	switch (obj_req->write_state) {
	case RBD_OBJ_WRITE_GUARD:
		rbd_assert(!obj_req->xferred);
		if (obj_req->result == -ENOENT) {
			/*
			 * The target object doesn't exist.  Read the data for
			 * the entire target object up to the overlap point (if
			 * any) from the parent, so we can use it for a copyup.
			 */
			ret = rbd_obj_handle_write_guard(obj_req);
			if (ret) {
				obj_req->result = ret;
				return true;
			}
			return false;
		}
		/* fall through */
	case RBD_OBJ_WRITE_FLAT:
		if (!obj_req->result)
			/*
			 * There is no such thing as a successful short
			 * write -- indicate the whole request was satisfied.
			 */
			obj_req->xferred = obj_req->ex.oe_len;
		return true;
	case RBD_OBJ_WRITE_COPYUP:
		obj_req->write_state = RBD_OBJ_WRITE_GUARD;
		if (obj_req->result)
			goto again;

		rbd_assert(obj_req->xferred);
		ret = rbd_obj_issue_copyup(obj_req, obj_req->xferred);
		if (ret) {
			obj_req->result = ret;
			obj_req->xferred = 0;
			return true;
		}
		return false;
	default:
		BUG();
	}
}
/*
 * Returns true if @obj_req is completed, or false otherwise.
 */
static bool __rbd_obj_handle_request(struct rbd_obj_request *obj_req)
{
	switch (obj_req->img_request->op_type) {
	case OBJ_OP_READ:
		return rbd_obj_handle_read(obj_req);
	case OBJ_OP_WRITE:
		return rbd_obj_handle_write(obj_req);
	case OBJ_OP_DISCARD:
	case OBJ_OP_ZEROOUT:
		if (rbd_obj_handle_write(obj_req)) {
			/*
			 * Hide -ENOENT from delete/truncate/zero -- discarding
			 * a non-existent object is not a problem.
			 */
			if (obj_req->result == -ENOENT) {
				obj_req->result = 0;
				obj_req->xferred = obj_req->ex.oe_len;
			}
			return true;
		}
		return false;
	default:
		BUG();
	}
}
static void rbd_obj_end_request(struct rbd_obj_request *obj_req)
{
	struct rbd_img_request *img_req = obj_req->img_request;

	rbd_assert((!obj_req->result &&
		    obj_req->xferred == obj_req->ex.oe_len) ||
		   (obj_req->result < 0 && !obj_req->xferred));
	if (!obj_req->result) {
		img_req->xferred += obj_req->xferred;
		return;
	}

	rbd_warn(img_req->rbd_dev,
		 "%s at objno %llu %llu~%llu result %d xferred %llu",
		 obj_op_name(img_req->op_type), obj_req->ex.oe_objno,
		 obj_req->ex.oe_off, obj_req->ex.oe_len, obj_req->result,
		 obj_req->xferred);
	if (!img_req->result) {
		img_req->result = obj_req->result;
		img_req->xferred = 0;
	}
}
static void rbd_img_end_child_request(struct rbd_img_request *img_req)
{
	struct rbd_obj_request *obj_req = img_req->obj_request;

	rbd_assert(test_bit(IMG_REQ_CHILD, &img_req->flags));
	rbd_assert((!img_req->result &&
		    img_req->xferred == rbd_obj_img_extents_bytes(obj_req)) ||
		   (img_req->result < 0 && !img_req->xferred));

	obj_req->result = img_req->result;
	obj_req->xferred = img_req->xferred;
	rbd_img_request_put(img_req);
}

static void rbd_img_end_request(struct rbd_img_request *img_req)
{
	rbd_assert(!test_bit(IMG_REQ_CHILD, &img_req->flags));
	rbd_assert((!img_req->result &&
		    img_req->xferred == blk_rq_bytes(img_req->rq)) ||
		   (img_req->result < 0 && !img_req->xferred));

	blk_mq_end_request(img_req->rq,
			   errno_to_blk_status(img_req->result));
	rbd_img_request_put(img_req);
}
static void rbd_obj_handle_request(struct rbd_obj_request *obj_req)
{
	struct rbd_img_request *img_req;

again:
	if (!__rbd_obj_handle_request(obj_req))
		return;

	img_req = obj_req->img_request;
	spin_lock(&img_req->completion_lock);
	rbd_obj_end_request(obj_req);
	rbd_assert(img_req->pending_count);
	if (--img_req->pending_count) {
		spin_unlock(&img_req->completion_lock);
		return;
	}

	spin_unlock(&img_req->completion_lock);
	if (test_bit(IMG_REQ_CHILD, &img_req->flags)) {
		obj_req = img_req->obj_request;
		rbd_img_end_child_request(img_req);
		goto again;
	}
	rbd_img_end_request(img_req);
}
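/*
 * Exclusive lock support.  A client instance is identified by a
 * (gid, handle) pair: gid is the global id of our ceph client and
 * handle is our watch cookie, so the id changes whenever the watch
 * is re-established.
 */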
2734 static const struct rbd_client_id rbd_empty_cid;
2736 static bool rbd_cid_equal(const struct rbd_client_id *lhs,
2737 const struct rbd_client_id *rhs)
2739 return lhs->gid == rhs->gid && lhs->handle == rhs->handle;
2742 static struct rbd_client_id rbd_get_cid(struct rbd_device *rbd_dev)
2744 struct rbd_client_id cid;
2746 mutex_lock(&rbd_dev->watch_mutex);
2747 cid.gid = ceph_client_gid(rbd_dev->rbd_client->client);
2748 cid.handle = rbd_dev->watch_cookie;
2749 mutex_unlock(&rbd_dev->watch_mutex);
/*
 * lock_rwsem must be held for write
 */
2756 static void rbd_set_owner_cid(struct rbd_device *rbd_dev,
2757 const struct rbd_client_id *cid)
2759 dout("%s rbd_dev %p %llu-%llu -> %llu-%llu\n", __func__, rbd_dev,
2760 rbd_dev->owner_cid.gid, rbd_dev->owner_cid.handle,
2761 cid->gid, cid->handle);
2762 rbd_dev->owner_cid = *cid; /* struct */
2765 static void format_lock_cookie(struct rbd_device *rbd_dev, char *buf)
2767 mutex_lock(&rbd_dev->watch_mutex);
2768 sprintf(buf, "%s %llu", RBD_LOCK_COOKIE_PREFIX, rbd_dev->watch_cookie);
2769 mutex_unlock(&rbd_dev->watch_mutex);
2772 static void __rbd_lock(struct rbd_device *rbd_dev, const char *cookie)
2774 struct rbd_client_id cid = rbd_get_cid(rbd_dev);
2776 strcpy(rbd_dev->lock_cookie, cookie);
2777 rbd_set_owner_cid(rbd_dev, &cid);
2778 queue_work(rbd_dev->task_wq, &rbd_dev->acquired_lock_work);
/*
 * lock_rwsem must be held for write
 */
2784 static int rbd_lock(struct rbd_device *rbd_dev)
2786 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2790 WARN_ON(__rbd_is_lock_owner(rbd_dev) ||
2791 rbd_dev->lock_cookie[0] != '\0');
2793 format_lock_cookie(rbd_dev, cookie);
2794 ret = ceph_cls_lock(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
2795 RBD_LOCK_NAME, CEPH_CLS_LOCK_EXCLUSIVE, cookie,
2796 RBD_LOCK_TAG, "", 0);
2800 rbd_dev->lock_state = RBD_LOCK_STATE_LOCKED;
2801 __rbd_lock(rbd_dev, cookie);
/*
 * lock_rwsem must be held for write
 */
2808 static void rbd_unlock(struct rbd_device *rbd_dev)
2810 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2813 WARN_ON(!__rbd_is_lock_owner(rbd_dev) ||
2814 rbd_dev->lock_cookie[0] == '\0');
2816 ret = ceph_cls_unlock(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
2817 RBD_LOCK_NAME, rbd_dev->lock_cookie);
2818 if (ret && ret != -ENOENT)
2819 rbd_warn(rbd_dev, "failed to unlock: %d", ret);
2821 /* treat errors as the image is unlocked */
2822 rbd_dev->lock_state = RBD_LOCK_STATE_UNLOCKED;
2823 rbd_dev->lock_cookie[0] = '\0';
2824 rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
2825 queue_work(rbd_dev->task_wq, &rbd_dev->released_lock_work);
2828 static int __rbd_notify_op_lock(struct rbd_device *rbd_dev,
2829 enum rbd_notify_op notify_op,
2830 struct page ***preply_pages,
2833 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2834 struct rbd_client_id cid = rbd_get_cid(rbd_dev);
2835 char buf[4 + 8 + 8 + CEPH_ENCODING_START_BLK_LEN];
2836 int buf_size = sizeof(buf);
2839 dout("%s rbd_dev %p notify_op %d\n", __func__, rbd_dev, notify_op);
2841 /* encode *LockPayload NotifyMessage (op + ClientId) */
2842 ceph_start_encoding(&p, 2, 1, buf_size - CEPH_ENCODING_START_BLK_LEN);
2843 ceph_encode_32(&p, notify_op);
2844 ceph_encode_64(&p, cid.gid);
2845 ceph_encode_64(&p, cid.handle);
2847 return ceph_osdc_notify(osdc, &rbd_dev->header_oid,
2848 &rbd_dev->header_oloc, buf, buf_size,
2849 RBD_NOTIFY_TIMEOUT, preply_pages, preply_len);
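/*
 * Fire-and-forget variant: send the notification and discard whatever
 * acknowledgments come back.
 */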
2852 static void rbd_notify_op_lock(struct rbd_device *rbd_dev,
2853 enum rbd_notify_op notify_op)
2855 struct page **reply_pages;
2858 __rbd_notify_op_lock(rbd_dev, notify_op, &reply_pages, &reply_len);
2859 ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len));
2862 static void rbd_notify_acquired_lock(struct work_struct *work)
2864 struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
2865 acquired_lock_work);
2867 rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_ACQUIRED_LOCK);
2870 static void rbd_notify_released_lock(struct work_struct *work)
2872 struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
2873 released_lock_work);
2875 rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_RELEASED_LOCK);
2878 static int rbd_request_lock(struct rbd_device *rbd_dev)
2880 struct page **reply_pages;
2882 bool lock_owner_responded = false;
2885 dout("%s rbd_dev %p\n", __func__, rbd_dev);
2887 ret = __rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_REQUEST_LOCK,
2888 &reply_pages, &reply_len);
2889 if (ret && ret != -ETIMEDOUT) {
2890 rbd_warn(rbd_dev, "failed to request lock: %d", ret);
2894 if (reply_len > 0 && reply_len <= PAGE_SIZE) {
2895 void *p = page_address(reply_pages[0]);
2896 void *const end = p + reply_len;
2899 ceph_decode_32_safe(&p, end, n, e_inval); /* num_acks */
2904 ceph_decode_need(&p, end, 8 + 8, e_inval);
2905 p += 8 + 8; /* skip gid and cookie */
2907 ceph_decode_32_safe(&p, end, len, e_inval);
2911 if (lock_owner_responded) {
2913 "duplicate lock owners detected");
2918 lock_owner_responded = true;
2919 ret = ceph_start_decoding(&p, end, 1, "ResponseMessage",
2923 "failed to decode ResponseMessage: %d",
2928 ret = ceph_decode_32(&p);
2932 if (!lock_owner_responded) {
2933 rbd_warn(rbd_dev, "no lock owners detected");
2938 ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len));
2946 static void wake_requests(struct rbd_device *rbd_dev, bool wake_all)
2948 dout("%s rbd_dev %p wake_all %d\n", __func__, rbd_dev, wake_all);
2950 cancel_delayed_work(&rbd_dev->lock_dwork);
2952 wake_up_all(&rbd_dev->lock_waitq);
2954 wake_up(&rbd_dev->lock_waitq);
2957 static int get_lock_owner_info(struct rbd_device *rbd_dev,
2958 struct ceph_locker **lockers, u32 *num_lockers)
2960 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
2965 dout("%s rbd_dev %p\n", __func__, rbd_dev);
2967 ret = ceph_cls_lock_info(osdc, &rbd_dev->header_oid,
2968 &rbd_dev->header_oloc, RBD_LOCK_NAME,
2969 &lock_type, &lock_tag, lockers, num_lockers);
2973 if (*num_lockers == 0) {
2974 dout("%s rbd_dev %p no lockers detected\n", __func__, rbd_dev);
2978 if (strcmp(lock_tag, RBD_LOCK_TAG)) {
2979 rbd_warn(rbd_dev, "locked by external mechanism, tag %s",
2985 if (lock_type == CEPH_CLS_LOCK_SHARED) {
2986 rbd_warn(rbd_dev, "shared lock type detected");
2991 if (strncmp((*lockers)[0].id.cookie, RBD_LOCK_COOKIE_PREFIX,
2992 strlen(RBD_LOCK_COOKIE_PREFIX))) {
2993 rbd_warn(rbd_dev, "locked by external mechanism, cookie %s",
2994 (*lockers)[0].id.cookie);
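/*
 * Determine whether the locker that owns the lock on the header object
 * is still alive: a live lock owner also holds a watch on the header
 * object, established with the same entity address and cookie that were
 * recorded in the lock.
 */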
3004 static int find_watcher(struct rbd_device *rbd_dev,
3005 const struct ceph_locker *locker)
3007 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3008 struct ceph_watch_item *watchers;
3014 ret = ceph_osdc_list_watchers(osdc, &rbd_dev->header_oid,
3015 &rbd_dev->header_oloc, &watchers,
3020 sscanf(locker->id.cookie, RBD_LOCK_COOKIE_PREFIX " %llu", &cookie);
3021 for (i = 0; i < num_watchers; i++) {
3022 if (!memcmp(&watchers[i].addr, &locker->info.addr,
3023 sizeof(locker->info.addr)) &&
3024 watchers[i].cookie == cookie) {
3025 struct rbd_client_id cid = {
3026 .gid = le64_to_cpu(watchers[i].name.num),
3030 dout("%s rbd_dev %p found cid %llu-%llu\n", __func__,
3031 rbd_dev, cid.gid, cid.handle);
3032 rbd_set_owner_cid(rbd_dev, &cid);
3038 dout("%s rbd_dev %p no watchers\n", __func__, rbd_dev);
/*
 * lock_rwsem must be held for write
 */
3048 static int rbd_try_lock(struct rbd_device *rbd_dev)
3050 struct ceph_client *client = rbd_dev->rbd_client->client;
3051 struct ceph_locker *lockers;
3056 ret = rbd_lock(rbd_dev);
3060 /* determine if the current lock holder is still alive */
3061 ret = get_lock_owner_info(rbd_dev, &lockers, &num_lockers);
3065 if (num_lockers == 0)
3068 ret = find_watcher(rbd_dev, lockers);
3071 ret = 0; /* have to request lock */
3075 rbd_warn(rbd_dev, "%s%llu seems dead, breaking lock",
3076 ENTITY_NAME(lockers[0].id.name));
3078 ret = ceph_monc_blacklist_add(&client->monc,
3079 &lockers[0].info.addr);
3081 rbd_warn(rbd_dev, "blacklist of %s%llu failed: %d",
3082 ENTITY_NAME(lockers[0].id.name), ret);
3086 ret = ceph_cls_break_lock(&client->osdc, &rbd_dev->header_oid,
3087 &rbd_dev->header_oloc, RBD_LOCK_NAME,
3088 lockers[0].id.cookie,
3089 &lockers[0].id.name);
3090 if (ret && ret != -ENOENT)
3094 ceph_free_lockers(lockers, num_lockers);
3098 ceph_free_lockers(lockers, num_lockers);
/*
 * ret is set only if lock_state is RBD_LOCK_STATE_UNLOCKED
 */
3105 static enum rbd_lock_state rbd_try_acquire_lock(struct rbd_device *rbd_dev,
3108 enum rbd_lock_state lock_state;
3110 down_read(&rbd_dev->lock_rwsem);
3111 dout("%s rbd_dev %p read lock_state %d\n", __func__, rbd_dev,
3112 rbd_dev->lock_state);
3113 if (__rbd_is_lock_owner(rbd_dev)) {
3114 lock_state = rbd_dev->lock_state;
3115 up_read(&rbd_dev->lock_rwsem);
3119 up_read(&rbd_dev->lock_rwsem);
3120 down_write(&rbd_dev->lock_rwsem);
3121 dout("%s rbd_dev %p write lock_state %d\n", __func__, rbd_dev,
3122 rbd_dev->lock_state);
3123 if (!__rbd_is_lock_owner(rbd_dev)) {
3124 *pret = rbd_try_lock(rbd_dev);
3126 rbd_warn(rbd_dev, "failed to acquire lock: %d", *pret);
3129 lock_state = rbd_dev->lock_state;
3130 up_write(&rbd_dev->lock_rwsem);
3134 static void rbd_acquire_lock(struct work_struct *work)
3136 struct rbd_device *rbd_dev = container_of(to_delayed_work(work),
3137 struct rbd_device, lock_dwork);
3138 enum rbd_lock_state lock_state;
3141 dout("%s rbd_dev %p\n", __func__, rbd_dev);
3143 lock_state = rbd_try_acquire_lock(rbd_dev, &ret);
3144 if (lock_state != RBD_LOCK_STATE_UNLOCKED || ret == -EBLACKLISTED) {
3145 if (lock_state == RBD_LOCK_STATE_LOCKED)
3146 wake_requests(rbd_dev, true);
3147 dout("%s rbd_dev %p lock_state %d ret %d - done\n", __func__,
3148 rbd_dev, lock_state, ret);
3152 ret = rbd_request_lock(rbd_dev);
3153 if (ret == -ETIMEDOUT) {
3154 goto again; /* treat this as a dead client */
3155 } else if (ret == -EROFS) {
3156 rbd_warn(rbd_dev, "peer will not release lock");
		/*
		 * If this is rbd_add_acquire_lock(), we want to fail
		 * immediately -- reuse BLACKLISTED flag.  Otherwise we
		 * want to block.
		 */
3162 if (!(rbd_dev->disk->flags & GENHD_FL_UP)) {
3163 set_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags);
3164 /* wake "rbd map --exclusive" process */
3165 wake_requests(rbd_dev, false);
3167 } else if (ret < 0) {
3168 rbd_warn(rbd_dev, "error requesting lock: %d", ret);
3169 mod_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork,
		/*
		 * lock owner acked, but resend if we don't see them
		 * release the lock
		 */
3176 dout("%s rbd_dev %p requeueing lock_dwork\n", __func__,
3178 mod_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork,
3179 msecs_to_jiffies(2 * RBD_NOTIFY_TIMEOUT * MSEC_PER_SEC));
/*
 * lock_rwsem must be held for write
 */
3186 static bool rbd_release_lock(struct rbd_device *rbd_dev)
3188 dout("%s rbd_dev %p read lock_state %d\n", __func__, rbd_dev,
3189 rbd_dev->lock_state);
3190 if (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED)
3193 rbd_dev->lock_state = RBD_LOCK_STATE_RELEASING;
3194 downgrade_write(&rbd_dev->lock_rwsem);
	/*
	 * Ensure that all in-flight IO is flushed.
	 *
	 * FIXME: ceph_osdc_sync() flushes the entire OSD client, which
	 * may be shared with other devices.
	 */
3201 ceph_osdc_sync(&rbd_dev->rbd_client->client->osdc);
3202 up_read(&rbd_dev->lock_rwsem);
3204 down_write(&rbd_dev->lock_rwsem);
3205 dout("%s rbd_dev %p write lock_state %d\n", __func__, rbd_dev,
3206 rbd_dev->lock_state);
3207 if (rbd_dev->lock_state != RBD_LOCK_STATE_RELEASING)
3210 rbd_unlock(rbd_dev);
	/*
	 * Give others a chance to grab the lock - we would re-acquire
	 * almost immediately if we got new IO during ceph_osdc_sync()
	 * otherwise.  We need to ack our own notifications, so this
	 * lock_dwork will be requeued from rbd_wait_state_locked()
	 * after wake_requests() in rbd_handle_released_lock().
	 */
	cancel_delayed_work(&rbd_dev->lock_dwork);
	return true;
}
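/*
 * Worker wrapper for releasing the lock with lock_rwsem held for write.
 */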
3222 static void rbd_release_lock_work(struct work_struct *work)
3224 struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
3227 down_write(&rbd_dev->lock_rwsem);
3228 rbd_release_lock(rbd_dev);
3229 up_write(&rbd_dev->lock_rwsem);
3232 static void rbd_handle_acquired_lock(struct rbd_device *rbd_dev, u8 struct_v,
3235 struct rbd_client_id cid = { 0 };
3237 if (struct_v >= 2) {
3238 cid.gid = ceph_decode_64(p);
3239 cid.handle = ceph_decode_64(p);
3242 dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
3244 if (!rbd_cid_equal(&cid, &rbd_empty_cid)) {
3245 down_write(&rbd_dev->lock_rwsem);
3246 if (rbd_cid_equal(&cid, &rbd_dev->owner_cid)) {
			/*
			 * we already know that the remote client is
			 * the owner
			 */
			up_write(&rbd_dev->lock_rwsem);
			return;
		}
3255 rbd_set_owner_cid(rbd_dev, &cid);
3256 downgrade_write(&rbd_dev->lock_rwsem);
3258 down_read(&rbd_dev->lock_rwsem);
3261 if (!__rbd_is_lock_owner(rbd_dev))
3262 wake_requests(rbd_dev, false);
3263 up_read(&rbd_dev->lock_rwsem);
3266 static void rbd_handle_released_lock(struct rbd_device *rbd_dev, u8 struct_v,
3269 struct rbd_client_id cid = { 0 };
3271 if (struct_v >= 2) {
3272 cid.gid = ceph_decode_64(p);
3273 cid.handle = ceph_decode_64(p);
3276 dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
3278 if (!rbd_cid_equal(&cid, &rbd_empty_cid)) {
3279 down_write(&rbd_dev->lock_rwsem);
3280 if (!rbd_cid_equal(&cid, &rbd_dev->owner_cid)) {
3281 dout("%s rbd_dev %p unexpected owner, cid %llu-%llu != owner_cid %llu-%llu\n",
3282 __func__, rbd_dev, cid.gid, cid.handle,
3283 rbd_dev->owner_cid.gid, rbd_dev->owner_cid.handle);
3284 up_write(&rbd_dev->lock_rwsem);
3288 rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
3289 downgrade_write(&rbd_dev->lock_rwsem);
3291 down_read(&rbd_dev->lock_rwsem);
3294 if (!__rbd_is_lock_owner(rbd_dev))
3295 wake_requests(rbd_dev, false);
3296 up_read(&rbd_dev->lock_rwsem);
/*
 * Returns result for ResponseMessage to be encoded (<= 0), or 1 if no
 * ResponseMessage is needed.
 */
3303 static int rbd_handle_request_lock(struct rbd_device *rbd_dev, u8 struct_v,
3306 struct rbd_client_id my_cid = rbd_get_cid(rbd_dev);
3307 struct rbd_client_id cid = { 0 };
3310 if (struct_v >= 2) {
3311 cid.gid = ceph_decode_64(p);
3312 cid.handle = ceph_decode_64(p);
3315 dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
3317 if (rbd_cid_equal(&cid, &my_cid))
3320 down_read(&rbd_dev->lock_rwsem);
3321 if (__rbd_is_lock_owner(rbd_dev)) {
3322 if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED &&
		if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED &&
		    rbd_cid_equal(&rbd_dev->owner_cid, &rbd_empty_cid))
			goto out_unlock;

		/*
		 * encode ResponseMessage(0) so the peer can detect
		 * a missing owner
		 */
		result = 0;
3332 if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED) {
3333 if (!rbd_dev->opts->exclusive) {
3334 dout("%s rbd_dev %p queueing unlock_work\n",
3336 queue_work(rbd_dev->task_wq,
3337 &rbd_dev->unlock_work);
			} else {
				/* refuse to release the lock */
				result = -EROFS;
			}
		}
	}

out_unlock:
	up_read(&rbd_dev->lock_rwsem);
	return result;
}
3350 static void __rbd_acknowledge_notify(struct rbd_device *rbd_dev,
3351 u64 notify_id, u64 cookie, s32 *result)
3353 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3354 char buf[4 + CEPH_ENCODING_START_BLK_LEN];
3355 int buf_size = sizeof(buf);
3361 /* encode ResponseMessage */
3362 ceph_start_encoding(&p, 1, 1,
3363 buf_size - CEPH_ENCODING_START_BLK_LEN);
3364 ceph_encode_32(&p, *result);
3369 ret = ceph_osdc_notify_ack(osdc, &rbd_dev->header_oid,
3370 &rbd_dev->header_oloc, notify_id, cookie,
3373 rbd_warn(rbd_dev, "acknowledge_notify failed: %d", ret);
3376 static void rbd_acknowledge_notify(struct rbd_device *rbd_dev, u64 notify_id,
3379 dout("%s rbd_dev %p\n", __func__, rbd_dev);
3380 __rbd_acknowledge_notify(rbd_dev, notify_id, cookie, NULL);
3383 static void rbd_acknowledge_notify_result(struct rbd_device *rbd_dev,
3384 u64 notify_id, u64 cookie, s32 result)
3386 dout("%s rbd_dev %p result %d\n", __func__, rbd_dev, result);
3387 __rbd_acknowledge_notify(rbd_dev, notify_id, cookie, &result);
3390 static void rbd_watch_cb(void *arg, u64 notify_id, u64 cookie,
3391 u64 notifier_id, void *data, size_t data_len)
3393 struct rbd_device *rbd_dev = arg;
3395 void *const end = p + data_len;
3401 dout("%s rbd_dev %p cookie %llu notify_id %llu data_len %zu\n",
3402 __func__, rbd_dev, cookie, notify_id, data_len);
3404 ret = ceph_start_decoding(&p, end, 1, "NotifyMessage",
3407 rbd_warn(rbd_dev, "failed to decode NotifyMessage: %d",
		notify_op = ceph_decode_32(&p);
	} else {
		/* legacy notification for header updates */
		notify_op = RBD_NOTIFY_OP_HEADER_UPDATE;
		len = 0;
	}
3419 dout("%s rbd_dev %p notify_op %u\n", __func__, rbd_dev, notify_op);
3420 switch (notify_op) {
3421 case RBD_NOTIFY_OP_ACQUIRED_LOCK:
3422 rbd_handle_acquired_lock(rbd_dev, struct_v, &p);
3423 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
3425 case RBD_NOTIFY_OP_RELEASED_LOCK:
3426 rbd_handle_released_lock(rbd_dev, struct_v, &p);
3427 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
3429 case RBD_NOTIFY_OP_REQUEST_LOCK:
3430 ret = rbd_handle_request_lock(rbd_dev, struct_v, &p);
3432 rbd_acknowledge_notify_result(rbd_dev, notify_id,
3435 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
3437 case RBD_NOTIFY_OP_HEADER_UPDATE:
3438 ret = rbd_dev_refresh(rbd_dev);
3440 rbd_warn(rbd_dev, "refresh failed: %d", ret);
3442 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
3445 if (rbd_is_lock_owner(rbd_dev))
3446 rbd_acknowledge_notify_result(rbd_dev, notify_id,
3447 cookie, -EOPNOTSUPP);
3449 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
3454 static void __rbd_unregister_watch(struct rbd_device *rbd_dev);
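/*
 * A watch error typically means the session was reset or the client got
 * blacklisted.  Forget the lock owner, tear down the watch and let
 * rbd_reregister_watch() re-establish it from delayed work.
 */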
3456 static void rbd_watch_errcb(void *arg, u64 cookie, int err)
3458 struct rbd_device *rbd_dev = arg;
3460 rbd_warn(rbd_dev, "encountered watch error: %d", err);
3462 down_write(&rbd_dev->lock_rwsem);
3463 rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
3464 up_write(&rbd_dev->lock_rwsem);
3466 mutex_lock(&rbd_dev->watch_mutex);
3467 if (rbd_dev->watch_state == RBD_WATCH_STATE_REGISTERED) {
3468 __rbd_unregister_watch(rbd_dev);
3469 rbd_dev->watch_state = RBD_WATCH_STATE_ERROR;
3471 queue_delayed_work(rbd_dev->task_wq, &rbd_dev->watch_dwork, 0);
3473 mutex_unlock(&rbd_dev->watch_mutex);
/*
 * watch_mutex must be locked
 */
3479 static int __rbd_register_watch(struct rbd_device *rbd_dev)
3481 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3482 struct ceph_osd_linger_request *handle;
3484 rbd_assert(!rbd_dev->watch_handle);
3485 dout("%s rbd_dev %p\n", __func__, rbd_dev);
3487 handle = ceph_osdc_watch(osdc, &rbd_dev->header_oid,
3488 &rbd_dev->header_oloc, rbd_watch_cb,
3489 rbd_watch_errcb, rbd_dev);
	if (IS_ERR(handle))
		return PTR_ERR(handle);
3493 rbd_dev->watch_handle = handle;
/*
 * watch_mutex must be locked
 */
3500 static void __rbd_unregister_watch(struct rbd_device *rbd_dev)
3502 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3505 rbd_assert(rbd_dev->watch_handle);
3506 dout("%s rbd_dev %p\n", __func__, rbd_dev);
3508 ret = ceph_osdc_unwatch(osdc, rbd_dev->watch_handle);
3510 rbd_warn(rbd_dev, "failed to unwatch: %d", ret);
3512 rbd_dev->watch_handle = NULL;
3515 static int rbd_register_watch(struct rbd_device *rbd_dev)
3519 mutex_lock(&rbd_dev->watch_mutex);
3520 rbd_assert(rbd_dev->watch_state == RBD_WATCH_STATE_UNREGISTERED);
3521 ret = __rbd_register_watch(rbd_dev);
3525 rbd_dev->watch_state = RBD_WATCH_STATE_REGISTERED;
3526 rbd_dev->watch_cookie = rbd_dev->watch_handle->linger_id;
3529 mutex_unlock(&rbd_dev->watch_mutex);
3533 static void cancel_tasks_sync(struct rbd_device *rbd_dev)
3535 dout("%s rbd_dev %p\n", __func__, rbd_dev);
3537 cancel_work_sync(&rbd_dev->acquired_lock_work);
3538 cancel_work_sync(&rbd_dev->released_lock_work);
3539 cancel_delayed_work_sync(&rbd_dev->lock_dwork);
3540 cancel_work_sync(&rbd_dev->unlock_work);
3543 static void rbd_unregister_watch(struct rbd_device *rbd_dev)
3545 WARN_ON(waitqueue_active(&rbd_dev->lock_waitq));
3546 cancel_tasks_sync(rbd_dev);
3548 mutex_lock(&rbd_dev->watch_mutex);
3549 if (rbd_dev->watch_state == RBD_WATCH_STATE_REGISTERED)
3550 __rbd_unregister_watch(rbd_dev);
3551 rbd_dev->watch_state = RBD_WATCH_STATE_UNREGISTERED;
3552 mutex_unlock(&rbd_dev->watch_mutex);
3554 cancel_delayed_work_sync(&rbd_dev->watch_dwork);
3555 ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);
/*
 * lock_rwsem must be held for write
 */
3561 static void rbd_reacquire_lock(struct rbd_device *rbd_dev)
3563 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3567 WARN_ON(rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED);
3569 format_lock_cookie(rbd_dev, cookie);
3570 ret = ceph_cls_set_cookie(osdc, &rbd_dev->header_oid,
3571 &rbd_dev->header_oloc, RBD_LOCK_NAME,
3572 CEPH_CLS_LOCK_EXCLUSIVE, rbd_dev->lock_cookie,
3573 RBD_LOCK_TAG, cookie);
3575 if (ret != -EOPNOTSUPP)
3576 rbd_warn(rbd_dev, "failed to update lock cookie: %d",
3580 * Lock cookie cannot be updated on older OSDs, so do
3581 * a manual release and queue an acquire.
3583 if (rbd_release_lock(rbd_dev))
3584 queue_delayed_work(rbd_dev->task_wq,
3585 &rbd_dev->lock_dwork, 0);
3587 __rbd_lock(rbd_dev, cookie);
3591 static void rbd_reregister_watch(struct work_struct *work)
3593 struct rbd_device *rbd_dev = container_of(to_delayed_work(work),
3594 struct rbd_device, watch_dwork);
3597 dout("%s rbd_dev %p\n", __func__, rbd_dev);
3599 mutex_lock(&rbd_dev->watch_mutex);
3600 if (rbd_dev->watch_state != RBD_WATCH_STATE_ERROR) {
3601 mutex_unlock(&rbd_dev->watch_mutex);
3605 ret = __rbd_register_watch(rbd_dev);
3607 rbd_warn(rbd_dev, "failed to reregister watch: %d", ret);
3608 if (ret == -EBLACKLISTED || ret == -ENOENT) {
3609 set_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags);
3610 wake_requests(rbd_dev, true);
3612 queue_delayed_work(rbd_dev->task_wq,
3613 &rbd_dev->watch_dwork,
3616 mutex_unlock(&rbd_dev->watch_mutex);
3620 rbd_dev->watch_state = RBD_WATCH_STATE_REGISTERED;
3621 rbd_dev->watch_cookie = rbd_dev->watch_handle->linger_id;
3622 mutex_unlock(&rbd_dev->watch_mutex);
3624 down_write(&rbd_dev->lock_rwsem);
3625 if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED)
3626 rbd_reacquire_lock(rbd_dev);
3627 up_write(&rbd_dev->lock_rwsem);
3629 ret = rbd_dev_refresh(rbd_dev);
3631 rbd_warn(rbd_dev, "reregistration refresh failed: %d", ret);
/*
 * Synchronous osd object method call.  Returns the number of bytes
 * returned in the inbound buffer, or a negative error code.
 */
3638 static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
3639 struct ceph_object_id *oid,
3640 struct ceph_object_locator *oloc,
3641 const char *method_name,
3642 const void *outbound,
3643 size_t outbound_size,
3645 size_t inbound_size)
3647 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3648 struct page *req_page = NULL;
3649 struct page *reply_page;
	/*
	 * Method calls are ultimately read operations.  The result
	 * should be placed into the inbound buffer provided.  They
	 * also supply outbound data -- parameters for the object
	 * method.  Currently if this is present it will be a
	 * snapshot id.
	 */
3660 if (outbound_size > PAGE_SIZE)
3663 req_page = alloc_page(GFP_KERNEL);
3667 memcpy(page_address(req_page), outbound, outbound_size);
3670 reply_page = alloc_page(GFP_KERNEL);
3673 __free_page(req_page);
3677 ret = ceph_osdc_call(osdc, oid, oloc, RBD_DRV_NAME, method_name,
3678 CEPH_OSD_FLAG_READ, req_page, outbound_size,
3679 reply_page, &inbound_size);
3681 memcpy(inbound, page_address(reply_page), inbound_size);
3686 __free_page(req_page);
3687 __free_page(reply_page);
/*
 * lock_rwsem must be held for read
 */
3694 static int rbd_wait_state_locked(struct rbd_device *rbd_dev, bool may_acquire)
3697 unsigned long timeout;
3700 if (test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags))
3701 return -EBLACKLISTED;
3703 if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED)
3707 rbd_warn(rbd_dev, "exclusive lock required");
3713 * Note the use of mod_delayed_work() in rbd_acquire_lock()
3714 * and cancel_delayed_work() in wake_requests().
3716 dout("%s rbd_dev %p queueing lock_dwork\n", __func__, rbd_dev);
3717 queue_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0);
3718 prepare_to_wait_exclusive(&rbd_dev->lock_waitq, &wait,
3719 TASK_UNINTERRUPTIBLE);
3720 up_read(&rbd_dev->lock_rwsem);
3721 timeout = schedule_timeout(ceph_timeout_jiffies(
3722 rbd_dev->opts->lock_timeout));
3723 down_read(&rbd_dev->lock_rwsem);
3724 if (test_bit(RBD_DEV_FLAG_BLACKLISTED, &rbd_dev->flags)) {
3725 ret = -EBLACKLISTED;
3729 rbd_warn(rbd_dev, "timed out waiting for lock");
3733 } while (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED);
3735 finish_wait(&rbd_dev->lock_waitq, &wait);
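/*
 * Worker for a single block-layer request.  Runs off the rbd_wq
 * workqueue because the I/O setup path can block (memory allocation,
 * waiting for the exclusive lock).
 */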
3739 static void rbd_queue_workfn(struct work_struct *work)
3741 struct request *rq = blk_mq_rq_from_pdu(work);
3742 struct rbd_device *rbd_dev = rq->q->queuedata;
3743 struct rbd_img_request *img_request;
3744 struct ceph_snap_context *snapc = NULL;
3745 u64 offset = (u64)blk_rq_pos(rq) << SECTOR_SHIFT;
3746 u64 length = blk_rq_bytes(rq);
3747 enum obj_operation_type op_type;
3749 bool must_be_locked;
3752 switch (req_op(rq)) {
3753 case REQ_OP_DISCARD:
3754 op_type = OBJ_OP_DISCARD;
3756 case REQ_OP_WRITE_ZEROES:
3757 op_type = OBJ_OP_ZEROOUT;
3760 op_type = OBJ_OP_WRITE;
3763 op_type = OBJ_OP_READ;
3766 dout("%s: non-fs request type %d\n", __func__, req_op(rq));
3771 /* Ignore/skip any zero-length requests */
	if (!length) {
		dout("%s: zero-length request\n", __func__);
		result = 0;
		goto err_rq;	/* Signals no payload transferred */
	}
3779 rbd_assert(op_type == OBJ_OP_READ ||
3780 rbd_dev->spec->snap_id == CEPH_NOSNAP);
	/*
	 * Quit early if the mapped snapshot no longer exists.  It's
	 * still possible the snapshot will have disappeared by the
	 * time our request arrives at the osd, but there's no sense in
	 * sending it if we already know.
	 */
3788 if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags)) {
3789 dout("request for non-existent snapshot");
3790 rbd_assert(rbd_dev->spec->snap_id != CEPH_NOSNAP);
3795 if (offset && length > U64_MAX - offset + 1) {
3796 rbd_warn(rbd_dev, "bad request range (%llu~%llu)", offset,
3799 goto err_rq; /* Shouldn't happen */
3802 blk_mq_start_request(rq);
3804 down_read(&rbd_dev->header_rwsem);
3805 mapping_size = rbd_dev->mapping.size;
3806 if (op_type != OBJ_OP_READ) {
3807 snapc = rbd_dev->header.snapc;
3808 ceph_get_snap_context(snapc);
3810 up_read(&rbd_dev->header_rwsem);
3812 if (offset + length > mapping_size) {
3813 rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)", offset,
3814 length, mapping_size);
3820 (rbd_dev->header.features & RBD_FEATURE_EXCLUSIVE_LOCK) &&
3821 (op_type != OBJ_OP_READ || rbd_dev->opts->lock_on_read);
3822 if (must_be_locked) {
3823 down_read(&rbd_dev->lock_rwsem);
3824 result = rbd_wait_state_locked(rbd_dev,
3825 !rbd_dev->opts->exclusive);
3830 img_request = rbd_img_request_create(rbd_dev, op_type, snapc);
3835 img_request->rq = rq;
3836 snapc = NULL; /* img_request consumes a ref */
3838 if (op_type == OBJ_OP_DISCARD || op_type == OBJ_OP_ZEROOUT)
3839 result = rbd_img_fill_nodata(img_request, offset, length);
3841 result = rbd_img_fill_from_bio(img_request, offset, length,
3843 if (result || !img_request->pending_count)
3844 goto err_img_request;
3846 rbd_img_request_submit(img_request);
3848 up_read(&rbd_dev->lock_rwsem);
3852 rbd_img_request_put(img_request);
3855 up_read(&rbd_dev->lock_rwsem);
3858 rbd_warn(rbd_dev, "%s %llx at %llx result %d",
3859 obj_op_name(op_type), length, offset, result);
3860 ceph_put_snap_context(snapc);
3862 blk_mq_end_request(rq, errno_to_blk_status(result));
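/*
 * blk-mq ->queue_rq() entry point: all of the actual work happens in
 * rbd_queue_workfn(), whose work_struct lives in the request pdu.
 */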
3865 static blk_status_t rbd_queue_rq(struct blk_mq_hw_ctx *hctx,
3866 const struct blk_mq_queue_data *bd)
3868 struct request *rq = bd->rq;
3869 struct work_struct *work = blk_mq_rq_to_pdu(rq);
	queue_work(rbd_wq, work);
	return BLK_STS_OK;
}
3875 static void rbd_free_disk(struct rbd_device *rbd_dev)
3877 blk_cleanup_queue(rbd_dev->disk->queue);
3878 blk_mq_free_tag_set(&rbd_dev->tag_set);
3879 put_disk(rbd_dev->disk);
3880 rbd_dev->disk = NULL;
3883 static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
3884 struct ceph_object_id *oid,
3885 struct ceph_object_locator *oloc,
3886 void *buf, int buf_len)
3889 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3890 struct ceph_osd_request *req;
3891 struct page **pages;
3892 int num_pages = calc_pages_for(0, buf_len);
3895 req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_KERNEL);
3899 ceph_oid_copy(&req->r_base_oid, oid);
3900 ceph_oloc_copy(&req->r_base_oloc, oloc);
3901 req->r_flags = CEPH_OSD_FLAG_READ;
3903 pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
3904 if (IS_ERR(pages)) {
3905 ret = PTR_ERR(pages);
3909 osd_req_op_extent_init(req, 0, CEPH_OSD_OP_READ, 0, buf_len, 0, 0);
3910 osd_req_op_extent_osd_data_pages(req, 0, pages, buf_len, 0, false,
3913 ret = ceph_osdc_alloc_messages(req, GFP_KERNEL);
3917 ceph_osdc_start_request(osdc, req, false);
3918 ret = ceph_osdc_wait_request(osdc, req);
3920 ceph_copy_from_page_vector(pages, buf, 0, ret);
3923 ceph_osdc_put_request(req);
/*
 * Read the complete header for the given rbd device.  On successful
 * return, the rbd_dev->header field will contain up-to-date
 * information about the image.
 */
3932 static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev)
3934 struct rbd_image_header_ondisk *ondisk = NULL;
	/*
	 * The complete header will include an array of its 64-bit
	 * snapshot ids, followed by the names of those snapshots as
	 * a contiguous block of NUL-terminated strings.  Note that
	 * the number of snapshots could change by the time we read
	 * it in, in which case we re-read it.
	 */
3952 size = sizeof (*ondisk);
3953 size += snap_count * sizeof (struct rbd_image_snap_ondisk);
3955 ondisk = kmalloc(size, GFP_KERNEL);
3959 ret = rbd_obj_read_sync(rbd_dev, &rbd_dev->header_oid,
3960 &rbd_dev->header_oloc, ondisk, size);
3963 if ((size_t)ret < size) {
3965 rbd_warn(rbd_dev, "short header read (want %zd got %d)",
3969 if (!rbd_dev_ondisk_valid(ondisk)) {
3971 rbd_warn(rbd_dev, "invalid header");
3975 names_size = le64_to_cpu(ondisk->snap_names_len);
3976 want_count = snap_count;
3977 snap_count = le32_to_cpu(ondisk->snap_count);
3978 } while (snap_count != want_count);
3980 ret = rbd_header_from_disk(rbd_dev, ondisk);
/*
 * Clear the rbd device's EXISTS flag if the snapshot it's mapped to
 * has disappeared from the (just updated) snapshot context.
 */
3991 static void rbd_exists_validate(struct rbd_device *rbd_dev)
3995 if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags))
3998 snap_id = rbd_dev->spec->snap_id;
3999 if (snap_id == CEPH_NOSNAP)
4002 if (rbd_dev_snap_index(rbd_dev, snap_id) == BAD_SNAP_INDEX)
4003 clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
4006 static void rbd_dev_update_size(struct rbd_device *rbd_dev)
	/*
	 * If EXISTS is not set, rbd_dev->disk may be NULL, so don't
	 * try to update its size.  If REMOVING is set, updating size
	 * is just useless work since the device can't be opened.
	 */
4015 if (test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags) &&
4016 !test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags)) {
4017 size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
4018 dout("setting size to %llu sectors", (unsigned long long)size);
4019 set_capacity(rbd_dev->disk, size);
4020 revalidate_disk(rbd_dev->disk);
4024 static int rbd_dev_refresh(struct rbd_device *rbd_dev)
4029 down_write(&rbd_dev->header_rwsem);
4030 mapping_size = rbd_dev->mapping.size;
4032 ret = rbd_dev_header_info(rbd_dev);
	/*
	 * If there is a parent, see if it has disappeared due to the
	 * mapped image getting flattened.
	 */
4040 if (rbd_dev->parent) {
4041 ret = rbd_dev_v2_parent_info(rbd_dev);
4046 if (rbd_dev->spec->snap_id == CEPH_NOSNAP) {
4047 rbd_dev->mapping.size = rbd_dev->header.image_size;
4049 /* validate mapped snapshot's EXISTS flag */
4050 rbd_exists_validate(rbd_dev);
4054 up_write(&rbd_dev->header_rwsem);
4055 if (!ret && mapping_size != rbd_dev->mapping.size)
4056 rbd_dev_update_size(rbd_dev);
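/*
 * Each blk-mq request carries a work_struct in its driver payload (see
 * tag_set.cmd_size below); initialize it here so that rbd_queue_rq()
 * only has to queue it.
 */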
4061 static int rbd_init_request(struct blk_mq_tag_set *set, struct request *rq,
4062 unsigned int hctx_idx, unsigned int numa_node)
4064 struct work_struct *work = blk_mq_rq_to_pdu(rq);
4066 INIT_WORK(work, rbd_queue_workfn);
4070 static const struct blk_mq_ops rbd_mq_ops = {
4071 .queue_rq = rbd_queue_rq,
4072 .init_request = rbd_init_request,
4075 static int rbd_init_disk(struct rbd_device *rbd_dev)
4077 struct gendisk *disk;
4078 struct request_queue *q;
4079 unsigned int objset_bytes =
4080 rbd_dev->layout.object_size * rbd_dev->layout.stripe_count;
4083 /* create gendisk info */
4084 disk = alloc_disk(single_major ?
4085 (1 << RBD_SINGLE_MAJOR_PART_SHIFT) :
4086 RBD_MINORS_PER_MAJOR);
4090 snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
4092 disk->major = rbd_dev->major;
4093 disk->first_minor = rbd_dev->minor;
4095 disk->flags |= GENHD_FL_EXT_DEVT;
4096 disk->fops = &rbd_bd_ops;
4097 disk->private_data = rbd_dev;
4099 memset(&rbd_dev->tag_set, 0, sizeof(rbd_dev->tag_set));
4100 rbd_dev->tag_set.ops = &rbd_mq_ops;
4101 rbd_dev->tag_set.queue_depth = rbd_dev->opts->queue_depth;
4102 rbd_dev->tag_set.numa_node = NUMA_NO_NODE;
4103 rbd_dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
4104 rbd_dev->tag_set.nr_hw_queues = 1;
4105 rbd_dev->tag_set.cmd_size = sizeof(struct work_struct);
4107 err = blk_mq_alloc_tag_set(&rbd_dev->tag_set);
4111 q = blk_mq_init_queue(&rbd_dev->tag_set);
4117 blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
4118 /* QUEUE_FLAG_ADD_RANDOM is off by default for blk-mq */
4120 blk_queue_max_hw_sectors(q, objset_bytes >> SECTOR_SHIFT);
4121 q->limits.max_sectors = queue_max_hw_sectors(q);
4122 blk_queue_max_segments(q, USHRT_MAX);
4123 blk_queue_max_segment_size(q, UINT_MAX);
4124 blk_queue_io_min(q, objset_bytes);
4125 blk_queue_io_opt(q, objset_bytes);
4127 if (rbd_dev->opts->trim) {
4128 blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
4129 q->limits.discard_granularity = objset_bytes;
4130 blk_queue_max_discard_sectors(q, objset_bytes >> SECTOR_SHIFT);
4131 blk_queue_max_write_zeroes_sectors(q, objset_bytes >> SECTOR_SHIFT);
4134 if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC))
4135 q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES;
	/*
	 * disk_release() expects a queue ref from add_disk() and will
	 * put it.  Hold an extra ref until add_disk() is called.
	 */
4141 WARN_ON(!blk_get_queue(q));
4143 q->queuedata = rbd_dev;
4145 rbd_dev->disk = disk;
4149 blk_mq_free_tag_set(&rbd_dev->tag_set);
4159 static struct rbd_device *dev_to_rbd_dev(struct device *dev)
4161 return container_of(dev, struct rbd_device, dev);
4164 static ssize_t rbd_size_show(struct device *dev,
4165 struct device_attribute *attr, char *buf)
4167 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4169 return sprintf(buf, "%llu\n",
4170 (unsigned long long)rbd_dev->mapping.size);
/*
 * Note this shows the features for whatever's mapped, which is not
 * necessarily the base image.
 */
4177 static ssize_t rbd_features_show(struct device *dev,
4178 struct device_attribute *attr, char *buf)
4180 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4182 return sprintf(buf, "0x%016llx\n",
4183 (unsigned long long)rbd_dev->mapping.features);
4186 static ssize_t rbd_major_show(struct device *dev,
4187 struct device_attribute *attr, char *buf)
4189 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4192 return sprintf(buf, "%d\n", rbd_dev->major);
4194 return sprintf(buf, "(none)\n");
4197 static ssize_t rbd_minor_show(struct device *dev,
4198 struct device_attribute *attr, char *buf)
4200 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4202 return sprintf(buf, "%d\n", rbd_dev->minor);
4205 static ssize_t rbd_client_addr_show(struct device *dev,
4206 struct device_attribute *attr, char *buf)
4208 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4209 struct ceph_entity_addr *client_addr =
4210 ceph_client_addr(rbd_dev->rbd_client->client);
4212 return sprintf(buf, "%pISpc/%u\n", &client_addr->in_addr,
4213 le32_to_cpu(client_addr->nonce));
4216 static ssize_t rbd_client_id_show(struct device *dev,
4217 struct device_attribute *attr, char *buf)
4219 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4221 return sprintf(buf, "client%lld\n",
4222 ceph_client_gid(rbd_dev->rbd_client->client));
4225 static ssize_t rbd_cluster_fsid_show(struct device *dev,
4226 struct device_attribute *attr, char *buf)
4228 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4230 return sprintf(buf, "%pU\n", &rbd_dev->rbd_client->client->fsid);
4233 static ssize_t rbd_config_info_show(struct device *dev,
4234 struct device_attribute *attr, char *buf)
4236 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4238 return sprintf(buf, "%s\n", rbd_dev->config_info);
4241 static ssize_t rbd_pool_show(struct device *dev,
4242 struct device_attribute *attr, char *buf)
4244 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4246 return sprintf(buf, "%s\n", rbd_dev->spec->pool_name);
4249 static ssize_t rbd_pool_id_show(struct device *dev,
4250 struct device_attribute *attr, char *buf)
4252 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4254 return sprintf(buf, "%llu\n",
4255 (unsigned long long) rbd_dev->spec->pool_id);
4258 static ssize_t rbd_pool_ns_show(struct device *dev,
4259 struct device_attribute *attr, char *buf)
4261 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4263 return sprintf(buf, "%s\n", rbd_dev->spec->pool_ns ?: "");
4266 static ssize_t rbd_name_show(struct device *dev,
4267 struct device_attribute *attr, char *buf)
4269 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4271 if (rbd_dev->spec->image_name)
4272 return sprintf(buf, "%s\n", rbd_dev->spec->image_name);
4274 return sprintf(buf, "(unknown)\n");
4277 static ssize_t rbd_image_id_show(struct device *dev,
4278 struct device_attribute *attr, char *buf)
4280 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4282 return sprintf(buf, "%s\n", rbd_dev->spec->image_id);
/*
 * Shows the name of the currently-mapped snapshot (or
 * RBD_SNAP_HEAD_NAME for the base image).
 */
4289 static ssize_t rbd_snap_show(struct device *dev,
4290 struct device_attribute *attr,
4293 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4295 return sprintf(buf, "%s\n", rbd_dev->spec->snap_name);
4298 static ssize_t rbd_snap_id_show(struct device *dev,
4299 struct device_attribute *attr, char *buf)
4301 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4303 return sprintf(buf, "%llu\n", rbd_dev->spec->snap_id);
/*
 * For a v2 image, shows the chain of parent images, separated by empty
 * lines.  For v1 images or if there is no parent, shows "(no parent
 * image)".
 */
4311 static ssize_t rbd_parent_show(struct device *dev,
4312 struct device_attribute *attr,
4315 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4318 if (!rbd_dev->parent)
4319 return sprintf(buf, "(no parent image)\n");
4321 for ( ; rbd_dev->parent; rbd_dev = rbd_dev->parent) {
4322 struct rbd_spec *spec = rbd_dev->parent_spec;
4324 count += sprintf(&buf[count], "%s"
4325 "pool_id %llu\npool_name %s\n"
4327 "image_id %s\nimage_name %s\n"
4328 "snap_id %llu\nsnap_name %s\n"
4330 !count ? "" : "\n", /* first? */
4331 spec->pool_id, spec->pool_name,
4332 spec->pool_ns ?: "",
4333 spec->image_id, spec->image_name ?: "(unknown)",
4334 spec->snap_id, spec->snap_name,
4335 rbd_dev->parent_overlap);
4341 static ssize_t rbd_image_refresh(struct device *dev,
4342 struct device_attribute *attr,
4346 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4349 ret = rbd_dev_refresh(rbd_dev);
4356 static DEVICE_ATTR(size, 0444, rbd_size_show, NULL);
4357 static DEVICE_ATTR(features, 0444, rbd_features_show, NULL);
4358 static DEVICE_ATTR(major, 0444, rbd_major_show, NULL);
4359 static DEVICE_ATTR(minor, 0444, rbd_minor_show, NULL);
4360 static DEVICE_ATTR(client_addr, 0444, rbd_client_addr_show, NULL);
4361 static DEVICE_ATTR(client_id, 0444, rbd_client_id_show, NULL);
4362 static DEVICE_ATTR(cluster_fsid, 0444, rbd_cluster_fsid_show, NULL);
4363 static DEVICE_ATTR(config_info, 0400, rbd_config_info_show, NULL);
4364 static DEVICE_ATTR(pool, 0444, rbd_pool_show, NULL);
4365 static DEVICE_ATTR(pool_id, 0444, rbd_pool_id_show, NULL);
4366 static DEVICE_ATTR(pool_ns, 0444, rbd_pool_ns_show, NULL);
4367 static DEVICE_ATTR(name, 0444, rbd_name_show, NULL);
4368 static DEVICE_ATTR(image_id, 0444, rbd_image_id_show, NULL);
4369 static DEVICE_ATTR(refresh, 0200, NULL, rbd_image_refresh);
4370 static DEVICE_ATTR(current_snap, 0444, rbd_snap_show, NULL);
4371 static DEVICE_ATTR(snap_id, 0444, rbd_snap_id_show, NULL);
4372 static DEVICE_ATTR(parent, 0444, rbd_parent_show, NULL);
4374 static struct attribute *rbd_attrs[] = {
4375 &dev_attr_size.attr,
4376 &dev_attr_features.attr,
4377 &dev_attr_major.attr,
4378 &dev_attr_minor.attr,
4379 &dev_attr_client_addr.attr,
4380 &dev_attr_client_id.attr,
4381 &dev_attr_cluster_fsid.attr,
4382 &dev_attr_config_info.attr,
4383 &dev_attr_pool.attr,
4384 &dev_attr_pool_id.attr,
4385 &dev_attr_pool_ns.attr,
4386 &dev_attr_name.attr,
4387 &dev_attr_image_id.attr,
4388 &dev_attr_current_snap.attr,
4389 &dev_attr_snap_id.attr,
4390 &dev_attr_parent.attr,
4391 &dev_attr_refresh.attr,
4395 static struct attribute_group rbd_attr_group = {
4399 static const struct attribute_group *rbd_attr_groups[] = {
4404 static void rbd_dev_release(struct device *dev);
4406 static const struct device_type rbd_device_type = {
4408 .groups = rbd_attr_groups,
4409 .release = rbd_dev_release,
4412 static struct rbd_spec *rbd_spec_get(struct rbd_spec *spec)
4414 kref_get(&spec->kref);
4419 static void rbd_spec_free(struct kref *kref);
4420 static void rbd_spec_put(struct rbd_spec *spec)
4423 kref_put(&spec->kref, rbd_spec_free);
4426 static struct rbd_spec *rbd_spec_alloc(void)
4428 struct rbd_spec *spec;
4430 spec = kzalloc(sizeof (*spec), GFP_KERNEL);
4434 spec->pool_id = CEPH_NOPOOL;
4435 spec->snap_id = CEPH_NOSNAP;
4436 kref_init(&spec->kref);
4441 static void rbd_spec_free(struct kref *kref)
4443 struct rbd_spec *spec = container_of(kref, struct rbd_spec, kref);
4445 kfree(spec->pool_name);
4446 kfree(spec->pool_ns);
4447 kfree(spec->image_id);
4448 kfree(spec->image_name);
4449 kfree(spec->snap_name);
4453 static void rbd_dev_free(struct rbd_device *rbd_dev)
4455 WARN_ON(rbd_dev->watch_state != RBD_WATCH_STATE_UNREGISTERED);
4456 WARN_ON(rbd_dev->lock_state != RBD_LOCK_STATE_UNLOCKED);
4458 ceph_oid_destroy(&rbd_dev->header_oid);
4459 ceph_oloc_destroy(&rbd_dev->header_oloc);
4460 kfree(rbd_dev->config_info);
4462 rbd_put_client(rbd_dev->rbd_client);
4463 rbd_spec_put(rbd_dev->spec);
4464 kfree(rbd_dev->opts);
4468 static void rbd_dev_release(struct device *dev)
4470 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4471 bool need_put = !!rbd_dev->opts;
4474 destroy_workqueue(rbd_dev->task_wq);
4475 ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
4478 rbd_dev_free(rbd_dev);
	/*
	 * This is racy, but way better than putting the module ref
	 * outside of the release callback.  The race window is pretty
	 * small, so doing something similar to dm (dm-builtin.c) is
	 * overkill.
	 */
4486 module_put(THIS_MODULE);
4489 static struct rbd_device *__rbd_dev_create(struct rbd_client *rbdc,
4490 struct rbd_spec *spec)
4492 struct rbd_device *rbd_dev;
4494 rbd_dev = kzalloc(sizeof(*rbd_dev), GFP_KERNEL);
4498 spin_lock_init(&rbd_dev->lock);
4499 INIT_LIST_HEAD(&rbd_dev->node);
4500 init_rwsem(&rbd_dev->header_rwsem);
4502 rbd_dev->header.data_pool_id = CEPH_NOPOOL;
4503 ceph_oid_init(&rbd_dev->header_oid);
4504 rbd_dev->header_oloc.pool = spec->pool_id;
4505 if (spec->pool_ns) {
4506 WARN_ON(!*spec->pool_ns);
4507 rbd_dev->header_oloc.pool_ns =
4508 ceph_find_or_create_string(spec->pool_ns,
4509 strlen(spec->pool_ns));
4512 mutex_init(&rbd_dev->watch_mutex);
4513 rbd_dev->watch_state = RBD_WATCH_STATE_UNREGISTERED;
4514 INIT_DELAYED_WORK(&rbd_dev->watch_dwork, rbd_reregister_watch);
4516 init_rwsem(&rbd_dev->lock_rwsem);
4517 rbd_dev->lock_state = RBD_LOCK_STATE_UNLOCKED;
4518 INIT_WORK(&rbd_dev->acquired_lock_work, rbd_notify_acquired_lock);
4519 INIT_WORK(&rbd_dev->released_lock_work, rbd_notify_released_lock);
4520 INIT_DELAYED_WORK(&rbd_dev->lock_dwork, rbd_acquire_lock);
4521 INIT_WORK(&rbd_dev->unlock_work, rbd_release_lock_work);
4522 init_waitqueue_head(&rbd_dev->lock_waitq);
4524 rbd_dev->dev.bus = &rbd_bus_type;
4525 rbd_dev->dev.type = &rbd_device_type;
4526 rbd_dev->dev.parent = &rbd_root_dev;
4527 device_initialize(&rbd_dev->dev);
4529 rbd_dev->rbd_client = rbdc;
4530 rbd_dev->spec = spec;
/*
 * Create a mapping rbd_dev.
 */
4538 static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
4539 struct rbd_spec *spec,
4540 struct rbd_options *opts)
4542 struct rbd_device *rbd_dev;
4544 rbd_dev = __rbd_dev_create(rbdc, spec);
4548 rbd_dev->opts = opts;
4550 /* get an id and fill in device name */
4551 rbd_dev->dev_id = ida_simple_get(&rbd_dev_id_ida, 0,
4552 minor_to_rbd_dev_id(1 << MINORBITS),
4554 if (rbd_dev->dev_id < 0)
4557 sprintf(rbd_dev->name, RBD_DRV_NAME "%d", rbd_dev->dev_id);
4558 rbd_dev->task_wq = alloc_ordered_workqueue("%s-tasks", WQ_MEM_RECLAIM,
4560 if (!rbd_dev->task_wq)
4563 /* we have a ref from do_rbd_add() */
4564 __module_get(THIS_MODULE);
4566 dout("%s rbd_dev %p dev_id %d\n", __func__, rbd_dev, rbd_dev->dev_id);
4570 ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
4572 rbd_dev_free(rbd_dev);
4576 static void rbd_dev_destroy(struct rbd_device *rbd_dev)
4579 put_device(&rbd_dev->dev);
/*
 * Get the size and object order for an image snapshot, or if
 * snap_id is CEPH_NOSNAP, get this information for the base
 * image.
 */
4587 static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
4588 u8 *order, u64 *snap_size)
4590 __le64 snapid = cpu_to_le64(snap_id);
4595 } __attribute__ ((packed)) size_buf = { 0 };
4597 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
4598 &rbd_dev->header_oloc, "get_size",
4599 &snapid, sizeof(snapid),
4600 &size_buf, sizeof(size_buf));
4601 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4604 if (ret < sizeof (size_buf))
4608 *order = size_buf.order;
4609 dout(" order %u", (unsigned int)*order);
4611 *snap_size = le64_to_cpu(size_buf.size);
4613 dout(" snap_id 0x%016llx snap_size = %llu\n",
4614 (unsigned long long)snap_id,
4615 (unsigned long long)*snap_size);
4620 static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev)
4622 return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
4623 &rbd_dev->header.obj_order,
4624 &rbd_dev->header.image_size);
4627 static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
4633 reply_buf = kzalloc(RBD_OBJ_PREFIX_LEN_MAX, GFP_KERNEL);
4637 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
4638 &rbd_dev->header_oloc, "get_object_prefix",
4639 NULL, 0, reply_buf, RBD_OBJ_PREFIX_LEN_MAX);
4640 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4645 rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
4646 p + ret, NULL, GFP_NOIO);
4649 if (IS_ERR(rbd_dev->header.object_prefix)) {
4650 ret = PTR_ERR(rbd_dev->header.object_prefix);
4651 rbd_dev->header.object_prefix = NULL;
4653 dout(" object_prefix = %s\n", rbd_dev->header.object_prefix);
4661 static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
4664 __le64 snapid = cpu_to_le64(snap_id);
4668 } __attribute__ ((packed)) features_buf = { 0 };
4672 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
4673 &rbd_dev->header_oloc, "get_features",
4674 &snapid, sizeof(snapid),
4675 &features_buf, sizeof(features_buf));
4676 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4679 if (ret < sizeof (features_buf))
4682 unsup = le64_to_cpu(features_buf.incompat) & ~RBD_FEATURES_SUPPORTED;
4684 rbd_warn(rbd_dev, "image uses unsupported features: 0x%llx",
4689 *snap_features = le64_to_cpu(features_buf.features);
4691 dout(" snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n",
4692 (unsigned long long)snap_id,
4693 (unsigned long long)*snap_features,
4694 (unsigned long long)le64_to_cpu(features_buf.incompat));
4699 static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
4701 return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
4702 &rbd_dev->header.features);
4705 struct parent_image_info {
4707 const char *pool_ns;
4708 const char *image_id;
/*
 * The caller is responsible for @pii.
 */
4718 static int decode_parent_image_spec(void **p, void *end,
4719 struct parent_image_info *pii)
4725 ret = ceph_start_decoding(p, end, 1, "ParentImageSpec",
4726 &struct_v, &struct_len);
4730 ceph_decode_64_safe(p, end, pii->pool_id, e_inval);
4731 pii->pool_ns = ceph_extract_encoded_string(p, end, NULL, GFP_KERNEL);
4732 if (IS_ERR(pii->pool_ns)) {
4733 ret = PTR_ERR(pii->pool_ns);
4734 pii->pool_ns = NULL;
4737 pii->image_id = ceph_extract_encoded_string(p, end, NULL, GFP_KERNEL);
4738 if (IS_ERR(pii->image_id)) {
4739 ret = PTR_ERR(pii->image_id);
4740 pii->image_id = NULL;
4743 ceph_decode_64_safe(p, end, pii->snap_id, e_inval);
4750 static int __get_parent_info(struct rbd_device *rbd_dev,
4751 struct page *req_page,
4752 struct page *reply_page,
4753 struct parent_image_info *pii)
4755 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4756 size_t reply_len = PAGE_SIZE;
4760 ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
4761 "rbd", "parent_get", CEPH_OSD_FLAG_READ,
4762 req_page, sizeof(u64), reply_page, &reply_len);
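/*
 * -EOPNOTSUPP means the OSDs predate the "parent_get" class method;
 * returning 1 tells get_parent_info() to retry with the legacy
 * "get_parent" method instead.
 */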
if (ret)
4764 return ret == -EOPNOTSUPP ? 1 : ret;
4766 p = page_address(reply_page);
4767 end = p + reply_len;
4768 ret = decode_parent_image_spec(&p, end, pii);
        if (ret)
                return ret;
4772 ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
4773 "rbd", "parent_overlap_get", CEPH_OSD_FLAG_READ,
4774 req_page, sizeof(u64), reply_page, &reply_len);
        if (ret)
                return ret;
4778 p = page_address(reply_page);
4779 end = p + reply_len;
4780 ceph_decode_8_safe(&p, end, pii->has_overlap, e_inval);
4781 if (pii->has_overlap)
4782 ceph_decode_64_safe(&p, end, pii->overlap, e_inval);

        return 0;

e_inval:
        return -EINVAL;
4791 * The caller is responsible for @pii.
4793 static int __get_parent_info_legacy(struct rbd_device *rbd_dev,
4794 struct page *req_page,
4795 struct page *reply_page,
4796 struct parent_image_info *pii)
4798 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4799 size_t reply_len = PAGE_SIZE;
4803 ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
4804 "rbd", "get_parent", CEPH_OSD_FLAG_READ,
4805 req_page, sizeof(u64), reply_page, &reply_len);
        if (ret)
                return ret;
4809 p = page_address(reply_page);
4810 end = p + reply_len;
4811 ceph_decode_64_safe(&p, end, pii->pool_id, e_inval);
4812 pii->image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
4813 if (IS_ERR(pii->image_id)) {
4814 ret = PTR_ERR(pii->image_id);
4815 pii->image_id = NULL;
4818 ceph_decode_64_safe(&p, end, pii->snap_id, e_inval);
4819 pii->has_overlap = true;
4820 ceph_decode_64_safe(&p, end, pii->overlap, e_inval);

        return 0;

e_inval:
        return -EINVAL;
4828 static int get_parent_info(struct rbd_device *rbd_dev,
4829 struct parent_image_info *pii)
4831 struct page *req_page, *reply_page;
4835 req_page = alloc_page(GFP_KERNEL);
        if (!req_page)
                return -ENOMEM;

4839 reply_page = alloc_page(GFP_KERNEL);
        if (!reply_page) {
4841 __free_page(req_page);
                return -ENOMEM;
        }
4845 p = page_address(req_page);
4846 ceph_encode_64(&p, rbd_dev->spec->snap_id);
4847 ret = __get_parent_info(rbd_dev, req_page, reply_page, pii);
if (ret > 0)
4849 ret = __get_parent_info_legacy(rbd_dev, req_page, reply_page,
                                               pii);
4852 __free_page(req_page);
4853 __free_page(reply_page);
4857 static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
4859 struct rbd_spec *parent_spec;
4860 struct parent_image_info pii = { 0 };
4863 parent_spec = rbd_spec_alloc();
        if (!parent_spec)
                return -ENOMEM;
4867 ret = get_parent_info(rbd_dev, &pii);
        if (ret)
                goto out_err;
4871 dout("%s pool_id %llu pool_ns %s image_id %s snap_id %llu has_overlap %d overlap %llu\n",
4872 __func__, pii.pool_id, pii.pool_ns, pii.image_id, pii.snap_id,
4873 pii.has_overlap, pii.overlap);
4875 if (pii.pool_id == CEPH_NOPOOL || !pii.has_overlap) {
4877 * Either the parent never existed, or we have
4878 * record of it but the image got flattened so it no
4879 * longer has a parent. When the parent of a
4880 * layered image disappears we immediately set the
4881 * overlap to 0. The effect of this is that all new
4882 * requests will be treated as if the image had no
 * parent.
4885 * If !pii.has_overlap, the parent image spec is not
4886 * applicable. It's there to avoid duplication in each
 * snapshot record.
 */
4889 if (rbd_dev->parent_overlap) {
4890 rbd_dev->parent_overlap = 0;
4891 rbd_dev_parent_put(rbd_dev);
4892 pr_info("%s: clone image has been flattened\n",
4893 rbd_dev->disk->disk_name);
                }
4896 goto out; /* No parent? No problem. */
4899 /* The ceph file layout needs to fit pool id in 32 bits */
ret = -EIO;
4902 if (pii.pool_id > (u64)U32_MAX) {
4903 rbd_warn(NULL, "parent pool id too large (%llu > %u)",
4904 (unsigned long long)pii.pool_id, U32_MAX);
                goto out_err;
        }
4909 * The parent won't change (except when the clone is
4910 * flattened, which is handled above). So we only need to
4911 * record the parent spec if we have not already done so.
4913 if (!rbd_dev->parent_spec) {
4914 parent_spec->pool_id = pii.pool_id;
4915 if (pii.pool_ns && *pii.pool_ns) {
4916 parent_spec->pool_ns = pii.pool_ns;
                        pii.pool_ns = NULL;
                }
4919 parent_spec->image_id = pii.image_id;
4920 pii.image_id = NULL;
4921 parent_spec->snap_id = pii.snap_id;
4923 rbd_dev->parent_spec = parent_spec;
4924 parent_spec = NULL; /* rbd_dev now owns this */
4928 * We always update the parent overlap. If it's zero we issue
4929 * a warning, as we will proceed as if there was no parent.
if (!pii.overlap) {
                if (parent_spec) {
4933 /* refresh, careful to warn just once */
4934 if (rbd_dev->parent_overlap)
                                rbd_warn(rbd_dev,
4936 "clone now standalone (overlap became 0)");
                } else {
                        /* initial probe */
4939 rbd_warn(rbd_dev, "clone is standalone (overlap 0)");
                }
        }
4942 rbd_dev->parent_overlap = pii.overlap;
out:
        ret = 0;
out_err:
        kfree(pii.pool_ns);
4948 kfree(pii.image_id);
4949 rbd_spec_put(parent_spec);
        return ret;
4953 static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
struct {
        __le64 stripe_unit;
4957 __le64 stripe_count;
4958 } __attribute__ ((packed)) striping_info_buf = { 0 };
4959 size_t size = sizeof (striping_info_buf);
4963 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
4964 &rbd_dev->header_oloc, "get_stripe_unit_count",
4965 NULL, 0, &striping_info_buf, size);
4966 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
4972 p = &striping_info_buf;
4973 rbd_dev->header.stripe_unit = ceph_decode_64(&p);
4974 rbd_dev->header.stripe_count = ceph_decode_64(&p);
4978 static int rbd_dev_v2_data_pool(struct rbd_device *rbd_dev)
4980 __le64 data_pool_id;
4983 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
4984 &rbd_dev->header_oloc, "get_data_pool",
4985 NULL, 0, &data_pool_id, sizeof(data_pool_id));
4988 if (ret < sizeof(data_pool_id))
        return -EBADMSG;
4991 rbd_dev->header.data_pool_id = le64_to_cpu(data_pool_id);
4992 WARN_ON(rbd_dev->header.data_pool_id == CEPH_NOPOOL);
4996 static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
4998 CEPH_DEFINE_OID_ONSTACK(oid);
4999 size_t image_id_size;
5004 void *reply_buf = NULL;
5006 char *image_name = NULL;
5009 rbd_assert(!rbd_dev->spec->image_name);
5011 len = strlen(rbd_dev->spec->image_id);
5012 image_id_size = sizeof (__le32) + len;
5013 image_id = kmalloc(image_id_size, GFP_KERNEL);
        if (!image_id)
                return NULL;

        p = image_id;
5018 end = image_id + image_id_size;
5019 ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32)len);
5021 size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX;
5022 reply_buf = kmalloc(size, GFP_KERNEL);
        if (!reply_buf)
                goto out;
5026 ceph_oid_printf(&oid, "%s", RBD_DIRECTORY);
5027 ret = rbd_obj_method_sync(rbd_dev, &oid, &rbd_dev->header_oloc,
5028 "dir_get_name", image_id, image_id_size,
5033 end = reply_buf + ret;
5035 image_name = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
5036 if (IS_ERR(image_name))
5039 dout("%s: name is %s len is %zd\n", __func__, image_name, len);
5047 static u64 rbd_v1_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
5049 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
5050 const char *snap_name;
5053 /* Skip over names until we find the one we are looking for */
5055 snap_name = rbd_dev->header.snap_names;
5056 while (which < snapc->num_snaps) {
5057 if (!strcmp(name, snap_name))
5058 return snapc->snaps[which];
5059 snap_name += strlen(snap_name) + 1;
                which++;
        }

        return CEPH_NOSNAP;
5065 static u64 rbd_v2_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
5067 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
5072 for (which = 0; !found && which < snapc->num_snaps; which++) {
5073 const char *snap_name;
5075 snap_id = snapc->snaps[which];
5076 snap_name = rbd_dev_v2_snap_name(rbd_dev, snap_id);
5077 if (IS_ERR(snap_name)) {
5078 /* ignore no-longer existing snapshots */
5079 if (PTR_ERR(snap_name) == -ENOENT)
                                continue;
                        else
                                break;
                }

5084 found = !strcmp(name, snap_name);
                kfree(snap_name);
        }
5087 return found ? snap_id : CEPH_NOSNAP;
5091 * Assumes name is never RBD_SNAP_HEAD_NAME; returns CEPH_NOSNAP if
5092 * no snapshot by that name is found, or if an error occurs.
5094 static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
5096 if (rbd_dev->image_format == 1)
5097 return rbd_v1_snap_id_by_name(rbd_dev, name);
5099 return rbd_v2_snap_id_by_name(rbd_dev, name);
5103 * An image being mapped will have everything but the snap id.
5105 static int rbd_spec_fill_snap_id(struct rbd_device *rbd_dev)
5107 struct rbd_spec *spec = rbd_dev->spec;
5109 rbd_assert(spec->pool_id != CEPH_NOPOOL && spec->pool_name);
5110 rbd_assert(spec->image_id && spec->image_name);
5111 rbd_assert(spec->snap_name);
5113 if (strcmp(spec->snap_name, RBD_SNAP_HEAD_NAME)) {
5116 snap_id = rbd_snap_id_by_name(rbd_dev, spec->snap_name);
5117 if (snap_id == CEPH_NOSNAP)
                        return -ENOENT;

5120 spec->snap_id = snap_id;
        } else {
5122 spec->snap_id = CEPH_NOSNAP;
        }

        return 0;
5129 * A parent image will have all ids but none of the names.
5131 * All names in an rbd spec are dynamically allocated. It's OK if we
5132 * can't figure out the name for an image id.
5134 static int rbd_spec_fill_names(struct rbd_device *rbd_dev)
5136 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
5137 struct rbd_spec *spec = rbd_dev->spec;
5138 const char *pool_name;
5139 const char *image_name;
5140 const char *snap_name;
5143 rbd_assert(spec->pool_id != CEPH_NOPOOL);
5144 rbd_assert(spec->image_id);
5145 rbd_assert(spec->snap_id != CEPH_NOSNAP);
5147 /* Get the pool name; we have to make our own copy of this */
5149 pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, spec->pool_id);
if (!pool_name) {
5151 rbd_warn(rbd_dev, "no pool with id %llu", spec->pool_id);
                return -EIO;
        }
5154 pool_name = kstrdup(pool_name, GFP_KERNEL);
        if (!pool_name)
                return -ENOMEM;
5158 /* Fetch the image name; tolerate failure here */
5160 image_name = rbd_dev_image_name(rbd_dev);
5162 rbd_warn(rbd_dev, "unable to get image name");
5164 /* Fetch the snapshot name */
5166 snap_name = rbd_snap_name(rbd_dev, spec->snap_id);
5167 if (IS_ERR(snap_name)) {
5168 ret = PTR_ERR(snap_name);
5172 spec->pool_name = pool_name;
5173 spec->image_name = image_name;
5174 spec->snap_name = snap_name;
5184 static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
5193 struct ceph_snap_context *snapc;
5197 * We'll need room for the seq value (maximum snapshot id),
5198 * snapshot count, and array of that many snapshot ids.
5199 * For now we have a fixed upper limit on the number we're
5200 * prepared to receive.
5202 size = sizeof (__le64) + sizeof (__le32) +
5203 RBD_MAX_SNAP_COUNT * sizeof (__le64);
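/* i.e. 8 + 4 + 510 * 8 = 4092 bytes, so the reply fits in one 4 KiB page */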
5204 reply_buf = kzalloc(size, GFP_KERNEL);
        if (!reply_buf)
                return -ENOMEM;
5208 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
5209 &rbd_dev->header_oloc, "get_snapcontext",
5210 NULL, 0, reply_buf, size);
5211 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
5216 end = reply_buf + ret;
5218 ceph_decode_64_safe(&p, end, seq, out);
5219 ceph_decode_32_safe(&p, end, snap_count, out);
5222 * Make sure the reported number of snapshot ids wouldn't go
5223 * beyond the end of our buffer. But before checking that,
5224 * make sure the computed size of the snapshot context we
5225 * allocate is representable in a size_t.
5227 if (snap_count > (SIZE_MAX - sizeof (struct ceph_snap_context))
                        / sizeof (u64))
                goto out;
5232 if (!ceph_has_room(&p, end, snap_count * sizeof (__le64)))
                goto out;
5236 snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
        if (!snapc) {
                ret = -ENOMEM;
                goto out;
        }
        snapc->seq = seq;
5242 for (i = 0; i < snap_count; i++)
5243 snapc->snaps[i] = ceph_decode_64(&p);
5245 ceph_put_snap_context(rbd_dev->header.snapc);
5246 rbd_dev->header.snapc = snapc;
5248 dout(" snap context seq = %llu, snap_count = %u\n",
5249 (unsigned long long)seq, (unsigned int)snap_count);
5256 static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
5267 size = sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN;
5268 reply_buf = kmalloc(size, GFP_KERNEL);
if (!reply_buf)
5270 return ERR_PTR(-ENOMEM);
5272 snapid = cpu_to_le64(snap_id);
5273 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
5274 &rbd_dev->header_oloc, "get_snapshot_name",
5275 &snapid, sizeof(snapid), reply_buf, size);
5276 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
if (ret < 0) {
5278 snap_name = ERR_PTR(ret);
                goto out;
        }

        p = reply_buf;
5283 end = reply_buf + ret;
5284 snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
5285 if (IS_ERR(snap_name))
5288 dout(" snap_id 0x%016llx snap_name = %s\n",
5289 (unsigned long long)snap_id, snap_name);
5296 static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev)
5298 bool first_time = rbd_dev->header.object_prefix == NULL;
5301 ret = rbd_dev_v2_image_size(rbd_dev);
        if (ret)
                return ret;

        if (first_time) {
5306 ret = rbd_dev_v2_header_onetime(rbd_dev);
                if (ret)
                        return ret;
        }
5311 ret = rbd_dev_v2_snap_context(rbd_dev);
5312 if (ret && first_time) {
5313 kfree(rbd_dev->header.object_prefix);
5314 rbd_dev->header.object_prefix = NULL;
        }

        return ret;
5320 static int rbd_dev_header_info(struct rbd_device *rbd_dev)
5322 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
5324 if (rbd_dev->image_format == 1)
5325 return rbd_dev_v1_header_info(rbd_dev);
5327 return rbd_dev_v2_header_info(rbd_dev);
5331 * Skips over white space at *buf, and updates *buf to point to the
5332 * first found non-space character (if any). Returns the length of
5333 * the token (string of non-white space characters) found. Note
5334 * that *buf must be terminated with '\0'.
5336 static inline size_t next_token(const char **buf)
5339 * These are the characters that produce nonzero for
5340 * isspace() in the "C" and "POSIX" locales.
5342 const char *spaces = " \f\n\r\t\v";
5344 *buf += strspn(*buf, spaces); /* Find start of token */
5346 return strcspn(*buf, spaces); /* Return token length */
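/*
 * Illustration: given *buf == "  rbd foo", next_token() advances *buf
 * past the leading spaces to "rbd foo" and returns 3 (strlen("rbd")).
 */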
5350 * Finds the next token in *buf, dynamically allocates a buffer big
5351 * enough to hold a copy of it, and copies the token into the new
5352 * buffer. The copy is guaranteed to be terminated with '\0'. Note
5353 * that a duplicate buffer is created even for a zero-length token.
5355 * Returns a pointer to the newly-allocated duplicate, or a null
5356 * pointer if memory for the duplicate was not available. If
5357 * the lenp argument is a non-null pointer, the length of the token
5358 * (not including the '\0') is returned in *lenp.
5360 * If successful, the *buf pointer will be updated to point beyond
5361 * the end of the found token.
5363 * Note: uses GFP_KERNEL for allocation.
5365 static inline char *dup_token(const char **buf, size_t *lenp)
5370 len = next_token(buf);
5371 dup = kmemdup(*buf, len + 1, GFP_KERNEL);
5374 *(dup + len) = '\0';
5384 * Parse the options provided for an "rbd add" (i.e., rbd image
5385 * mapping) request. These arrive via a write to /sys/bus/rbd/add,
5386 * and the data written is passed here via a NUL-terminated buffer.
5387 * Returns 0 if successful or an error code otherwise.
5389 * The information extracted from these options is recorded in
5390 * the other parameters which return dynamically-allocated
 * structures:
5393 * The address of a pointer that will refer to a ceph options
5394 * structure. Caller must release the returned pointer using
5395 * ceph_destroy_options() when it is no longer needed.
5397 * Address of an rbd options pointer. Fully initialized by
5398 * this function; caller must release with kfree().
5400 * Address of an rbd image specification pointer. Fully
5401 * initialized by this function based on parsed options.
5402 * Caller must release with rbd_spec_put().
5404 * The options passed take this form:
5405 * <mon_addrs> <options> <pool_name> <image_name> [<snap_id>]
5408 * A comma-separated list of one or more monitor addresses.
5409 * A monitor address is an ip address, optionally followed
5410 * by a port number (separated by a colon).
5411 * I.e.: ip1[:port1][,ip2[:port2]...]
5413 * A comma-separated list of ceph and/or rbd options.
5415 * The name of the rados pool containing the rbd image.
5417 * The name of the image in that pool to map.
5419 * An optional snapshot id. If provided, the mapping will
5420 * present data from the image at the time that snapshot was
5421 * created. The image head is used if no snapshot id is
5422 * provided. Snapshot mappings are always read-only.
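 *
 * For illustration only (monitor address, key and names made up):
 *
 *   # echo "1.2.3.4:6789 name=admin,secret=<key> rbd foo -" \
 *       > /sys/bus/rbd/add
 *
 * maps the head of image "foo" in pool "rbd", using the monitor at
 * 1.2.3.4:6789.
 */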
5424 static int rbd_add_parse_args(const char *buf,
5425 struct ceph_options **ceph_opts,
5426 struct rbd_options **opts,
5427 struct rbd_spec **rbd_spec)
5431 const char *mon_addrs;
5433 size_t mon_addrs_size;
5434 struct parse_rbd_opts_ctx pctx = { 0 };
5435 struct ceph_options *copts;
5438 /* The first four tokens are required */
5440 len = next_token(&buf);
        if (!len) {
5442 rbd_warn(NULL, "no monitor address(es) provided");
                return -EINVAL;
        }
        mon_addrs = buf;
5446 mon_addrs_size = len + 1;
        buf += len;
5450 options = dup_token(&buf, NULL);
if (!options)
                return -ENOMEM;
        if (!*options) {
5454 rbd_warn(NULL, "no options provided");
                goto out_err;
        }
5458 pctx.spec = rbd_spec_alloc();
5462 pctx.spec->pool_name = dup_token(&buf, NULL);
5463 if (!pctx.spec->pool_name)
                goto out_mem;
5465 if (!*pctx.spec->pool_name) {
5466 rbd_warn(NULL, "no pool name provided");
                goto out_err;
        }
5470 pctx.spec->image_name = dup_token(&buf, NULL);
5471 if (!pctx.spec->image_name)
                goto out_mem;
5473 if (!*pctx.spec->image_name) {
5474 rbd_warn(NULL, "no image name provided");
                goto out_err;
        }
5479 * Snapshot name is optional; default is to use "-"
5480 * (indicating the head/no snapshot).
5482 len = next_token(&buf);
5484 buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */
5485 len = sizeof (RBD_SNAP_HEAD_NAME) - 1;
5486 } else if (len > RBD_MAX_SNAP_NAME_LEN) {
5487 ret = -ENAMETOOLONG;
5490 snap_name = kmemdup(buf, len + 1, GFP_KERNEL);
if (!snap_name)
                goto out_mem;
5493 *(snap_name + len) = '\0';
5494 pctx.spec->snap_name = snap_name;
5496 /* Initialize all rbd options to the defaults */
5498 pctx.opts = kzalloc(sizeof(*pctx.opts), GFP_KERNEL);
5502 pctx.opts->read_only = RBD_READ_ONLY_DEFAULT;
5503 pctx.opts->queue_depth = RBD_QUEUE_DEPTH_DEFAULT;
5504 pctx.opts->alloc_size = RBD_ALLOC_SIZE_DEFAULT;
5505 pctx.opts->lock_timeout = RBD_LOCK_TIMEOUT_DEFAULT;
5506 pctx.opts->lock_on_read = RBD_LOCK_ON_READ_DEFAULT;
5507 pctx.opts->exclusive = RBD_EXCLUSIVE_DEFAULT;
5508 pctx.opts->trim = RBD_TRIM_DEFAULT;
5510 copts = ceph_parse_options(options, mon_addrs,
5511 mon_addrs + mon_addrs_size - 1,
5512 parse_rbd_opts_token, &pctx);
5513 if (IS_ERR(copts)) {
5514 ret = PTR_ERR(copts);
goto out_err;
        }

        *ceph_opts = copts;
        *opts = pctx.opts;
5521 *rbd_spec = pctx.spec;

        return 0;

out_mem:
        ret = -ENOMEM;
out_err:
        kfree(pctx.opts);
5528 rbd_spec_put(pctx.spec);
        kfree(options);

        return ret;
5534 static void rbd_dev_image_unlock(struct rbd_device *rbd_dev)
5536 down_write(&rbd_dev->lock_rwsem);
5537 if (__rbd_is_lock_owner(rbd_dev))
5538 rbd_unlock(rbd_dev);
5539 up_write(&rbd_dev->lock_rwsem);
5542 static int rbd_add_acquire_lock(struct rbd_device *rbd_dev)
5546 if (!(rbd_dev->header.features & RBD_FEATURE_EXCLUSIVE_LOCK)) {
5547 rbd_warn(rbd_dev, "exclusive-lock feature is not enabled");
5551 /* FIXME: "rbd map --exclusive" should be interruptible */
5552 down_read(&rbd_dev->lock_rwsem);
5553 ret = rbd_wait_state_locked(rbd_dev, true);
5554 up_read(&rbd_dev->lock_rwsem);
5556 rbd_warn(rbd_dev, "failed to acquire exclusive lock");
5564 * An rbd format 2 image has a unique identifier, distinct from the
5565 * name given to it by the user. Internally, that identifier is
5566 * what's used to specify the names of objects related to the image.
5568 * A special "rbd id" object is used to map an rbd image name to its
5569 * id. If that object doesn't exist, then there is no v2 rbd image
5570 * with the supplied name.
5572 * This function will record the given rbd_dev's image_id field if
5573 * it can be determined, and in that case will return 0. If any
5574 * errors occur a negative errno will be returned and the rbd_dev's
5575 * image_id field will be unchanged (and should be NULL).
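 *
 * For example, an image named "foo" has its id stored in an object
 * named "rbd_id.foo" (RBD_ID_PREFIX followed by the image name).
 */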
5577 static int rbd_dev_image_id(struct rbd_device *rbd_dev)
5581 CEPH_DEFINE_OID_ONSTACK(oid);
5586 * When probing a parent image, the image id is already
5587 * known (and the image name likely is not). There's no
5588 * need to fetch the image id again in this case. We
5589 * do still need to set the image format though.
5591 if (rbd_dev->spec->image_id) {
5592 rbd_dev->image_format = *rbd_dev->spec->image_id ? 2 : 1;
5598 * First, see if the format 2 image id file exists, and if
5599 * so, get the image's persistent id from it.
5601 ret = ceph_oid_aprintf(&oid, GFP_KERNEL, "%s%s", RBD_ID_PREFIX,
5602 rbd_dev->spec->image_name);
5606 dout("rbd id object name is %s\n", oid.name);
5608 /* Response will be an encoded string, which includes a length */
5610 size = sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX;
5611 response = kzalloc(size, GFP_NOIO);
5617 /* If it doesn't exist we'll assume it's a format 1 image */
5619 ret = rbd_obj_method_sync(rbd_dev, &oid, &rbd_dev->header_oloc,
"get_id", NULL, 0,
5621 response, RBD_IMAGE_ID_LEN_MAX);
5622 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
5623 if (ret == -ENOENT) {
5624 image_id = kstrdup("", GFP_KERNEL);
5625 ret = image_id ? 0 : -ENOMEM;
5627 rbd_dev->image_format = 1;
5628 } else if (ret >= 0) {
                void *p = response;

5631 image_id = ceph_extract_encoded_string(&p, p + ret,
                                                NULL, GFP_NOIO);
5633 ret = PTR_ERR_OR_ZERO(image_id);
5635 rbd_dev->image_format = 2;
5639 rbd_dev->spec->image_id = image_id;
5640 dout("image_id is %s\n", image_id);
5644 ceph_oid_destroy(&oid);
5649 * Undo whatever state changes are made by v1 or v2 header info
 * calls.
 */
5652 static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
5654 struct rbd_image_header *header;
5656 rbd_dev_parent_put(rbd_dev);
5658 /* Free dynamic fields from the header, then zero it out */
5660 header = &rbd_dev->header;
5661 ceph_put_snap_context(header->snapc);
5662 kfree(header->snap_sizes);
5663 kfree(header->snap_names);
5664 kfree(header->object_prefix);
5665 memset(header, 0, sizeof (*header));
5668 static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev)
5672 ret = rbd_dev_v2_object_prefix(rbd_dev);
5677 * Get and check features for the image. Currently the
5678 * features are assumed to never change.
5680 ret = rbd_dev_v2_features(rbd_dev);
5684 /* If the image supports fancy striping, get its parameters */
5686 if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) {
5687 ret = rbd_dev_v2_striping_info(rbd_dev);
5692 if (rbd_dev->header.features & RBD_FEATURE_DATA_POOL) {
5693 ret = rbd_dev_v2_data_pool(rbd_dev);
5698 rbd_init_layout(rbd_dev);
5702 rbd_dev->header.features = 0;
5703 kfree(rbd_dev->header.object_prefix);
5704 rbd_dev->header.object_prefix = NULL;
5709 * @depth is rbd_dev_image_probe() -> rbd_dev_probe_parent() ->
5710 * rbd_dev_image_probe() recursion depth, which means it's also the
5711 * length of the already discovered part of the parent chain.
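 *
 * Illustration: with RBD_MAX_PARENT_CHAIN_LEN at 16, a clone whose
 * ancestry is 16 images deep still probes; one level deeper trips the
 * depth check below.
 */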
5713 static int rbd_dev_probe_parent(struct rbd_device *rbd_dev, int depth)
5715 struct rbd_device *parent = NULL;
5718 if (!rbd_dev->parent_spec)
5721 if (++depth > RBD_MAX_PARENT_CHAIN_LEN) {
5722 pr_info("parent chain is too long (%d)\n", depth);
5727 parent = __rbd_dev_create(rbd_dev->rbd_client, rbd_dev->parent_spec);
5734 * Images related by parent/child relationships always share
5735 * rbd_client and spec/parent_spec, so bump their refcounts.
5737 __rbd_get_client(rbd_dev->rbd_client);
5738 rbd_spec_get(rbd_dev->parent_spec);
5740 ret = rbd_dev_image_probe(parent, depth);
5744 rbd_dev->parent = parent;
5745 atomic_set(&rbd_dev->parent_ref, 1);
5749 rbd_dev_unparent(rbd_dev);
5750 rbd_dev_destroy(parent);
5754 static void rbd_dev_device_release(struct rbd_device *rbd_dev)
5756 clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
5757 rbd_dev_mapping_clear(rbd_dev);
5758 rbd_free_disk(rbd_dev);
5760 unregister_blkdev(rbd_dev->major, rbd_dev->name);
5764 * rbd_dev->header_rwsem must be locked for write and will be unlocked
 * upon return.
 */
5767 static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
5771 /* Record our major and minor device numbers. */
5773 if (!single_major) {
5774 ret = register_blkdev(0, rbd_dev->name);
5776 goto err_out_unlock;
5778 rbd_dev->major = ret;
5781 rbd_dev->major = rbd_major;
5782 rbd_dev->minor = rbd_dev_id_to_minor(rbd_dev->dev_id);
5785 /* Set up the blkdev mapping. */
5787 ret = rbd_init_disk(rbd_dev);
5789 goto err_out_blkdev;
5791 ret = rbd_dev_mapping_set(rbd_dev);
5795 set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
5796 set_disk_ro(rbd_dev->disk, rbd_dev->opts->read_only);
5798 ret = dev_set_name(&rbd_dev->dev, "%d", rbd_dev->dev_id);
5800 goto err_out_mapping;
5802 set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
5803 up_write(&rbd_dev->header_rwsem);
5807 rbd_dev_mapping_clear(rbd_dev);
5809 rbd_free_disk(rbd_dev);
5812 unregister_blkdev(rbd_dev->major, rbd_dev->name);
5814 up_write(&rbd_dev->header_rwsem);
5818 static int rbd_dev_header_name(struct rbd_device *rbd_dev)
5820 struct rbd_spec *spec = rbd_dev->spec;
5823 /* Record the header object name for this rbd image. */
5825 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
5826 if (rbd_dev->image_format == 1)
5827 ret = ceph_oid_aprintf(&rbd_dev->header_oid, GFP_KERNEL, "%s%s",
5828 spec->image_name, RBD_SUFFIX);
5830 ret = ceph_oid_aprintf(&rbd_dev->header_oid, GFP_KERNEL, "%s%s",
5831 RBD_HEADER_PREFIX, spec->image_id);
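        /*
         * e.g. a format 1 image "foo" gets header object "foo.rbd", and a
         * format 2 image with id "1234" gets "rbd_header.1234" (assuming
         * the usual ".rbd" / "rbd_header." values for those macros).
         */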
5836 static void rbd_dev_image_release(struct rbd_device *rbd_dev)
5838 rbd_dev_unprobe(rbd_dev);
5840 rbd_unregister_watch(rbd_dev);
5841 rbd_dev->image_format = 0;
5842 kfree(rbd_dev->spec->image_id);
5843 rbd_dev->spec->image_id = NULL;
5847 * Probe for the existence of the header object for the given rbd
5848 * device. If this image is the one being mapped (i.e., not a
5849 * parent), initiate a watch on its header object before using that
5850 * object to get detailed information about the rbd image.
5852 static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
5857 * Get the id from the image id object. Unless there's an
5858 * error, rbd_dev->spec->image_id will be filled in with
5859 * a dynamically-allocated string, and rbd_dev->image_format
5860 * will be set to either 1 or 2.
5862 ret = rbd_dev_image_id(rbd_dev);
5866 ret = rbd_dev_header_name(rbd_dev);
5868 goto err_out_format;
5871 ret = rbd_register_watch(rbd_dev);
5874 pr_info("image %s/%s%s%s does not exist\n",
5875 rbd_dev->spec->pool_name,
5876 rbd_dev->spec->pool_ns ?: "",
5877 rbd_dev->spec->pool_ns ? "/" : "",
5878 rbd_dev->spec->image_name);
5879 goto err_out_format;
5883 ret = rbd_dev_header_info(rbd_dev);
5888 * If this image is the one being mapped, we have pool name and
5889 * id, image name and id, and snap name - need to fill snap id.
5890 * Otherwise this is a parent image, identified by pool, image
5891 * and snap ids - need to fill in names for those ids.
5894 ret = rbd_spec_fill_snap_id(rbd_dev);
5896 ret = rbd_spec_fill_names(rbd_dev);
5899 pr_info("snap %s/%s%s%s@%s does not exist\n",
5900 rbd_dev->spec->pool_name,
5901 rbd_dev->spec->pool_ns ?: "",
5902 rbd_dev->spec->pool_ns ? "/" : "",
5903 rbd_dev->spec->image_name,
5904 rbd_dev->spec->snap_name);
5908 if (rbd_dev->header.features & RBD_FEATURE_LAYERING) {
5909 ret = rbd_dev_v2_parent_info(rbd_dev);
5914 ret = rbd_dev_probe_parent(rbd_dev, depth);
5918 dout("discovered format %u image, header name is %s\n",
5919 rbd_dev->image_format, rbd_dev->header_oid.name);
5923 rbd_dev_unprobe(rbd_dev);
5926 rbd_unregister_watch(rbd_dev);
5928 rbd_dev->image_format = 0;
5929 kfree(rbd_dev->spec->image_id);
5930 rbd_dev->spec->image_id = NULL;
5934 static ssize_t do_rbd_add(struct bus_type *bus,
5938 struct rbd_device *rbd_dev = NULL;
5939 struct ceph_options *ceph_opts = NULL;
5940 struct rbd_options *rbd_opts = NULL;
5941 struct rbd_spec *spec = NULL;
5942 struct rbd_client *rbdc;
5945 if (!try_module_get(THIS_MODULE))
        return -ENODEV;
5948 /* parse add command */
5949 rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
5953 rbdc = rbd_get_client(ceph_opts);
5960 rc = ceph_pg_poolid_by_name(rbdc->client->osdc.osdmap, spec->pool_name);
if (rc < 0) {
                if (rc == -ENOENT)
5963 pr_info("pool %s does not exist\n", spec->pool_name);
5964 goto err_out_client;
        }
5966 spec->pool_id = (u64)rc;
5968 rbd_dev = rbd_dev_create(rbdc, spec, rbd_opts);
5971 goto err_out_client;
5973 rbdc = NULL; /* rbd_dev now owns this */
5974 spec = NULL; /* rbd_dev now owns this */
5975 rbd_opts = NULL; /* rbd_dev now owns this */
5977 rbd_dev->config_info = kstrdup(buf, GFP_KERNEL);
5978 if (!rbd_dev->config_info) {
5980 goto err_out_rbd_dev;
5983 down_write(&rbd_dev->header_rwsem);
5984 rc = rbd_dev_image_probe(rbd_dev, 0);
5986 up_write(&rbd_dev->header_rwsem);
5987 goto err_out_rbd_dev;
5990 /* If we are mapping a snapshot it must be marked read-only */
5991 if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
5992 rbd_dev->opts->read_only = true;
5994 if (rbd_dev->opts->alloc_size > rbd_dev->layout.object_size) {
5995 rbd_warn(rbd_dev, "alloc_size adjusted to %u",
5996 rbd_dev->layout.object_size);
5997 rbd_dev->opts->alloc_size = rbd_dev->layout.object_size;
6000 rc = rbd_dev_device_setup(rbd_dev);
6002 goto err_out_image_probe;
6004 if (rbd_dev->opts->exclusive) {
6005 rc = rbd_add_acquire_lock(rbd_dev);
6007 goto err_out_device_setup;
6010 /* Everything's ready. Announce the disk to the world. */
6012 rc = device_add(&rbd_dev->dev);
6014 goto err_out_image_lock;
6016 add_disk(rbd_dev->disk);
6017 /* see rbd_init_disk() */
6018 blk_put_queue(rbd_dev->disk->queue);
6020 spin_lock(&rbd_dev_list_lock);
6021 list_add_tail(&rbd_dev->node, &rbd_dev_list);
6022 spin_unlock(&rbd_dev_list_lock);
6024 pr_info("%s: capacity %llu features 0x%llx\n", rbd_dev->disk->disk_name,
6025 (unsigned long long)get_capacity(rbd_dev->disk) << SECTOR_SHIFT,
6026 rbd_dev->header.features);
6029 module_put(THIS_MODULE);
6033 rbd_dev_image_unlock(rbd_dev);
6034 err_out_device_setup:
6035 rbd_dev_device_release(rbd_dev);
6036 err_out_image_probe:
6037 rbd_dev_image_release(rbd_dev);
6039 rbd_dev_destroy(rbd_dev);
6041 rbd_put_client(rbdc);
6048 static ssize_t rbd_add(struct bus_type *bus,
6055 return do_rbd_add(bus, buf, count);
6058 static ssize_t rbd_add_single_major(struct bus_type *bus,
6062 return do_rbd_add(bus, buf, count);
6065 static void rbd_dev_remove_parent(struct rbd_device *rbd_dev)
6067 while (rbd_dev->parent) {
6068 struct rbd_device *first = rbd_dev;
6069 struct rbd_device *second = first->parent;
6070 struct rbd_device *third;
6073 * Follow to the parent with no grandparent and
 * remove it.
 */
6076 while (second && (third = second->parent)) {
                        first = second;
                        second = third;
                }
6081 rbd_dev_image_release(second);
6082 rbd_dev_destroy(second);
6083 first->parent = NULL;
6084 first->parent_overlap = 0;
6086 rbd_assert(first->parent_spec);
6087 rbd_spec_put(first->parent_spec);
6088 first->parent_spec = NULL;
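/*
 * Illustration: writing "2" to /sys/bus/rbd/remove unmaps /dev/rbd2;
 * writing "2 force" does so even if the device is still open.
 */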
6092 static ssize_t do_rbd_remove(struct bus_type *bus,
6096 struct rbd_device *rbd_dev = NULL;
6097 struct list_head *tmp;
6105 sscanf(buf, "%d %5s", &dev_id, opt_buf);
if (dev_id < 0) {
6107 pr_err("dev_id out of range\n");
                return -EINVAL;
        }
6110 if (opt_buf[0] != '\0') {
6111 if (!strcmp(opt_buf, "force")) {
force = true;
                } else {
6114 pr_err("bad remove option at '%s'\n", opt_buf);
                        return -EINVAL;
                }
        }
6120 spin_lock(&rbd_dev_list_lock);
6121 list_for_each(tmp, &rbd_dev_list) {
6122 rbd_dev = list_entry(tmp, struct rbd_device, node);
6123 if (rbd_dev->dev_id == dev_id) {
6129 spin_lock_irq(&rbd_dev->lock);
6130 if (rbd_dev->open_count && !force)
                        ret = -EBUSY;
6132 else if (test_and_set_bit(RBD_DEV_FLAG_REMOVING,
                                        &rbd_dev->flags))
                        ret = -EINPROGRESS;
6135 spin_unlock_irq(&rbd_dev->lock);
6137 spin_unlock(&rbd_dev_list_lock);
6143 * Prevent new IO from being queued and wait for existing
6144 * IO to complete/fail.
6146 blk_mq_freeze_queue(rbd_dev->disk->queue);
6147 blk_set_queue_dying(rbd_dev->disk->queue);
6150 del_gendisk(rbd_dev->disk);
6151 spin_lock(&rbd_dev_list_lock);
6152 list_del_init(&rbd_dev->node);
6153 spin_unlock(&rbd_dev_list_lock);
6154 device_del(&rbd_dev->dev);
6156 rbd_dev_image_unlock(rbd_dev);
6157 rbd_dev_device_release(rbd_dev);
6158 rbd_dev_image_release(rbd_dev);
6159 rbd_dev_destroy(rbd_dev);
6163 static ssize_t rbd_remove(struct bus_type *bus,
6170 return do_rbd_remove(bus, buf, count);
6173 static ssize_t rbd_remove_single_major(struct bus_type *bus,
6177 return do_rbd_remove(bus, buf, count);
6181 * create control files in sysfs
6184 static int __init rbd_sysfs_init(void)
6188 ret = device_register(&rbd_root_dev);
6192 ret = bus_register(&rbd_bus_type);
6194 device_unregister(&rbd_root_dev);
6199 static void __exit rbd_sysfs_cleanup(void)
6201 bus_unregister(&rbd_bus_type);
6202 device_unregister(&rbd_root_dev);
6205 static int __init rbd_slab_init(void)
6207 rbd_assert(!rbd_img_request_cache);
6208 rbd_img_request_cache = KMEM_CACHE(rbd_img_request, 0);
6209 if (!rbd_img_request_cache)
6212 rbd_assert(!rbd_obj_request_cache);
6213 rbd_obj_request_cache = KMEM_CACHE(rbd_obj_request, 0);
6214 if (!rbd_obj_request_cache)
6220 kmem_cache_destroy(rbd_img_request_cache);
6221 rbd_img_request_cache = NULL;
6225 static void rbd_slab_exit(void)
6227 rbd_assert(rbd_obj_request_cache);
6228 kmem_cache_destroy(rbd_obj_request_cache);
6229 rbd_obj_request_cache = NULL;
6231 rbd_assert(rbd_img_request_cache);
6232 kmem_cache_destroy(rbd_img_request_cache);
6233 rbd_img_request_cache = NULL;
6236 static int __init rbd_init(void)
6240 if (!libceph_compatible(NULL)) {
6241 rbd_warn(NULL, "libceph incompatibility (quitting)");
6245 rc = rbd_slab_init();
6250 * The number of active work items is limited by the number of
6251 * rbd devices * queue depth, so leave @max_active at default.
6253 rbd_wq = alloc_workqueue(RBD_DRV_NAME, WQ_MEM_RECLAIM, 0);
6260 rbd_major = register_blkdev(0, RBD_DRV_NAME);
6261 if (rbd_major < 0) {
                        rc = rbd_major;
                        goto err_out_wq;
                }
6267 rc = rbd_sysfs_init();
if (rc)
6269 goto err_out_blkdev;
6272 pr_info("loaded (major %d)\n", rbd_major);
6274 pr_info("loaded\n");
err_out_blkdev:
        if (single_major)
6280 unregister_blkdev(rbd_major, RBD_DRV_NAME);
err_out_wq:
6282 destroy_workqueue(rbd_wq);
err_out_slab:
        rbd_slab_exit();
        return rc;
6288 static void __exit rbd_exit(void)
6290 ida_destroy(&rbd_dev_id_ida);
6291 rbd_sysfs_cleanup();
if (single_major)
6293 unregister_blkdev(rbd_major, RBD_DRV_NAME);
6294 destroy_workqueue(rbd_wq);
        rbd_slab_exit();
6298 module_init(rbd_init);
6299 module_exit(rbd_exit);
6301 MODULE_AUTHOR("Alex Elder <elder@inktank.com>");
6302 MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
6303 MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
6304 /* following authorship retained from original osdblk.c */
6305 MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>");
6307 MODULE_DESCRIPTION("RADOS Block Device (RBD) driver");
6308 MODULE_LICENSE("GPL");