/*
   rbd.c -- Export ceph rados objects as a Linux block device

   based on drivers/block/osdblk.c:

   Copyright 2009 Red Hat, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

   For usage instructions, please refer to:

		 Documentation/ABI/testing/sysfs-bus-rbd

 */
#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/mon_client.h>
#include <linux/ceph/cls_lock_client.h>
#include <linux/ceph/striper.h>
#include <linux/ceph/decode.h>
#include <linux/fs_parser.h>
#include <linux/bsearch.h>

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/blk-mq.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/workqueue.h>

#include "rbd_types.h"
#define RBD_DEBUG	/* Activate rbd_assert() calls */
/*
 * Increment the given counter and return its updated value.
 * If the counter is already 0 it will not be incremented.
 * If the counter is already at its maximum value returns
 * -EINVAL without updating it.
 */
static int atomic_inc_return_safe(atomic_t *v)
{
	unsigned int counter;

	counter = (unsigned int)atomic_fetch_add_unless(v, 1, 0);
	if (counter <= (unsigned int)INT_MAX)
		return (int)counter;

	atomic_dec(v);

	return -EINVAL;
}

/* Decrement the counter.  Return the resulting value, or -EINVAL */
static int atomic_dec_return_safe(atomic_t *v)
{
	int counter;

	counter = atomic_dec_return(v);
	if (counter >= 0)
		return counter;

	atomic_inc(v);

	return -EINVAL;
}
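/*
 * Usage sketch (illustrative only; see rbd_dev_parent_get() below):
 *
 *	counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
 *	if (counter > 0)
 *		;	// reference taken
 *
 * A return of 0 means the counter was already 0 and was left alone;
 * -EINVAL means it had overflowed.  The failed increment is undone
 * above and the failed decrement is undone in atomic_dec_return_safe(),
 * so a saturated counter stays pinned instead of wrapping.
 */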
#define RBD_DRV_NAME "rbd"

#define RBD_MINORS_PER_MAJOR		256
#define RBD_SINGLE_MAJOR_PART_SHIFT	4

#define RBD_MAX_PARENT_CHAIN_LEN	16

#define RBD_SNAP_DEV_NAME_PREFIX	"snap_"
#define RBD_MAX_SNAP_NAME_LEN	\
			(NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))

#define RBD_MAX_SNAP_COUNT	510	/* allows max snapc to fit in 4KB */

#define RBD_SNAP_HEAD_NAME	"-"

#define	BAD_SNAP_INDEX	U32_MAX		/* invalid index into snap array */

/* This allows a single page to hold an image name sent by OSD */
#define RBD_IMAGE_NAME_LEN_MAX	(PAGE_SIZE - sizeof (__le32) - 1)
#define RBD_IMAGE_ID_LEN_MAX	64

#define RBD_OBJ_PREFIX_LEN_MAX	64

#define RBD_NOTIFY_TIMEOUT	5	/* seconds */
#define RBD_RETRY_DELAY		msecs_to_jiffies(1000)
#define RBD_FEATURE_LAYERING		(1ULL<<0)
#define RBD_FEATURE_STRIPINGV2		(1ULL<<1)
#define RBD_FEATURE_EXCLUSIVE_LOCK	(1ULL<<2)
#define RBD_FEATURE_OBJECT_MAP		(1ULL<<3)
#define RBD_FEATURE_FAST_DIFF		(1ULL<<4)
#define RBD_FEATURE_DEEP_FLATTEN	(1ULL<<5)
#define RBD_FEATURE_DATA_POOL		(1ULL<<7)
#define RBD_FEATURE_OPERATIONS		(1ULL<<8)

#define RBD_FEATURES_ALL	(RBD_FEATURE_LAYERING |		\
				 RBD_FEATURE_STRIPINGV2 |	\
				 RBD_FEATURE_EXCLUSIVE_LOCK |	\
				 RBD_FEATURE_OBJECT_MAP |	\
				 RBD_FEATURE_FAST_DIFF |	\
				 RBD_FEATURE_DEEP_FLATTEN |	\
				 RBD_FEATURE_DATA_POOL |	\
				 RBD_FEATURE_OPERATIONS)

/* Features supported by this (client software) implementation. */

#define RBD_FEATURES_SUPPORTED	(RBD_FEATURES_ALL)
/*
 * An RBD device name will be "rbd#", where the "rbd" comes from
 * RBD_DRV_NAME above, and # is a unique integer identifier.
 */
#define DEV_NAME_LEN		32
/*
 * block device image metadata (in-memory version)
 */
struct rbd_image_header {
	/* These six fields never change for a given rbd image */
	char *object_prefix;
	__u8 obj_order;
	u64 stripe_unit;
	u64 stripe_count;
	s64 data_pool_id;
	u64 features;		/* Might be changeable someday? */

	/* The remaining fields need to be updated occasionally */
	u64 image_size;
	struct ceph_snap_context *snapc;
	char *snap_names;	/* format 1 only */
	u64 *snap_sizes;	/* format 1 only */
};
/*
 * An rbd image specification.
 *
 * The tuple (pool_id, image_id, snap_id) is sufficient to uniquely
 * identify an image.  Each rbd_dev structure includes a pointer to
 * an rbd_spec structure that encapsulates this identity.
 *
 * Each of the id's in an rbd_spec has an associated name.  For a
 * user-mapped image, the names are supplied and the id's associated
 * with them are looked up.  For a layered image, a parent image is
 * defined by the tuple, and the names are looked up.
 *
 * An rbd_dev structure contains a parent_spec pointer which is
 * non-null if the image it represents is a child in a layered
 * image.  This pointer will refer to the rbd_spec structure used
 * by the parent rbd_dev for its own identity (i.e., the structure
 * is shared between the parent and child).
 *
 * Since these structures are populated once, during the discovery
 * phase of image construction, they are effectively immutable so
 * we make no effort to synchronize access to them.
 *
 * Note that code herein does not assume the image name is known (it
 * could be a null pointer).
 */
struct rbd_spec {
	u64		pool_id;
	const char	*pool_name;
	const char	*pool_ns;	/* NULL if default, never "" */

	const char	*image_id;
	const char	*image_name;

	u64		snap_id;
	const char	*snap_name;

	struct kref	kref;
};
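/*
 * For example, mapping image "foo" in pool "rbd" at snapshot "snap1"
 * might resolve to the tuple (pool_id 2, image_id "10056b8b4567",
 * snap_id 18) -- made-up values, only the shape of the tuple matters;
 * the names are carried alongside mainly for reporting.
 */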
/*
 * an instance of the client.  multiple devices may share an rbd client.
 */
struct rbd_client {
	struct ceph_client	*client;
	struct kref		kref;
	struct list_head	node;
};
struct pending_result {
	int			result;		/* first nonzero result */
	int			num_pending;
};

struct rbd_img_request;
enum obj_request_type {
	OBJ_REQUEST_NODATA = 1,
	OBJ_REQUEST_BIO,	/* pointer into provided bio (list) */
	OBJ_REQUEST_BVECS,	/* pointer into provided bio_vec array */
	OBJ_REQUEST_OWN_BVECS,	/* private bio_vec array, doesn't own pages */
};

enum obj_operation_type {
	OBJ_OP_READ = 1,
	OBJ_OP_WRITE,
	OBJ_OP_DISCARD,
	OBJ_OP_ZEROOUT,
};
#define RBD_OBJ_FLAG_DELETION			(1U << 0)
#define RBD_OBJ_FLAG_COPYUP_ENABLED		(1U << 1)
#define RBD_OBJ_FLAG_COPYUP_ZEROS		(1U << 2)
#define RBD_OBJ_FLAG_MAY_EXIST			(1U << 3)
#define RBD_OBJ_FLAG_NOOP_FOR_NONEXISTENT	(1U << 4)
enum rbd_obj_read_state {
	RBD_OBJ_READ_START = 1,
	RBD_OBJ_READ_OBJECT,
	RBD_OBJ_READ_PARENT,
};
/*
 * Writes go through the following state machine to deal with
 * layering:
 *
 *            . . . . . RBD_OBJ_WRITE_GUARD. . . . . . . . . . . . . .
 *            .                 |                                    .
 *            .                 v                                    .
 *            .    RBD_OBJ_WRITE_READ_FROM_PARENT. . .               .
 *            .                 |                    .               .
 *            .                 v                    v (deep-copyup  .
 *    (image  .   RBD_OBJ_WRITE_COPYUP_EMPTY_SNAPC   .  not needed)  .
 * flattened) .                 |                    .               .
 *            .                 v                    .               .
 *            . . . .RBD_OBJ_WRITE_COPYUP_OPS. . . . .      (copyup  .
 *                              |                        .  needed)  .
 *                              v                        .           .
 *                             done . . . . . . . . . . . . . . . . .
 *
 * Writes start in RBD_OBJ_WRITE_GUARD or _FLAT, depending on whether
 * assert_exists guard is needed or not (in some cases it's not needed
 * even if there is a parent).
 */
enum rbd_obj_write_state {
	RBD_OBJ_WRITE_START = 1,
	RBD_OBJ_WRITE_PRE_OBJECT_MAP,
	RBD_OBJ_WRITE_OBJECT,
	__RBD_OBJ_WRITE_COPYUP,
	RBD_OBJ_WRITE_COPYUP,
	RBD_OBJ_WRITE_POST_OBJECT_MAP,
};
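/*
 * In terms of the states above, a plain write to an existing object
 * roughly goes START -> PRE_OBJECT_MAP (mark the object as existing)
 * -> OBJECT (issue the write) -> POST_OBJECT_MAP -> done; the COPYUP
 * states are entered only when a guarded write hits ENOENT and the
 * range must first be read from the parent.
 */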
enum rbd_obj_copyup_state {
	RBD_OBJ_COPYUP_START = 1,
	RBD_OBJ_COPYUP_READ_PARENT,
	__RBD_OBJ_COPYUP_OBJECT_MAPS,
	RBD_OBJ_COPYUP_OBJECT_MAPS,
	__RBD_OBJ_COPYUP_WRITE_OBJECT,
	RBD_OBJ_COPYUP_WRITE_OBJECT,
};
struct rbd_obj_request {
	struct ceph_object_extent ex;
	unsigned int		flags;	/* RBD_OBJ_FLAG_* */
	union {
		enum rbd_obj_read_state	 read_state;	/* for reads */
		enum rbd_obj_write_state write_state;	/* for writes */
	};

	struct rbd_img_request	*img_request;
	struct ceph_file_extent	*img_extents;
	u32			num_img_extents;

	union {
		struct ceph_bio_iter	bio_pos;
		struct {
			struct ceph_bvec_iter	bvec_pos;
			u32			bvec_count;
			u32			bvec_idx;
		};
	};

	enum rbd_obj_copyup_state copyup_state;
	struct bio_vec		*copyup_bvecs;
	u32			copyup_bvec_count;

	struct list_head	osd_reqs;	/* w/ r_private_item */

	struct mutex		state_mutex;
	struct pending_result	pending;
	struct kref		kref;
};
enum img_req_flags {
	IMG_REQ_CHILD,		/* initiator: block = 0, child image = 1 */
	IMG_REQ_LAYERED,	/* ENOENT handling: normal = 0, layered = 1 */
};

enum rbd_img_state {
	RBD_IMG_START = 1,
	RBD_IMG_EXCLUSIVE_LOCK,
	__RBD_IMG_OBJECT_REQUESTS,
	RBD_IMG_OBJECT_REQUESTS,
};
struct rbd_img_request {
	struct rbd_device	*rbd_dev;
	enum obj_operation_type	op_type;
	enum obj_request_type	data_type;
	unsigned long		flags;
	enum rbd_img_state	state;
	union {
		u64			snap_id;	/* for reads */
		struct ceph_snap_context *snapc;	/* for writes */
	};
	struct rbd_obj_request	*obj_request;	/* obj req initiator */

	struct list_head	lock_item;
	struct list_head	object_extents;	/* obj_req.ex structs */

	struct mutex		state_mutex;
	struct pending_result	pending;
	struct work_struct	work;
	int			work_result;
};
#define for_each_obj_request(ireq, oreq) \
	list_for_each_entry(oreq, &(ireq)->object_extents, ex.oe_item)
#define for_each_obj_request_safe(ireq, oreq, n) \
	list_for_each_entry_safe(oreq, n, &(ireq)->object_extents, ex.oe_item)
enum rbd_watch_state {
	RBD_WATCH_STATE_UNREGISTERED,
	RBD_WATCH_STATE_REGISTERED,
	RBD_WATCH_STATE_ERROR,
};

enum rbd_lock_state {
	RBD_LOCK_STATE_UNLOCKED,
	RBD_LOCK_STATE_LOCKED,
	RBD_LOCK_STATE_RELEASING,
};

/* WatchNotify::ClientId */
struct rbd_client_id {
	u64 gid;
	u64 handle;
};

struct rbd_mapping {
	u64                     size;
};

/*
 * a single device
 */
struct rbd_device {
	int			dev_id;		/* blkdev unique id */

	int			major;		/* blkdev assigned major */
	int			minor;
	struct gendisk		*disk;		/* blkdev's gendisk and rq */

	u32			image_format;	/* Either 1 or 2 */
	struct rbd_client	*rbd_client;

	char			name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */

	spinlock_t		lock;		/* queue, flags, open_count */

	struct rbd_image_header	header;
	unsigned long		flags;		/* possibly lock protected */
	struct rbd_spec		*spec;
	struct rbd_options	*opts;
	char			*config_info;	/* add{,_single_major} string */

	struct ceph_object_id	header_oid;
	struct ceph_object_locator header_oloc;

	struct ceph_file_layout	layout;		/* used for all rbd requests */

	struct mutex		watch_mutex;
	enum rbd_watch_state	watch_state;
	struct ceph_osd_linger_request *watch_handle;
	u64			watch_cookie;
	struct delayed_work	watch_dwork;

	struct rw_semaphore	lock_rwsem;
	enum rbd_lock_state	lock_state;
	char			lock_cookie[32];
	struct rbd_client_id	owner_cid;
	struct work_struct	acquired_lock_work;
	struct work_struct	released_lock_work;
	struct delayed_work	lock_dwork;
	struct work_struct	unlock_work;
	spinlock_t		lock_lists_lock;
	struct list_head	acquiring_list;
	struct list_head	running_list;
	struct completion	acquire_wait;
	int			acquire_err;
	struct completion	releasing_wait;

	spinlock_t		object_map_lock;
	u8			*object_map;
	u64			object_map_size;	/* in objects */
	u64			object_map_flags;

	struct workqueue_struct	*task_wq;

	struct rbd_spec		*parent_spec;
	u64			parent_overlap;
	atomic_t		parent_ref;
	struct rbd_device	*parent;

	/* Block layer tags. */
	struct blk_mq_tag_set	tag_set;

	/* protects updating the header */
	struct rw_semaphore	header_rwsem;

	struct rbd_mapping	mapping;

	struct list_head	node;

	/* sysfs related */
	struct device		dev;
	unsigned long		open_count;	/* protected by lock */
};
/*
 * Flag bits for rbd_dev->flags:
 * - REMOVING (which is coupled with rbd_dev->open_count) is protected
 *   by rbd_dev->lock
 */
enum rbd_dev_flags {
	RBD_DEV_FLAG_EXISTS,	/* rbd_dev_device_setup() ran */
	RBD_DEV_FLAG_REMOVING,	/* this mapping is being removed */
	RBD_DEV_FLAG_READONLY,	/* -o ro or snapshot */
};
static DEFINE_MUTEX(client_mutex);	/* Serialize client creation */

static LIST_HEAD(rbd_dev_list);		/* devices */
static DEFINE_SPINLOCK(rbd_dev_list_lock);

static LIST_HEAD(rbd_client_list);	/* clients */
static DEFINE_SPINLOCK(rbd_client_list_lock);

/* Slab caches for frequently-allocated structures */

static struct kmem_cache	*rbd_img_request_cache;
static struct kmem_cache	*rbd_obj_request_cache;

static int rbd_major;
static DEFINE_IDA(rbd_dev_id_ida);

static struct workqueue_struct *rbd_wq;

static struct ceph_snap_context rbd_empty_snapc = {
	.nref = REFCOUNT_INIT(1),
};
/*
 * single-major requires >= 0.75 version of userspace rbd utility.
 */
static bool single_major = true;
module_param(single_major, bool, 0444);
MODULE_PARM_DESC(single_major, "Use a single major number for all rbd devices (default: true)");
static ssize_t add_store(struct bus_type *bus, const char *buf, size_t count);
static ssize_t remove_store(struct bus_type *bus, const char *buf,
			    size_t count);
static ssize_t add_single_major_store(struct bus_type *bus, const char *buf,
				      size_t count);
static ssize_t remove_single_major_store(struct bus_type *bus, const char *buf,
					 size_t count);
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth);
static int rbd_dev_id_to_minor(int dev_id)
{
	return dev_id << RBD_SINGLE_MAJOR_PART_SHIFT;
}

static int minor_to_rbd_dev_id(int minor)
{
	return minor >> RBD_SINGLE_MAJOR_PART_SHIFT;
}
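/*
 * With RBD_SINGLE_MAJOR_PART_SHIFT == 4 each device gets 16 minors:
 * e.g. dev_id 2 maps to minor 32, and minors 33..47 are left for that
 * device's partitions.
 */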
static bool rbd_is_ro(struct rbd_device *rbd_dev)
{
	return test_bit(RBD_DEV_FLAG_READONLY, &rbd_dev->flags);
}

static bool rbd_is_snap(struct rbd_device *rbd_dev)
{
	return rbd_dev->spec->snap_id != CEPH_NOSNAP;
}
static bool __rbd_is_lock_owner(struct rbd_device *rbd_dev)
{
	lockdep_assert_held(&rbd_dev->lock_rwsem);

	return rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED ||
	       rbd_dev->lock_state == RBD_LOCK_STATE_RELEASING;
}

static bool rbd_is_lock_owner(struct rbd_device *rbd_dev)
{
	bool is_lock_owner;

	down_read(&rbd_dev->lock_rwsem);
	is_lock_owner = __rbd_is_lock_owner(rbd_dev);
	up_read(&rbd_dev->lock_rwsem);
	return is_lock_owner;
}
static ssize_t supported_features_show(struct bus_type *bus, char *buf)
{
	return sprintf(buf, "0x%llx\n", RBD_FEATURES_SUPPORTED);
}
static BUS_ATTR_WO(add);
static BUS_ATTR_WO(remove);
static BUS_ATTR_WO(add_single_major);
static BUS_ATTR_WO(remove_single_major);
static BUS_ATTR_RO(supported_features);

static struct attribute *rbd_bus_attrs[] = {
	&bus_attr_add.attr,
	&bus_attr_remove.attr,
	&bus_attr_add_single_major.attr,
	&bus_attr_remove_single_major.attr,
	&bus_attr_supported_features.attr,
	NULL,
};
static umode_t rbd_bus_is_visible(struct kobject *kobj,
				  struct attribute *attr, int index)
{
	if (!single_major &&
	    (attr == &bus_attr_add_single_major.attr ||
	     attr == &bus_attr_remove_single_major.attr))
		return 0;

	return attr->mode;
}
static const struct attribute_group rbd_bus_group = {
	.attrs = rbd_bus_attrs,
	.is_visible = rbd_bus_is_visible,
};
__ATTRIBUTE_GROUPS(rbd_bus);

static struct bus_type rbd_bus_type = {
	.name		= "rbd",
	.bus_groups	= rbd_bus_groups,
};
static void rbd_root_dev_release(struct device *dev)
{
}

static struct device rbd_root_dev = {
	.init_name =    "rbd",
	.release =      rbd_root_dev_release,
};
static __printf(2, 3)
void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;

	if (!rbd_dev)
		printk(KERN_WARNING "%s: %pV\n", RBD_DRV_NAME, &vaf);
	else if (rbd_dev->disk)
		printk(KERN_WARNING "%s: %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->disk->disk_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_name)
		printk(KERN_WARNING "%s: image %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_id)
		printk(KERN_WARNING "%s: id %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_id, &vaf);
	else	/* punt */
		printk(KERN_WARNING "%s: rbd_dev %p: %pV\n",
			RBD_DRV_NAME, rbd_dev, &vaf);
	va_end(args);
}
#ifdef RBD_DEBUG
#define rbd_assert(expr)						\
		if (unlikely(!(expr))) {				\
			printk(KERN_ERR "\nAssertion failure in %s() "	\
						"at line %d:\n\n"	\
					"\trbd_assert(%s);\n\n",	\
					__func__, __LINE__, #expr);	\
			BUG();						\
		}
#else /* !RBD_DEBUG */
#  define rbd_assert(expr)	((void) 0)
#endif /* !RBD_DEBUG */
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);

static int rbd_dev_refresh(struct rbd_device *rbd_dev);
static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev);
static int rbd_dev_header_info(struct rbd_device *rbd_dev);
static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev);
static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id);
static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
				u8 *order, u64 *snap_size);
static int rbd_dev_v2_get_flags(struct rbd_device *rbd_dev);

static void rbd_obj_handle_request(struct rbd_obj_request *obj_req, int result);
static void rbd_img_handle_request(struct rbd_img_request *img_req, int result);
/*
 * Return true if nothing else is pending.
 */
static bool pending_result_dec(struct pending_result *pending, int *result)
{
	rbd_assert(pending->num_pending > 0);

	if (*result && !pending->result)
		pending->result = *result;
	if (--pending->num_pending)
		return false;

	*result = pending->result;
	return true;
}
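/*
 * Typical pattern (illustrative): a parent sets pending->num_pending to
 * the number of child requests it issues; every completion then calls
 * pending_result_dec(), and only the final one gets true back, with
 * *result holding the first nonzero child result.
 */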
static int rbd_open(struct block_device *bdev, fmode_t mode)
{
	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
	bool removing = false;

	spin_lock_irq(&rbd_dev->lock);
	if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
		removing = true;
	else
		rbd_dev->open_count++;
	spin_unlock_irq(&rbd_dev->lock);
	if (removing)
		return -ENOENT;

	(void) get_device(&rbd_dev->dev);

	return 0;
}
static void rbd_release(struct gendisk *disk, fmode_t mode)
{
	struct rbd_device *rbd_dev = disk->private_data;
	unsigned long open_count_before;

	spin_lock_irq(&rbd_dev->lock);
	open_count_before = rbd_dev->open_count--;
	spin_unlock_irq(&rbd_dev->lock);
	rbd_assert(open_count_before > 0);

	put_device(&rbd_dev->dev);
}
static int rbd_ioctl_set_ro(struct rbd_device *rbd_dev, unsigned long arg)
{
	int ro;

	if (get_user(ro, (int __user *)arg))
		return -EFAULT;

	/*
	 * Both images mapped read-only and snapshots can't be marked
	 * read-write.
	 */
	if (!ro) {
		if (rbd_is_ro(rbd_dev))
			return -EROFS;

		rbd_assert(!rbd_is_snap(rbd_dev));
	}

	/* Let blkdev_roset() handle it */
	return -ENOTTY;
}
static int rbd_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
	int ret;

	switch (cmd) {
	case BLKROSET:
		ret = rbd_ioctl_set_ro(rbd_dev, arg);
		break;
	default:
		ret = -ENOTTY;
	}

	return ret;
}

#ifdef CONFIG_COMPAT
static int rbd_compat_ioctl(struct block_device *bdev, fmode_t mode,
				unsigned int cmd, unsigned long arg)
{
	return rbd_ioctl(bdev, mode, cmd, arg);
}
#endif /* CONFIG_COMPAT */
static const struct block_device_operations rbd_bd_ops = {
	.owner			= THIS_MODULE,
	.open			= rbd_open,
	.release		= rbd_release,
	.ioctl			= rbd_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= rbd_compat_ioctl,
#endif
};
/*
 * Initialize an rbd client instance.  Success or not, this function
 * consumes ceph_opts.  Caller holds client_mutex.
 */
static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;
	int ret = -ENOMEM;

	dout("%s:\n", __func__);
	rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL);
	if (!rbdc)
		goto out_opt;

	kref_init(&rbdc->kref);
	INIT_LIST_HEAD(&rbdc->node);

	rbdc->client = ceph_create_client(ceph_opts, rbdc);
	if (IS_ERR(rbdc->client))
		goto out_rbdc;
	ceph_opts = NULL; /* Now rbdc->client is responsible for ceph_opts */

	ret = ceph_open_session(rbdc->client);
	if (ret < 0)
		goto out_client;

	spin_lock(&rbd_client_list_lock);
	list_add_tail(&rbdc->node, &rbd_client_list);
	spin_unlock(&rbd_client_list_lock);

	dout("%s: rbdc %p\n", __func__, rbdc);

	return rbdc;
out_client:
	ceph_destroy_client(rbdc->client);
out_rbdc:
	kfree(rbdc);
out_opt:
	if (ceph_opts)
		ceph_destroy_options(ceph_opts);
	dout("%s: error %d\n", __func__, ret);

	return ERR_PTR(ret);
}
static struct rbd_client *__rbd_get_client(struct rbd_client *rbdc)
{
	kref_get(&rbdc->kref);

	return rbdc;
}
/*
 * Find a ceph client with specific addr and configuration.  If
 * found, bump its reference count.
 */
static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
{
	struct rbd_client *client_node;
	bool found = false;

	if (ceph_opts->flags & CEPH_OPT_NOSHARE)
		return NULL;

	spin_lock(&rbd_client_list_lock);
	list_for_each_entry(client_node, &rbd_client_list, node) {
		if (!ceph_compare_options(ceph_opts, client_node->client)) {
			__rbd_get_client(client_node);

			found = true;
			break;
		}
	}
	spin_unlock(&rbd_client_list_lock);

	return found ? client_node : NULL;
}
/*
 * (Per device) rbd map options
 */
enum {
	Opt_queue_depth,
	Opt_alloc_size,
	Opt_lock_timeout,
	/* int args above */
	Opt_pool_ns,
	Opt_compression_hint,
	/* string args above */
	Opt_read_only,
	Opt_read_write,
	Opt_lock_on_read,
	Opt_exclusive,
	Opt_notrim,
};

enum {
	Opt_compression_hint_none,
	Opt_compression_hint_compressible,
	Opt_compression_hint_incompressible,
};

static const struct constant_table rbd_param_compression_hint[] = {
	{"none",		Opt_compression_hint_none},
	{"compressible",	Opt_compression_hint_compressible},
	{"incompressible",	Opt_compression_hint_incompressible},
	{}
};
static const struct fs_parameter_spec rbd_parameters[] = {
	fsparam_u32	("alloc_size",			Opt_alloc_size),
	fsparam_enum	("compression_hint",		Opt_compression_hint,
			 rbd_param_compression_hint),
	fsparam_flag	("exclusive",			Opt_exclusive),
	fsparam_flag	("lock_on_read",		Opt_lock_on_read),
	fsparam_u32	("lock_timeout",		Opt_lock_timeout),
	fsparam_flag	("notrim",			Opt_notrim),
	fsparam_string	("_pool_ns",			Opt_pool_ns),
	fsparam_u32	("queue_depth",			Opt_queue_depth),
	fsparam_flag	("read_only",			Opt_read_only),
	fsparam_flag	("read_write",			Opt_read_write),
	fsparam_flag	("ro",				Opt_read_only),
	fsparam_flag	("rw",				Opt_read_write),
	{}
};
struct rbd_options {
	int	queue_depth;
	int	alloc_size;
	unsigned long	lock_timeout;
	bool	read_only;
	bool	lock_on_read;
	bool	exclusive;
	bool	trim;

	u32 alloc_hint_flags;  /* CEPH_OSD_OP_ALLOC_HINT_FLAG_* */
};

#define RBD_QUEUE_DEPTH_DEFAULT	BLKDEV_MAX_RQ
#define RBD_ALLOC_SIZE_DEFAULT	(64 * 1024)
#define RBD_LOCK_TIMEOUT_DEFAULT 0  /* no timeout */
#define RBD_READ_ONLY_DEFAULT	false
#define RBD_LOCK_ON_READ_DEFAULT false
#define RBD_EXCLUSIVE_DEFAULT	false
#define RBD_TRIM_DEFAULT	true
struct rbd_parse_opts_ctx {
	struct rbd_spec		*spec;
	struct ceph_options	*copts;
	struct rbd_options	*opts;
};

static char* obj_op_name(enum obj_operation_type op_type)
{
	switch (op_type) {
	case OBJ_OP_READ:
		return "read";
	case OBJ_OP_WRITE:
		return "write";
	case OBJ_OP_DISCARD:
		return "discard";
	case OBJ_OP_ZEROOUT:
		return "zeroout";
	default:
		return "???";
	}
}
/*
 * Destroy ceph client
 *
 * Takes rbd_client_list_lock to drop the client off the client list.
 */
static void rbd_client_release(struct kref *kref)
{
	struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);

	dout("%s: rbdc %p\n", __func__, rbdc);
	spin_lock(&rbd_client_list_lock);
	list_del(&rbdc->node);
	spin_unlock(&rbd_client_list_lock);

	ceph_destroy_client(rbdc->client);
	kfree(rbdc);
}
/*
 * Drop reference to ceph client node. If it's not referenced anymore, release
 * it.
 */
static void rbd_put_client(struct rbd_client *rbdc)
{
	if (rbdc)
		kref_put(&rbdc->kref, rbd_client_release);
}
/*
 * Get a ceph client with specific addr and configuration, if one does
 * not exist create it.  Either way, ceph_opts is consumed by this
 * function.
 */
static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;
	int ret;

	mutex_lock(&client_mutex);
	rbdc = rbd_client_find(ceph_opts);
	if (rbdc) {
		ceph_destroy_options(ceph_opts);

		/*
		 * Using an existing client.  Make sure ->pg_pools is up to
		 * date before we look up the pool id in do_rbd_add().
		 */
		ret = ceph_wait_for_latest_osdmap(rbdc->client,
					rbdc->client->options->mount_timeout);
		if (ret) {
			rbd_warn(NULL, "failed to get latest osdmap: %d", ret);
			rbd_put_client(rbdc);
			rbdc = ERR_PTR(ret);
		}
	} else {
		rbdc = rbd_client_create(ceph_opts);
	}
	mutex_unlock(&client_mutex);

	return rbdc;
}
static bool rbd_image_format_valid(u32 image_format)
{
	return image_format == 1 || image_format == 2;
}
static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
{
	size_t size;
	u32 snap_count;

	/* The header has to start with the magic rbd header text */
	if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)))
		return false;

	/* The bio layer requires at least sector-sized I/O */

	if (ondisk->options.order < SECTOR_SHIFT)
		return false;

	/* If we use u64 in a few spots we may be able to loosen this */

	if (ondisk->options.order > 8 * sizeof (int) - 1)
		return false;

	/*
	 * The size of a snapshot header has to fit in a size_t, and
	 * that limits the number of snapshots.
	 */
	snap_count = le32_to_cpu(ondisk->snap_count);
	size = SIZE_MAX - sizeof (struct ceph_snap_context);
	if (snap_count > size / sizeof (__le64))
		return false;

	/*
	 * Not only that, but the size of the entire snapshot
	 * header must also be representable in a size_t.
	 */
	size -= snap_count * sizeof (__le64);
	if ((u64) size < le64_to_cpu(ondisk->snap_names_len))
		return false;

	return true;
}
/*
 * returns the size of an object in the image
 */
static u32 rbd_obj_bytes(struct rbd_image_header *header)
{
	return 1U << header->obj_order;
}
static void rbd_init_layout(struct rbd_device *rbd_dev)
{
	if (rbd_dev->header.stripe_unit == 0 ||
	    rbd_dev->header.stripe_count == 0) {
		rbd_dev->header.stripe_unit = rbd_obj_bytes(&rbd_dev->header);
		rbd_dev->header.stripe_count = 1;
	}

	rbd_dev->layout.stripe_unit = rbd_dev->header.stripe_unit;
	rbd_dev->layout.stripe_count = rbd_dev->header.stripe_count;
	rbd_dev->layout.object_size = rbd_obj_bytes(&rbd_dev->header);
	rbd_dev->layout.pool_id = rbd_dev->header.data_pool_id == CEPH_NOPOOL ?
			  rbd_dev->spec->pool_id : rbd_dev->header.data_pool_id;
	RCU_INIT_POINTER(rbd_dev->layout.pool_ns, NULL);
}
/*
 * Fill an rbd image header with information from the given format 1
 * on-disk header.
 */
static int rbd_header_from_disk(struct rbd_device *rbd_dev,
				 struct rbd_image_header_ondisk *ondisk)
{
	struct rbd_image_header *header = &rbd_dev->header;
	bool first_time = header->object_prefix == NULL;
	struct ceph_snap_context *snapc;
	char *object_prefix = NULL;
	char *snap_names = NULL;
	u64 *snap_sizes = NULL;
	u32 snap_count;
	int ret = -ENOMEM;
	u32 i;

	/* Allocate this now to avoid having to handle failure below */

	if (first_time) {
		object_prefix = kstrndup(ondisk->object_prefix,
					 sizeof(ondisk->object_prefix),
					 GFP_KERNEL);
		if (!object_prefix)
			return -ENOMEM;
	}

	/* Allocate the snapshot context and fill it in */

	snap_count = le32_to_cpu(ondisk->snap_count);
	snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
	if (!snapc)
		goto out_err;
	snapc->seq = le64_to_cpu(ondisk->snap_seq);
	if (snap_count) {
		struct rbd_image_snap_ondisk *snaps;
		u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);

		/* We'll keep a copy of the snapshot names... */

		if (snap_names_len > (u64)SIZE_MAX)
			goto out_2big;
		snap_names = kmalloc(snap_names_len, GFP_KERNEL);
		if (!snap_names)
			goto out_err;

		/* ...as well as the array of their sizes. */
		snap_sizes = kmalloc_array(snap_count,
					   sizeof(*header->snap_sizes),
					   GFP_KERNEL);
		if (!snap_sizes)
			goto out_err;

		/*
		 * Copy the names, and fill in each snapshot's id
		 * and size.
		 *
		 * Note that rbd_dev_v1_header_info() guarantees the
		 * ondisk buffer we're working with has
		 * snap_names_len bytes beyond the end of the
		 * snapshot id array, so this memcpy() is safe.
		 */
		memcpy(snap_names, &ondisk->snaps[snap_count], snap_names_len);
		snaps = ondisk->snaps;
		for (i = 0; i < snap_count; i++) {
			snapc->snaps[i] = le64_to_cpu(snaps[i].id);
			snap_sizes[i] = le64_to_cpu(snaps[i].image_size);
		}
	}

	/* We won't fail any more, fill in the header */

	if (first_time) {
		header->object_prefix = object_prefix;
		header->obj_order = ondisk->options.order;
		rbd_init_layout(rbd_dev);
	} else {
		ceph_put_snap_context(header->snapc);
		kfree(header->snap_names);
		kfree(header->snap_sizes);
	}

	/* The remaining fields always get updated (when we refresh) */

	header->image_size = le64_to_cpu(ondisk->image_size);
	header->snapc = snapc;
	header->snap_names = snap_names;
	header->snap_sizes = snap_sizes;

	return 0;
out_2big:
	ret = -EIO;
out_err:
	kfree(snap_sizes);
	kfree(snap_names);
	ceph_put_snap_context(snapc);
	kfree(object_prefix);

	return ret;
}
static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which)
{
	const char *snap_name;

	rbd_assert(which < rbd_dev->header.snapc->num_snaps);

	/* Skip over names until we find the one we are looking for */

	snap_name = rbd_dev->header.snap_names;
	while (which--)
		snap_name += strlen(snap_name) + 1;

	return kstrdup(snap_name, GFP_KERNEL);
}
/*
 * Snapshot id comparison function for use with qsort()/bsearch().
 * Note that result is for snapshots in *descending* order.
 */
static int snapid_compare_reverse(const void *s1, const void *s2)
{
	u64 snap_id1 = *(u64 *)s1;
	u64 snap_id2 = *(u64 *)s2;

	if (snap_id1 < snap_id2)
		return 1;
	return snap_id1 == snap_id2 ? 0 : -1;
}
/*
 * Search a snapshot context to see if the given snapshot id is
 * present.
 *
 * Returns the position of the snapshot id in the array if it's found,
 * or BAD_SNAP_INDEX otherwise.
 *
 * Note: The snapshot array is kept sorted (by the osd) in
 * reverse order, highest snapshot id first.
 */
static u32 rbd_dev_snap_index(struct rbd_device *rbd_dev, u64 snap_id)
{
	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
	u64 *found;

	found = bsearch(&snap_id, &snapc->snaps, snapc->num_snaps,
				sizeof (snap_id), snapid_compare_reverse);

	return found ? (u32)(found - &snapc->snaps[0]) : BAD_SNAP_INDEX;
}
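/*
 * For example, with snapc->snaps = { 30, 20, 10 } (descending), snap_id
 * 20 is found at index 1, while snap_id 25 is absent and yields
 * BAD_SNAP_INDEX.
 */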
static const char *rbd_dev_v1_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id)
{
	u32 which;
	const char *snap_name;

	which = rbd_dev_snap_index(rbd_dev, snap_id);
	if (which == BAD_SNAP_INDEX)
		return ERR_PTR(-ENOENT);

	snap_name = _rbd_dev_v1_snap_name(rbd_dev, which);
	return snap_name ? snap_name : ERR_PTR(-ENOMEM);
}
static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
{
	if (snap_id == CEPH_NOSNAP)
		return RBD_SNAP_HEAD_NAME;

	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (rbd_dev->image_format == 1)
		return rbd_dev_v1_snap_name(rbd_dev, snap_id);

	return rbd_dev_v2_snap_name(rbd_dev, snap_id);
}
static int rbd_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
				u64 *snap_size)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (snap_id == CEPH_NOSNAP) {
		*snap_size = rbd_dev->header.image_size;
	} else if (rbd_dev->image_format == 1) {
		u32 which;

		which = rbd_dev_snap_index(rbd_dev, snap_id);
		if (which == BAD_SNAP_INDEX)
			return -ENOENT;

		*snap_size = rbd_dev->header.snap_sizes[which];
	} else {
		u64 size = 0;
		int ret;

		ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, NULL, &size);
		if (ret)
			return ret;

		*snap_size = size;
	}
	return 0;
}
static int rbd_dev_mapping_set(struct rbd_device *rbd_dev)
{
	u64 snap_id = rbd_dev->spec->snap_id;
	u64 size = 0;
	int ret;

	ret = rbd_snap_size(rbd_dev, snap_id, &size);
	if (ret)
		return ret;

	rbd_dev->mapping.size = size;
	return 0;
}

static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev)
{
	rbd_dev->mapping.size = 0;
}
static void zero_bvec(struct bio_vec *bv)
{
	void *buf;
	unsigned long flags;

	buf = bvec_kmap_irq(bv, &flags);
	memset(buf, 0, bv->bv_len);
	flush_dcache_page(bv->bv_page);
	bvec_kunmap_irq(buf, &flags);
}
static void zero_bios(struct ceph_bio_iter *bio_pos, u32 off, u32 bytes)
{
	struct ceph_bio_iter it = *bio_pos;

	ceph_bio_iter_advance(&it, off);
	ceph_bio_iter_advance_step(&it, bytes, ({
		zero_bvec(&bv);
	}));
}

static void zero_bvecs(struct ceph_bvec_iter *bvec_pos, u32 off, u32 bytes)
{
	struct ceph_bvec_iter it = *bvec_pos;

	ceph_bvec_iter_advance(&it, off);
	ceph_bvec_iter_advance_step(&it, bytes, ({
		zero_bvec(&bv);
	}));
}
/*
 * Zero a range in @obj_req data buffer defined by a bio (list) or
 * (private) bio_vec array.
 *
 * @off is relative to the start of the data buffer.
 */
static void rbd_obj_zero_range(struct rbd_obj_request *obj_req, u32 off,
			       u32 bytes)
{
	dout("%s %p data buf %u~%u\n", __func__, obj_req, off, bytes);

	switch (obj_req->img_request->data_type) {
	case OBJ_REQUEST_BIO:
		zero_bios(&obj_req->bio_pos, off, bytes);
		break;
	case OBJ_REQUEST_BVECS:
	case OBJ_REQUEST_OWN_BVECS:
		zero_bvecs(&obj_req->bvec_pos, off, bytes);
		break;
	default:
		BUG();
	}
}
static void rbd_obj_request_destroy(struct kref *kref);
static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request != NULL);
	dout("%s: obj %p (was %d)\n", __func__, obj_request,
		kref_read(&obj_request->kref));
	kref_put(&obj_request->kref, rbd_obj_request_destroy);
}
static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request->img_request == NULL);

	/* Image request now owns object's original reference */
	obj_request->img_request = img_request;
	dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
}

static inline void rbd_img_obj_request_del(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
	list_del(&obj_request->ex.oe_item);
	rbd_assert(obj_request->img_request == img_request);
	rbd_obj_request_put(obj_request);
}
static void rbd_osd_submit(struct ceph_osd_request *osd_req)
{
	struct rbd_obj_request *obj_req = osd_req->r_priv;

	dout("%s osd_req %p for obj_req %p objno %llu %llu~%llu\n",
	     __func__, osd_req, obj_req, obj_req->ex.oe_objno,
	     obj_req->ex.oe_off, obj_req->ex.oe_len);
	ceph_osdc_start_request(osd_req->r_osdc, osd_req, false);
}
/*
 * The default/initial value for all image request flags is 0.  Each
 * is conditionally set to 1 at image request initialization time
 * and currently never changes thereafter.
 */
static void img_request_layered_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_LAYERED, &img_request->flags);
}

static bool img_request_layered_test(struct rbd_img_request *img_request)
{
	return test_bit(IMG_REQ_LAYERED, &img_request->flags) != 0;
}
static bool rbd_obj_is_entire(struct rbd_obj_request *obj_req)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;

	return !obj_req->ex.oe_off &&
	       obj_req->ex.oe_len == rbd_dev->layout.object_size;
}

static bool rbd_obj_is_tail(struct rbd_obj_request *obj_req)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;

	return obj_req->ex.oe_off + obj_req->ex.oe_len ==
					rbd_dev->layout.object_size;
}
/*
 * Must be called after rbd_obj_calc_img_extents().
 */
static bool rbd_obj_copyup_enabled(struct rbd_obj_request *obj_req)
{
	if (!obj_req->num_img_extents ||
	    (rbd_obj_is_entire(obj_req) &&
	     !obj_req->img_request->snapc->num_snaps))
		return false;

	return true;
}

static u64 rbd_obj_img_extents_bytes(struct rbd_obj_request *obj_req)
{
	return ceph_file_extents_bytes(obj_req->img_extents,
				       obj_req->num_img_extents);
}
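/*
 * Put differently: copyup is skipped when there is no parent data under
 * this object, and when an entire-object write with no snapshots would
 * immediately overwrite whatever had been copied up anyway.
 */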
static bool rbd_img_is_write(struct rbd_img_request *img_req)
{
	switch (img_req->op_type) {
	case OBJ_OP_READ:
		return false;
	case OBJ_OP_WRITE:
	case OBJ_OP_DISCARD:
	case OBJ_OP_ZEROOUT:
		return true;
	default:
		BUG();
	}
}
static void rbd_osd_req_callback(struct ceph_osd_request *osd_req)
{
	struct rbd_obj_request *obj_req = osd_req->r_priv;
	int result;

	dout("%s osd_req %p result %d for obj_req %p\n", __func__, osd_req,
	     osd_req->r_result, obj_req);

	/*
	 * Writes aren't allowed to return a data payload.  In some
	 * guarded write cases (e.g. stat + zero on an empty object)
	 * a stat response makes it through, but we don't care.
	 */
	if (osd_req->r_result > 0 && rbd_img_is_write(obj_req->img_request))
		result = 0;
	else
		result = osd_req->r_result;

	rbd_obj_handle_request(obj_req, result);
}
static void rbd_osd_format_read(struct ceph_osd_request *osd_req)
{
	struct rbd_obj_request *obj_request = osd_req->r_priv;

	osd_req->r_flags = CEPH_OSD_FLAG_READ;
	osd_req->r_snapid = obj_request->img_request->snap_id;
}

static void rbd_osd_format_write(struct ceph_osd_request *osd_req)
{
	struct rbd_obj_request *obj_request = osd_req->r_priv;

	osd_req->r_flags = CEPH_OSD_FLAG_WRITE;
	ktime_get_real_ts64(&osd_req->r_mtime);
	osd_req->r_data_offset = obj_request->ex.oe_off;
}
static struct ceph_osd_request *
__rbd_obj_add_osd_request(struct rbd_obj_request *obj_req,
			  struct ceph_snap_context *snapc, int num_ops)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct ceph_osd_request *req;
	const char *name_format = rbd_dev->image_format == 1 ?
				      RBD_V1_DATA_FORMAT : RBD_V2_DATA_FORMAT;
	int ret;

	req = ceph_osdc_alloc_request(osdc, snapc, num_ops, false, GFP_NOIO);
	if (!req)
		return ERR_PTR(-ENOMEM);

	list_add_tail(&req->r_private_item, &obj_req->osd_reqs);
	req->r_callback = rbd_osd_req_callback;
	req->r_priv = obj_req;

	/*
	 * Data objects may be stored in a separate pool, but always in
	 * the same namespace in that pool as the header in its pool.
	 */
	ceph_oloc_copy(&req->r_base_oloc, &rbd_dev->header_oloc);
	req->r_base_oloc.pool = rbd_dev->layout.pool_id;

	ret = ceph_oid_aprintf(&req->r_base_oid, GFP_NOIO, name_format,
			       rbd_dev->header.object_prefix,
			       obj_req->ex.oe_objno);
	if (ret)
		return ERR_PTR(ret);

	return req;
}
static struct ceph_osd_request *
rbd_obj_add_osd_request(struct rbd_obj_request *obj_req, int num_ops)
{
	return __rbd_obj_add_osd_request(obj_req, obj_req->img_request->snapc,
					 num_ops);
}
static struct rbd_obj_request *rbd_obj_request_create(void)
{
	struct rbd_obj_request *obj_request;

	obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_NOIO);
	if (!obj_request)
		return NULL;

	ceph_object_extent_init(&obj_request->ex);
	INIT_LIST_HEAD(&obj_request->osd_reqs);
	mutex_init(&obj_request->state_mutex);
	kref_init(&obj_request->kref);

	dout("%s %p\n", __func__, obj_request);
	return obj_request;
}
static void rbd_obj_request_destroy(struct kref *kref)
{
	struct rbd_obj_request *obj_request;
	struct ceph_osd_request *osd_req;
	u32 i;

	obj_request = container_of(kref, struct rbd_obj_request, kref);

	dout("%s: obj %p\n", __func__, obj_request);

	while (!list_empty(&obj_request->osd_reqs)) {
		osd_req = list_first_entry(&obj_request->osd_reqs,
				    struct ceph_osd_request, r_private_item);
		list_del_init(&osd_req->r_private_item);
		ceph_osdc_put_request(osd_req);
	}

	switch (obj_request->img_request->data_type) {
	case OBJ_REQUEST_NODATA:
	case OBJ_REQUEST_BIO:
	case OBJ_REQUEST_BVECS:
		break;		/* Nothing to do */
	case OBJ_REQUEST_OWN_BVECS:
		kfree(obj_request->bvec_pos.bvecs);
		break;
	default:
		BUG();
	}

	kfree(obj_request->img_extents);
	if (obj_request->copyup_bvecs) {
		for (i = 0; i < obj_request->copyup_bvec_count; i++) {
			if (obj_request->copyup_bvecs[i].bv_page)
				__free_page(obj_request->copyup_bvecs[i].bv_page);
		}
		kfree(obj_request->copyup_bvecs);
	}

	kmem_cache_free(rbd_obj_request_cache, obj_request);
}
/* It's OK to call this for a device with no parent */

static void rbd_spec_put(struct rbd_spec *spec);
static void rbd_dev_unparent(struct rbd_device *rbd_dev)
{
	rbd_dev_remove_parent(rbd_dev);
	rbd_spec_put(rbd_dev->parent_spec);
	rbd_dev->parent_spec = NULL;
	rbd_dev->parent_overlap = 0;
}
/*
 * Parent image reference counting is used to determine when an
 * image's parent fields can be safely torn down--after there are no
 * more in-flight requests to the parent image.  When the last
 * reference is dropped, cleaning them up is safe.
 */
static void rbd_dev_parent_put(struct rbd_device *rbd_dev)
{
	int counter;

	if (!rbd_dev->parent_spec)
		return;

	counter = atomic_dec_return_safe(&rbd_dev->parent_ref);
	if (counter > 0)
		return;

	/* Last reference; clean up parent data structures */

	if (!counter)
		rbd_dev_unparent(rbd_dev);
	else
		rbd_warn(rbd_dev, "parent reference underflow");
}
/*
 * If an image has a non-zero parent overlap, get a reference to its
 * parent.
 *
 * Returns true if the rbd device has a parent with a non-zero
 * overlap and a reference for it was successfully taken, or
 * false otherwise.
 */
static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
{
	int counter = 0;

	if (!rbd_dev->parent_spec)
		return false;

	if (rbd_dev->parent_overlap)
		counter = atomic_inc_return_safe(&rbd_dev->parent_ref);

	if (counter < 0)
		rbd_warn(rbd_dev, "parent reference overflow");

	return counter > 0;
}
static void rbd_img_request_init(struct rbd_img_request *img_request,
				 struct rbd_device *rbd_dev,
				 enum obj_operation_type op_type)
{
	memset(img_request, 0, sizeof(*img_request));

	img_request->rbd_dev = rbd_dev;
	img_request->op_type = op_type;

	INIT_LIST_HEAD(&img_request->lock_item);
	INIT_LIST_HEAD(&img_request->object_extents);
	mutex_init(&img_request->state_mutex);
}
static void rbd_img_capture_header(struct rbd_img_request *img_req)
{
	struct rbd_device *rbd_dev = img_req->rbd_dev;

	lockdep_assert_held(&rbd_dev->header_rwsem);

	if (rbd_img_is_write(img_req))
		img_req->snapc = ceph_get_snap_context(rbd_dev->header.snapc);
	else
		img_req->snap_id = rbd_dev->spec->snap_id;

	if (rbd_dev_parent_get(rbd_dev))
		img_request_layered_set(img_req);
}
static void rbd_img_request_destroy(struct rbd_img_request *img_request)
{
	struct rbd_obj_request *obj_request;
	struct rbd_obj_request *next_obj_request;

	dout("%s: img %p\n", __func__, img_request);

	WARN_ON(!list_empty(&img_request->lock_item));
	for_each_obj_request_safe(img_request, obj_request, next_obj_request)
		rbd_img_obj_request_del(img_request, obj_request);

	if (img_request_layered_test(img_request))
		rbd_dev_parent_put(img_request->rbd_dev);

	if (rbd_img_is_write(img_request))
		ceph_put_snap_context(img_request->snapc);

	if (test_bit(IMG_REQ_CHILD, &img_request->flags))
		kmem_cache_free(rbd_img_request_cache, img_request);
}
#define BITS_PER_OBJ	2
#define OBJS_PER_BYTE	(BITS_PER_BYTE / BITS_PER_OBJ)
#define OBJ_MASK	((1 << BITS_PER_OBJ) - 1)

static void __rbd_object_map_index(struct rbd_device *rbd_dev, u64 objno,
				   u64 *index, u8 *shift)
{
	u32 off;

	rbd_assert(objno < rbd_dev->object_map_size);
	*index = div_u64_rem(objno, OBJS_PER_BYTE, &off);
	*shift = (OBJS_PER_BYTE - off - 1) * BITS_PER_OBJ;
}
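/*
 * The map packs two bits per object, most significant bits first:
 * objno 0 lives in bits 7:6 of byte 0 and objno 3 in bits 1:0, so
 * e.g. objno 5 yields *index = 1, *shift = 4.
 */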
static u8 __rbd_object_map_get(struct rbd_device *rbd_dev, u64 objno)
{
	u64 index;
	u8 shift;

	lockdep_assert_held(&rbd_dev->object_map_lock);
	__rbd_object_map_index(rbd_dev, objno, &index, &shift);
	return (rbd_dev->object_map[index] >> shift) & OBJ_MASK;
}

static void __rbd_object_map_set(struct rbd_device *rbd_dev, u64 objno, u8 val)
{
	u64 index;
	u8 shift;
	u8 *p;

	lockdep_assert_held(&rbd_dev->object_map_lock);
	rbd_assert(!(val & ~OBJ_MASK));

	__rbd_object_map_index(rbd_dev, objno, &index, &shift);
	p = &rbd_dev->object_map[index];
	*p = (*p & ~(OBJ_MASK << shift)) | (val << shift);
}

static u8 rbd_object_map_get(struct rbd_device *rbd_dev, u64 objno)
{
	u8 state;

	spin_lock(&rbd_dev->object_map_lock);
	state = __rbd_object_map_get(rbd_dev, objno);
	spin_unlock(&rbd_dev->object_map_lock);
	return state;
}
static bool use_object_map(struct rbd_device *rbd_dev)
{
	/*
	 * An image mapped read-only can't use the object map -- it isn't
	 * loaded because the header lock isn't acquired.  Someone else can
	 * write to the image and update the object map behind our back.
	 *
	 * A snapshot can't be written to, so using the object map is always
	 * safe.
	 */
	if (!rbd_is_snap(rbd_dev) && rbd_is_ro(rbd_dev))
		return false;

	return ((rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP) &&
		!(rbd_dev->object_map_flags & RBD_FLAG_OBJECT_MAP_INVALID));
}
static bool rbd_object_map_may_exist(struct rbd_device *rbd_dev, u64 objno)
{
	u8 state;

	/* fall back to default logic if object map is disabled or invalid */
	if (!use_object_map(rbd_dev))
		return true;

	state = rbd_object_map_get(rbd_dev, objno);
	return state != OBJECT_NONEXISTENT;
}
static void rbd_object_map_name(struct rbd_device *rbd_dev, u64 snap_id,
				struct ceph_object_id *oid)
{
	if (snap_id == CEPH_NOSNAP)
		ceph_oid_printf(oid, "%s%s", RBD_OBJECT_MAP_PREFIX,
				rbd_dev->spec->image_id);
	else
		ceph_oid_printf(oid, "%s%s.%016llx", RBD_OBJECT_MAP_PREFIX,
				rbd_dev->spec->image_id, snap_id);
}
static int rbd_object_map_lock(struct rbd_device *rbd_dev)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	CEPH_DEFINE_OID_ONSTACK(oid);
	u8 lock_type;
	char *lock_tag;
	struct ceph_locker *lockers;
	u32 num_lockers;
	bool broke_lock = false;
	int ret;

	rbd_object_map_name(rbd_dev, CEPH_NOSNAP, &oid);

again:
	ret = ceph_cls_lock(osdc, &oid, &rbd_dev->header_oloc, RBD_LOCK_NAME,
			    CEPH_CLS_LOCK_EXCLUSIVE, "", "", "", 0);
	if (ret != -EBUSY || broke_lock) {
		if (ret == -EEXIST)
			ret = 0; /* already locked by myself */
		if (ret)
			rbd_warn(rbd_dev, "failed to lock object map: %d", ret);
		return ret;
	}

	ret = ceph_cls_lock_info(osdc, &oid, &rbd_dev->header_oloc,
				 RBD_LOCK_NAME, &lock_type, &lock_tag,
				 &lockers, &num_lockers);
	if (ret) {
		if (ret == -ENOENT)
			goto again;

		rbd_warn(rbd_dev, "failed to get object map lockers: %d", ret);
		return ret;
	}

	kfree(lock_tag);
	if (num_lockers == 0)
		goto again;

	rbd_warn(rbd_dev, "breaking object map lock owned by %s%llu",
		 ENTITY_NAME(lockers[0].id.name));

	ret = ceph_cls_break_lock(osdc, &oid, &rbd_dev->header_oloc,
				  RBD_LOCK_NAME, lockers[0].id.cookie,
				  &lockers[0].id.name);
	ceph_free_lockers(lockers, num_lockers);
	if (ret) {
		if (ret == -ENOENT)
			goto again;

		rbd_warn(rbd_dev, "failed to break object map lock: %d", ret);
		return ret;
	}

	broke_lock = true;
	goto again;
}
static void rbd_object_map_unlock(struct rbd_device *rbd_dev)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	CEPH_DEFINE_OID_ONSTACK(oid);
	int ret;

	rbd_object_map_name(rbd_dev, CEPH_NOSNAP, &oid);

	ret = ceph_cls_unlock(osdc, &oid, &rbd_dev->header_oloc, RBD_LOCK_NAME,
			      "");
	if (ret && ret != -ENOENT)
		rbd_warn(rbd_dev, "failed to unlock object map: %d", ret);
}
static int decode_object_map_header(void **p, void *end, u64 *object_map_size)
{
	u8 struct_v;
	u32 struct_len;
	u32 header_len;
	void *header_end;
	int ret;

	ceph_decode_32_safe(p, end, header_len, e_inval);
	header_end = *p + header_len;

	ret = ceph_start_decoding(p, end, 1, "BitVector header", &struct_v,
				  &struct_len);
	if (ret)
		return ret;

	ceph_decode_64_safe(p, end, *object_map_size, e_inval);

	*p = header_end;
	return 0;

e_inval:
	return -EINVAL;
}
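/*
 * The reply being decoded here is a ceph BitVector: a u32 header
 * length, the versioned header itself (of which only the u64 size in
 * objects is used), then the packed 2-bits-per-object data that
 * __rbd_object_map_load() copies out below.
 */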
static int __rbd_object_map_load(struct rbd_device *rbd_dev)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	CEPH_DEFINE_OID_ONSTACK(oid);
	struct page **pages;
	void *p, *end;
	size_t reply_len;
	u64 num_objects;
	u64 object_map_bytes;
	u64 object_map_size;
	int num_pages;
	int ret;

	rbd_assert(!rbd_dev->object_map && !rbd_dev->object_map_size);

	num_objects = ceph_get_num_objects(&rbd_dev->layout,
					   rbd_dev->mapping.size);
	object_map_bytes = DIV_ROUND_UP_ULL(num_objects * BITS_PER_OBJ,
					    BITS_PER_BYTE);
	num_pages = calc_pages_for(0, object_map_bytes) + 1;
	pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	reply_len = num_pages * PAGE_SIZE;
	rbd_object_map_name(rbd_dev, rbd_dev->spec->snap_id, &oid);
	ret = ceph_osdc_call(osdc, &oid, &rbd_dev->header_oloc,
			     "rbd", "object_map_load", CEPH_OSD_FLAG_READ,
			     NULL, 0, pages, &reply_len);
	if (ret)
		goto out;

	p = page_address(pages[0]);
	end = p + min(reply_len, (size_t)PAGE_SIZE);
	ret = decode_object_map_header(&p, end, &object_map_size);
	if (ret)
		goto out;

	if (object_map_size != num_objects) {
		rbd_warn(rbd_dev, "object map size mismatch: %llu vs %llu",
			 object_map_size, num_objects);
		ret = -EINVAL;
		goto out;
	}

	if (offset_in_page(p) + object_map_bytes > reply_len) {
		ret = -EINVAL;
		goto out;
	}

	rbd_dev->object_map = kvmalloc(object_map_bytes, GFP_KERNEL);
	if (!rbd_dev->object_map) {
		ret = -ENOMEM;
		goto out;
	}

	rbd_dev->object_map_size = object_map_size;
	ceph_copy_from_page_vector(pages, rbd_dev->object_map,
				   offset_in_page(p), object_map_bytes);

out:
	ceph_release_page_vector(pages, num_pages);
	return ret;
}
static void rbd_object_map_free(struct rbd_device *rbd_dev)
{
	kvfree(rbd_dev->object_map);
	rbd_dev->object_map = NULL;
	rbd_dev->object_map_size = 0;
}
static int rbd_object_map_load(struct rbd_device *rbd_dev)
{
	int ret;

	ret = __rbd_object_map_load(rbd_dev);
	if (ret)
		return ret;

	ret = rbd_dev_v2_get_flags(rbd_dev);
	if (ret) {
		rbd_object_map_free(rbd_dev);
		return ret;
	}

	if (rbd_dev->object_map_flags & RBD_FLAG_OBJECT_MAP_INVALID)
		rbd_warn(rbd_dev, "object map is invalid");

	return 0;
}
static int rbd_object_map_open(struct rbd_device *rbd_dev)
{
	int ret;

	ret = rbd_object_map_lock(rbd_dev);
	if (ret)
		return ret;

	ret = rbd_object_map_load(rbd_dev);
	if (ret) {
		rbd_object_map_unlock(rbd_dev);
		return ret;
	}

	return 0;
}

static void rbd_object_map_close(struct rbd_device *rbd_dev)
{
	rbd_object_map_free(rbd_dev);
	rbd_object_map_unlock(rbd_dev);
}
/*
 * This function needs snap_id (or more precisely just something to
 * distinguish between HEAD and snapshot object maps), new_state and
 * current_state that were passed to rbd_object_map_update().
 *
 * To avoid allocating and stashing a context we piggyback on the OSD
 * request.  A HEAD update has two ops (assert_locked).  For new_state
 * and current_state we decode our own object_map_update op, encoded in
 * rbd_cls_object_map_update().
 */
static int rbd_object_map_update_finish(struct rbd_obj_request *obj_req,
					struct ceph_osd_request *osd_req)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
	struct ceph_osd_data *osd_data;
	u64 objno;
	u8 state, new_state, uninitialized_var(current_state);
	bool has_current_state;
	void *p;

	if (osd_req->r_result)
		return osd_req->r_result;

	/*
	 * Nothing to do for a snapshot object map.
	 */
	if (osd_req->r_num_ops == 1)
		return 0;

	/*
	 * Update in-memory HEAD object map.
	 */
	rbd_assert(osd_req->r_num_ops == 2);
	osd_data = osd_req_op_data(osd_req, 1, cls, request_data);
	rbd_assert(osd_data->type == CEPH_OSD_DATA_TYPE_PAGES);

	p = page_address(osd_data->pages[0]);
	objno = ceph_decode_64(&p);
	rbd_assert(objno == obj_req->ex.oe_objno);
	rbd_assert(ceph_decode_64(&p) == objno + 1);
	new_state = ceph_decode_8(&p);
	has_current_state = ceph_decode_8(&p);
	if (has_current_state)
		current_state = ceph_decode_8(&p);

	spin_lock(&rbd_dev->object_map_lock);
	state = __rbd_object_map_get(rbd_dev, objno);
	if (!has_current_state || current_state == state ||
	    (current_state == OBJECT_EXISTS && state == OBJECT_EXISTS_CLEAN))
		__rbd_object_map_set(rbd_dev, objno, new_state);
	spin_unlock(&rbd_dev->object_map_lock);

	return 0;
}
static void rbd_object_map_callback(struct ceph_osd_request *osd_req)
{
	struct rbd_obj_request *obj_req = osd_req->r_priv;
	int result;

	dout("%s osd_req %p result %d for obj_req %p\n", __func__, osd_req,
	     osd_req->r_result, obj_req);

	result = rbd_object_map_update_finish(obj_req, osd_req);
	rbd_obj_handle_request(obj_req, result);
}
static bool update_needed(struct rbd_device *rbd_dev, u64 objno, u8 new_state)
{
	u8 state = rbd_object_map_get(rbd_dev, objno);

	if (state == new_state ||
	    (new_state == OBJECT_PENDING && state == OBJECT_NONEXISTENT) ||
	    (new_state == OBJECT_NONEXISTENT && state != OBJECT_PENDING))
		return false;

	return true;
}
static int rbd_cls_object_map_update(struct ceph_osd_request *req,
				     int which, u64 objno, u8 new_state,
				     const u8 *current_state)
{
	struct page **pages;
	void *p, *start;
	int ret;

	ret = osd_req_op_cls_init(req, which, "rbd", "object_map_update");
	if (ret)
		return ret;

	pages = ceph_alloc_page_vector(1, GFP_NOIO);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	p = start = page_address(pages[0]);
	ceph_encode_64(&p, objno);
	ceph_encode_64(&p, objno + 1);
	ceph_encode_8(&p, new_state);
	if (current_state) {
		ceph_encode_8(&p, 1);
		ceph_encode_8(&p, *current_state);
	} else {
		ceph_encode_8(&p, 0);
	}

	osd_req_op_cls_request_data_pages(req, which, pages, p - start, 0,
					  false, true);
	return 0;
}
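/*
 * The payload encoded above matches the object_map_update method's
 * input: [start objno][end objno][new_state][has current_state]
 * [current_state if present] -- always a single-object range
 * [objno, objno + 1) here.
 */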
/*
 * Return:
 *   0 - object map update sent
 *   1 - object map update isn't needed
 *  <0 - error
 */
static int rbd_object_map_update(struct rbd_obj_request *obj_req, u64 snap_id,
				 u8 new_state, const u8 *current_state)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct ceph_osd_request *req;
	int num_ops = 1;
	int which = 0;
	int ret;

	if (snap_id == CEPH_NOSNAP) {
		if (!update_needed(rbd_dev, obj_req->ex.oe_objno, new_state))
			return 1;

		num_ops++; /* assert_locked */
	}

	req = ceph_osdc_alloc_request(osdc, NULL, num_ops, false, GFP_NOIO);
	if (!req)
		return -ENOMEM;

	list_add_tail(&req->r_private_item, &obj_req->osd_reqs);
	req->r_callback = rbd_object_map_callback;
	req->r_priv = obj_req;

	rbd_object_map_name(rbd_dev, snap_id, &req->r_base_oid);
	ceph_oloc_copy(&req->r_base_oloc, &rbd_dev->header_oloc);
	req->r_flags = CEPH_OSD_FLAG_WRITE;
	ktime_get_real_ts64(&req->r_mtime);

	if (snap_id == CEPH_NOSNAP) {
		/*
		 * Protect against possible race conditions during lock
		 * ownership transitions.
		 */
		ret = ceph_cls_assert_locked(req, which++, RBD_LOCK_NAME,
					     CEPH_CLS_LOCK_EXCLUSIVE, "", "");
		if (ret)
			return ret;
	}

	ret = rbd_cls_object_map_update(req, which, obj_req->ex.oe_objno,
					new_state, current_state);
	if (ret)
		return ret;

	ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
	if (ret)
		return ret;

	ceph_osdc_start_request(osdc, req, false);
	return 0;
}
static void prune_extents(struct ceph_file_extent *img_extents,
			  u32 *num_img_extents, u64 overlap)
{
	u32 cnt = *num_img_extents;

	/* drop extents completely beyond the overlap */
	while (cnt && img_extents[cnt - 1].fe_off >= overlap)
		cnt--;

	if (cnt) {
		struct ceph_file_extent *ex = &img_extents[cnt - 1];

		/* trim final overlapping extent */
		if (ex->fe_off + ex->fe_len > overlap)
			ex->fe_len = overlap - ex->fe_off;
	}

	*num_img_extents = cnt;
}
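/*
 * For example, with overlap = 8M and extents { 6M~1M, 7M~2M, 9M~1M },
 * the last extent starts past the overlap and is dropped, the middle
 * one is trimmed to 7M~1M, and the first is kept as is.
 */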
/*
 * Determine the byte range(s) covered by either just the object extent
 * or the entire object in the parent image.
 */
static int rbd_obj_calc_img_extents(struct rbd_obj_request *obj_req,
				    bool entire)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
	int ret;

	if (!rbd_dev->parent_overlap)
		return 0;

	ret = ceph_extent_to_file(&rbd_dev->layout, obj_req->ex.oe_objno,
				  entire ? 0 : obj_req->ex.oe_off,
				  entire ? rbd_dev->layout.object_size :
							obj_req->ex.oe_len,
				  &obj_req->img_extents,
				  &obj_req->num_img_extents);
	if (ret)
		return ret;

	prune_extents(obj_req->img_extents, &obj_req->num_img_extents,
		      rbd_dev->parent_overlap);
	return 0;
}
static void rbd_osd_setup_data(struct ceph_osd_request *osd_req, int which)
{
	struct rbd_obj_request *obj_req = osd_req->r_priv;

	switch (obj_req->img_request->data_type) {
	case OBJ_REQUEST_BIO:
		osd_req_op_extent_osd_data_bio(osd_req, which,
					       &obj_req->bio_pos,
					       obj_req->ex.oe_len);
		break;
	case OBJ_REQUEST_BVECS:
	case OBJ_REQUEST_OWN_BVECS:
		rbd_assert(obj_req->bvec_pos.iter.bi_size ==
							obj_req->ex.oe_len);
		rbd_assert(obj_req->bvec_idx == obj_req->bvec_count);
		osd_req_op_extent_osd_data_bvec_pos(osd_req, which,
						    &obj_req->bvec_pos);
		break;
	default:
		BUG();
	}
}
static int rbd_osd_setup_stat(struct ceph_osd_request *osd_req, int which)
{
	struct page **pages;

	/*
	 * The response data for a STAT call consists of:
	 *     le64 length;
	 *     struct {
	 *         le32 tv_sec;
	 *         le32 tv_nsec;
	 *     } mtime;
	 */
	pages = ceph_alloc_page_vector(1, GFP_NOIO);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	osd_req_op_init(osd_req, which, CEPH_OSD_OP_STAT, 0);
	osd_req_op_raw_data_in_pages(osd_req, which, pages,
				     8 + sizeof(struct ceph_timespec),
				     0, false, true);
	return 0;
}
static int rbd_osd_setup_copyup(struct ceph_osd_request *osd_req, int which,
				u32 bytes)
{
	struct rbd_obj_request *obj_req = osd_req->r_priv;
	int ret;

	ret = osd_req_op_cls_init(osd_req, which, "rbd", "copyup");
	if (ret)
		return ret;

	osd_req_op_cls_request_data_bvecs(osd_req, which, obj_req->copyup_bvecs,
					  obj_req->copyup_bvec_count, bytes);
	return 0;
}
static int rbd_obj_init_read(struct rbd_obj_request *obj_req)
{
	obj_req->read_state = RBD_OBJ_READ_START;
	return 0;
}
static void __rbd_osd_setup_write_ops(struct ceph_osd_request *osd_req,
				      int which)
{
	struct rbd_obj_request *obj_req = osd_req->r_priv;
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
	u16 opcode;

	if (!use_object_map(rbd_dev) ||
	    !(obj_req->flags & RBD_OBJ_FLAG_MAY_EXIST)) {
		osd_req_op_alloc_hint_init(osd_req, which++,
					   rbd_dev->layout.object_size,
					   rbd_dev->layout.object_size,
					   rbd_dev->opts->alloc_hint_flags);
	}

	if (rbd_obj_is_entire(obj_req))
		opcode = CEPH_OSD_OP_WRITEFULL;
	else
		opcode = CEPH_OSD_OP_WRITE;

	osd_req_op_extent_init(osd_req, which, opcode,
			       obj_req->ex.oe_off, obj_req->ex.oe_len, 0, 0);
	rbd_osd_setup_data(osd_req, which);
}
static int rbd_obj_init_write(struct rbd_obj_request *obj_req)
{
	int ret;

	/* reverse map the entire object onto the parent */
	ret = rbd_obj_calc_img_extents(obj_req, true);
	if (ret)
		return ret;

	if (rbd_obj_copyup_enabled(obj_req))
		obj_req->flags |= RBD_OBJ_FLAG_COPYUP_ENABLED;

	obj_req->write_state = RBD_OBJ_WRITE_START;
	return 0;
}
static u16 truncate_or_zero_opcode(struct rbd_obj_request *obj_req)
{
	return rbd_obj_is_tail(obj_req) ? CEPH_OSD_OP_TRUNCATE :
					  CEPH_OSD_OP_ZERO;
}
static void __rbd_osd_setup_discard_ops(struct ceph_osd_request *osd_req,
					int which)
{
	struct rbd_obj_request *obj_req = osd_req->r_priv;

	if (rbd_obj_is_entire(obj_req) && !obj_req->num_img_extents) {
		rbd_assert(obj_req->flags & RBD_OBJ_FLAG_DELETION);
		osd_req_op_init(osd_req, which, CEPH_OSD_OP_DELETE, 0);
	} else {
		osd_req_op_extent_init(osd_req, which,
				       truncate_or_zero_opcode(obj_req),
				       obj_req->ex.oe_off, obj_req->ex.oe_len,
				       0, 0);
	}
}
static int rbd_obj_init_discard(struct rbd_obj_request *obj_req)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
	u64 off, next_off;
	int ret;

	/*
	 * Align the range to alloc_size boundary and punt on discards
	 * that are too small to free up any space.
	 *
	 * alloc_size == object_size && is_tail() is a special case for
	 * filestore with filestore_punch_hole = false, needed to allow
	 * truncate (in addition to delete).
	 */
	if (rbd_dev->opts->alloc_size != rbd_dev->layout.object_size ||
	    !rbd_obj_is_tail(obj_req)) {
		off = round_up(obj_req->ex.oe_off, rbd_dev->opts->alloc_size);
		next_off = round_down(obj_req->ex.oe_off + obj_req->ex.oe_len,
				      rbd_dev->opts->alloc_size);
		if (off >= next_off)
			return 1;

		dout("%s %p %llu~%llu -> %llu~%llu\n", __func__,
		     obj_req, obj_req->ex.oe_off, obj_req->ex.oe_len,
		     off, next_off - off);
		obj_req->ex.oe_off = off;
		obj_req->ex.oe_len = next_off - off;
	}

	/* reverse map the entire object onto the parent */
	ret = rbd_obj_calc_img_extents(obj_req, true);
	if (ret)
		return ret;

	obj_req->flags |= RBD_OBJ_FLAG_NOOP_FOR_NONEXISTENT;
	if (rbd_obj_is_entire(obj_req) && !obj_req->num_img_extents)
		obj_req->flags |= RBD_OBJ_FLAG_DELETION;

	obj_req->write_state = RBD_OBJ_WRITE_START;
	return 0;
}
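/*
 * For example, with alloc_size = 64k a discard of 10k~150k within an
 * object is shrunk to the aligned 64k~64k, while a discard of 10k~100k
 * aligns to an empty range and is dropped by returning 1.
 */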
static void __rbd_osd_setup_zeroout_ops(struct ceph_osd_request *osd_req,
					int which)
{
	struct rbd_obj_request *obj_req = osd_req->r_priv;
	u16 opcode;

	if (rbd_obj_is_entire(obj_req)) {
		if (obj_req->num_img_extents) {
			if (!(obj_req->flags & RBD_OBJ_FLAG_COPYUP_ENABLED))
				osd_req_op_init(osd_req, which++,
						CEPH_OSD_OP_CREATE, 0);
			opcode = CEPH_OSD_OP_TRUNCATE;
		} else {
			rbd_assert(obj_req->flags & RBD_OBJ_FLAG_DELETION);
			osd_req_op_init(osd_req, which++,
					CEPH_OSD_OP_DELETE, 0);
			opcode = 0;
		}
	} else {
		opcode = truncate_or_zero_opcode(obj_req);
	}

	if (opcode)
		osd_req_op_extent_init(osd_req, which, opcode,
				       obj_req->ex.oe_off, obj_req->ex.oe_len,
				       0, 0);
}
static int rbd_obj_init_zeroout(struct rbd_obj_request *obj_req)
{
	int ret;

	/* reverse map the entire object onto the parent */
	ret = rbd_obj_calc_img_extents(obj_req, true);
	if (ret)
		return ret;

	if (rbd_obj_copyup_enabled(obj_req))
		obj_req->flags |= RBD_OBJ_FLAG_COPYUP_ENABLED;
	if (!obj_req->num_img_extents) {
		obj_req->flags |= RBD_OBJ_FLAG_NOOP_FOR_NONEXISTENT;
		if (rbd_obj_is_entire(obj_req))
			obj_req->flags |= RBD_OBJ_FLAG_DELETION;
	}

	obj_req->write_state = RBD_OBJ_WRITE_START;
	return 0;
}
2417 static int count_write_ops(struct rbd_obj_request *obj_req)
2419 struct rbd_img_request *img_req = obj_req->img_request;
2421 switch (img_req->op_type) {
2423 if (!use_object_map(img_req->rbd_dev) ||
2424 !(obj_req->flags & RBD_OBJ_FLAG_MAY_EXIST))
2425 return 2; /* setallochint + write/writefull */
2427 return 1; /* write/writefull */
2428 case OBJ_OP_DISCARD:
2429 return 1; /* delete/truncate/zero */
2430 case OBJ_OP_ZEROOUT:
2431 if (rbd_obj_is_entire(obj_req) && obj_req->num_img_extents &&
2432 !(obj_req->flags & RBD_OBJ_FLAG_COPYUP_ENABLED))
2433 return 2; /* create + truncate */
2435 return 1; /* delete/truncate/zero */
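/*
 * Editorial note: count_write_ops() must stay in lockstep with
 * __rbd_osd_setup_write_ops(), __rbd_osd_setup_discard_ops() and
 * __rbd_osd_setup_zeroout_ops() -- the returned count sizes the OSD
 * request's op array that those functions later fill in.
 */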
2441 static void rbd_osd_setup_write_ops(struct ceph_osd_request *osd_req,
2444 struct rbd_obj_request *obj_req = osd_req->r_priv;
2446 switch (obj_req->img_request->op_type) {
2448 __rbd_osd_setup_write_ops(osd_req, which);
2450 case OBJ_OP_DISCARD:
2451 __rbd_osd_setup_discard_ops(osd_req, which);
2453 case OBJ_OP_ZEROOUT:
2454 __rbd_osd_setup_zeroout_ops(osd_req, which);
2462 * Prune the list of object requests (adjust offset and/or length, drop
2463 * redundant requests). Prepare object request state machines and image
2464 * request state machine for execution.
2466 static int __rbd_img_fill_request(struct rbd_img_request *img_req)
2468 struct rbd_obj_request *obj_req, *next_obj_req;
2471 for_each_obj_request_safe(img_req, obj_req, next_obj_req) {
2472 switch (img_req->op_type) {
2474 ret = rbd_obj_init_read(obj_req);
2477 ret = rbd_obj_init_write(obj_req);
2479 case OBJ_OP_DISCARD:
2480 ret = rbd_obj_init_discard(obj_req);
2482 case OBJ_OP_ZEROOUT:
2483 ret = rbd_obj_init_zeroout(obj_req);
2491 rbd_img_obj_request_del(img_req, obj_req);
2496 img_req->state = RBD_IMG_START;
2500 union rbd_img_fill_iter {
2501 struct ceph_bio_iter bio_iter;
2502 struct ceph_bvec_iter bvec_iter;
2505 struct rbd_img_fill_ctx {
2506 enum obj_request_type pos_type;
2507 union rbd_img_fill_iter *pos;
2508 union rbd_img_fill_iter iter;
2509 ceph_object_extent_fn_t set_pos_fn;
2510 ceph_object_extent_fn_t count_fn;
2511 ceph_object_extent_fn_t copy_fn;
2514 static struct ceph_object_extent *alloc_object_extent(void *arg)
2516 struct rbd_img_request *img_req = arg;
2517 struct rbd_obj_request *obj_req;
2519 obj_req = rbd_obj_request_create();
2523 rbd_img_obj_request_add(img_req, obj_req);
2524 return &obj_req->ex;
2528 * While su != os && sc == 1 is technically not fancy (it's the same
2529 * layout as su == os && sc == 1), we can't use the nocopy path for it
2530 * because ->set_pos_fn() should be called only once per object.
2531 * ceph_file_to_extents() invokes action_fn once per stripe unit, so
2532 * treat su != os && sc == 1 as fancy.
2534 static bool rbd_layout_is_fancy(struct ceph_file_layout *l)
2536 return l->stripe_unit != l->object_size;
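/*
 * Editorial illustration (hypothetical values): su = 64K, os = 4M,
 * sc = 1 counts as fancy by the test above even though objects are
 * not interleaved -- each object is still filled 64K at a time, so
 * the per-stripe-unit callbacks fire many times per object. The
 * default layout, su = os with sc = 1, takes the nocopy path.
 */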
2539 static int rbd_img_fill_request_nocopy(struct rbd_img_request *img_req,
2540 struct ceph_file_extent *img_extents,
2541 u32 num_img_extents,
2542 struct rbd_img_fill_ctx *fctx)
2547 img_req->data_type = fctx->pos_type;
2550 * Create object requests and set each object request's starting
2551 * position in the provided bio (list) or bio_vec array.
2553 fctx->iter = *fctx->pos;
2554 for (i = 0; i < num_img_extents; i++) {
2555 ret = ceph_file_to_extents(&img_req->rbd_dev->layout,
2556 img_extents[i].fe_off,
2557 img_extents[i].fe_len,
2558 &img_req->object_extents,
2559 alloc_object_extent, img_req,
2560 fctx->set_pos_fn, &fctx->iter);
2565 return __rbd_img_fill_request(img_req);
2569 * Map a list of image extents to a list of object extents, create the
2570 * corresponding object requests (normally each to a different object,
2571 * but not always) and add them to @img_req. For each object request,
2572 * set up its data descriptor to point to the corresponding chunk(s) of
2573 * @fctx->pos data buffer.
2575 * Because ceph_file_to_extents() will merge adjacent object extents
2576 * together, each object request's data descriptor may point to multiple
2577 * different chunks of @fctx->pos data buffer.
2579 * @fctx->pos data buffer is assumed to be large enough.
2581 static int rbd_img_fill_request(struct rbd_img_request *img_req,
2582 struct ceph_file_extent *img_extents,
2583 u32 num_img_extents,
2584 struct rbd_img_fill_ctx *fctx)
2586 struct rbd_device *rbd_dev = img_req->rbd_dev;
2587 struct rbd_obj_request *obj_req;
2591 if (fctx->pos_type == OBJ_REQUEST_NODATA ||
2592 !rbd_layout_is_fancy(&rbd_dev->layout))
2593 return rbd_img_fill_request_nocopy(img_req, img_extents,
2594 num_img_extents, fctx);
2596 img_req->data_type = OBJ_REQUEST_OWN_BVECS;
2599 * Create object requests and determine ->bvec_count for each object
2600 * request. Note that ->bvec_count sum over all object requests may
2601 * be greater than the number of bio_vecs in the provided bio (list)
2602 * or bio_vec array because when mapped, those bio_vecs can straddle
2603 * stripe unit boundaries.
2605 fctx->iter = *fctx->pos;
2606 for (i = 0; i < num_img_extents; i++) {
2607 ret = ceph_file_to_extents(&rbd_dev->layout,
2608 img_extents[i].fe_off,
2609 img_extents[i].fe_len,
2610 &img_req->object_extents,
2611 alloc_object_extent, img_req,
2612 fctx->count_fn, &fctx->iter);
2617 for_each_obj_request(img_req, obj_req) {
2618 obj_req->bvec_pos.bvecs = kmalloc_array(obj_req->bvec_count,
2619 sizeof(*obj_req->bvec_pos.bvecs),
2620 GFP_NOIO);
2621 if (!obj_req->bvec_pos.bvecs)
2626 * Fill in each object request's private bio_vec array, splitting and
2627 * rearranging the provided bio_vecs in stripe unit chunks as needed.
2629 fctx->iter = *fctx->pos;
2630 for (i = 0; i < num_img_extents; i++) {
2631 ret = ceph_iterate_extents(&rbd_dev->layout,
2632 img_extents[i].fe_off,
2633 img_extents[i].fe_len,
2634 &img_req->object_extents,
2635 fctx->copy_fn, &fctx->iter);
2640 return __rbd_img_fill_request(img_req);
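/*
 * Editorial summary of the fancy-layout path above: the image extents
 * are walked three times -- once with count_fn to create object
 * requests and tally ->bvec_count, once to allocate each request's
 * private bio_vec array, and once with copy_fn to fill those arrays.
 * Each pass restarts from a fresh copy of *fctx->pos, which is why
 * only fctx->iter is advanced.
 */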
2643 static int rbd_img_fill_nodata(struct rbd_img_request *img_req,
2646 struct ceph_file_extent ex = { off, len };
2647 union rbd_img_fill_iter dummy = {};
2648 struct rbd_img_fill_ctx fctx = {
2649 .pos_type = OBJ_REQUEST_NODATA,
2653 return rbd_img_fill_request(img_req, &ex, 1, &fctx);
2656 static void set_bio_pos(struct ceph_object_extent *ex, u32 bytes, void *arg)
2658 struct rbd_obj_request *obj_req =
2659 container_of(ex, struct rbd_obj_request, ex);
2660 struct ceph_bio_iter *it = arg;
2662 dout("%s objno %llu bytes %u\n", __func__, ex->oe_objno, bytes);
2663 obj_req->bio_pos = *it;
2664 ceph_bio_iter_advance(it, bytes);
2667 static void count_bio_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg)
2669 struct rbd_obj_request *obj_req =
2670 container_of(ex, struct rbd_obj_request, ex);
2671 struct ceph_bio_iter *it = arg;
2673 dout("%s objno %llu bytes %u\n", __func__, ex->oe_objno, bytes);
2674 ceph_bio_iter_advance_step(it, bytes, ({
2675 obj_req->bvec_count++;
2680 static void copy_bio_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg)
2682 struct rbd_obj_request *obj_req =
2683 container_of(ex, struct rbd_obj_request, ex);
2684 struct ceph_bio_iter *it = arg;
2686 dout("%s objno %llu bytes %u\n", __func__, ex->oe_objno, bytes);
2687 ceph_bio_iter_advance_step(it, bytes, ({
2688 obj_req->bvec_pos.bvecs[obj_req->bvec_idx++] = bv;
2689 obj_req->bvec_pos.iter.bi_size += bv.bv_len;
2693 static int __rbd_img_fill_from_bio(struct rbd_img_request *img_req,
2694 struct ceph_file_extent *img_extents,
2695 u32 num_img_extents,
2696 struct ceph_bio_iter *bio_pos)
2698 struct rbd_img_fill_ctx fctx = {
2699 .pos_type = OBJ_REQUEST_BIO,
2700 .pos = (union rbd_img_fill_iter *)bio_pos,
2701 .set_pos_fn = set_bio_pos,
2702 .count_fn = count_bio_bvecs,
2703 .copy_fn = copy_bio_bvecs,
2706 return rbd_img_fill_request(img_req, img_extents, num_img_extents,
2710 static int rbd_img_fill_from_bio(struct rbd_img_request *img_req,
2711 u64 off, u64 len, struct bio *bio)
2713 struct ceph_file_extent ex = { off, len };
2714 struct ceph_bio_iter it = { .bio = bio, .iter = bio->bi_iter };
2716 return __rbd_img_fill_from_bio(img_req, &ex, 1, &it);
2719 static void set_bvec_pos(struct ceph_object_extent *ex, u32 bytes, void *arg)
2721 struct rbd_obj_request *obj_req =
2722 container_of(ex, struct rbd_obj_request, ex);
2723 struct ceph_bvec_iter *it = arg;
2725 obj_req->bvec_pos = *it;
2726 ceph_bvec_iter_shorten(&obj_req->bvec_pos, bytes);
2727 ceph_bvec_iter_advance(it, bytes);
2730 static void count_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg)
2732 struct rbd_obj_request *obj_req =
2733 container_of(ex, struct rbd_obj_request, ex);
2734 struct ceph_bvec_iter *it = arg;
2736 ceph_bvec_iter_advance_step(it, bytes, ({
2737 obj_req->bvec_count++;
2741 static void copy_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg)
2743 struct rbd_obj_request *obj_req =
2744 container_of(ex, struct rbd_obj_request, ex);
2745 struct ceph_bvec_iter *it = arg;
2747 ceph_bvec_iter_advance_step(it, bytes, ({
2748 obj_req->bvec_pos.bvecs[obj_req->bvec_idx++] = bv;
2749 obj_req->bvec_pos.iter.bi_size += bv.bv_len;
2753 static int __rbd_img_fill_from_bvecs(struct rbd_img_request *img_req,
2754 struct ceph_file_extent *img_extents,
2755 u32 num_img_extents,
2756 struct ceph_bvec_iter *bvec_pos)
2758 struct rbd_img_fill_ctx fctx = {
2759 .pos_type = OBJ_REQUEST_BVECS,
2760 .pos = (union rbd_img_fill_iter *)bvec_pos,
2761 .set_pos_fn = set_bvec_pos,
2762 .count_fn = count_bvecs,
2763 .copy_fn = copy_bvecs,
2766 return rbd_img_fill_request(img_req, img_extents, num_img_extents,
2770 static int rbd_img_fill_from_bvecs(struct rbd_img_request *img_req,
2771 struct ceph_file_extent *img_extents,
2772 u32 num_img_extents,
2773 struct bio_vec *bvecs)
2775 struct ceph_bvec_iter it = {
2776 .bvecs = bvecs,
2777 .iter = { .bi_size = ceph_file_extents_bytes(img_extents,
2778 num_img_extents) },
2781 return __rbd_img_fill_from_bvecs(img_req, img_extents, num_img_extents,
2785 static void rbd_img_handle_request_work(struct work_struct *work)
2787 struct rbd_img_request *img_req =
2788 container_of(work, struct rbd_img_request, work);
2790 rbd_img_handle_request(img_req, img_req->work_result);
2793 static void rbd_img_schedule(struct rbd_img_request *img_req, int result)
2795 INIT_WORK(&img_req->work, rbd_img_handle_request_work);
2796 img_req->work_result = result;
2797 queue_work(rbd_wq, &img_req->work);
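/*
 * Editorial note: bouncing through rbd_wq here instead of calling
 * rbd_img_handle_request() directly bounds stack depth -- completing
 * a child image request (a read from the parent) could otherwise
 * re-enter the state machines once per level of a clone chain.
 */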
2800 static bool rbd_obj_may_exist(struct rbd_obj_request *obj_req)
2802 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
2804 if (rbd_object_map_may_exist(rbd_dev, obj_req->ex.oe_objno)) {
2805 obj_req->flags |= RBD_OBJ_FLAG_MAY_EXIST;
2809 dout("%s %p objno %llu assuming dne\n", __func__, obj_req,
2810 obj_req->ex.oe_objno);
2814 static int rbd_obj_read_object(struct rbd_obj_request *obj_req)
2816 struct ceph_osd_request *osd_req;
2819 osd_req = __rbd_obj_add_osd_request(obj_req, NULL, 1);
2820 if (IS_ERR(osd_req))
2821 return PTR_ERR(osd_req);
2823 osd_req_op_extent_init(osd_req, 0, CEPH_OSD_OP_READ,
2824 obj_req->ex.oe_off, obj_req->ex.oe_len, 0, 0);
2825 rbd_osd_setup_data(osd_req, 0);
2826 rbd_osd_format_read(osd_req);
2828 ret = ceph_osdc_alloc_messages(osd_req, GFP_NOIO);
2832 rbd_osd_submit(osd_req);
2836 static int rbd_obj_read_from_parent(struct rbd_obj_request *obj_req)
2838 struct rbd_img_request *img_req = obj_req->img_request;
2839 struct rbd_device *parent = img_req->rbd_dev->parent;
2840 struct rbd_img_request *child_img_req;
2843 child_img_req = kmem_cache_alloc(rbd_img_request_cache, GFP_NOIO);
2847 rbd_img_request_init(child_img_req, parent, OBJ_OP_READ);
2848 __set_bit(IMG_REQ_CHILD, &child_img_req->flags);
2849 child_img_req->obj_request = obj_req;
2851 down_read(&parent->header_rwsem);
2852 rbd_img_capture_header(child_img_req);
2853 up_read(&parent->header_rwsem);
2855 dout("%s child_img_req %p for obj_req %p\n", __func__, child_img_req,
2858 if (!rbd_img_is_write(img_req)) {
2859 switch (img_req->data_type) {
2860 case OBJ_REQUEST_BIO:
2861 ret = __rbd_img_fill_from_bio(child_img_req,
2862 obj_req->img_extents,
2863 obj_req->num_img_extents,
2864 &obj_req->bio_pos);
2865 break;
2866 case OBJ_REQUEST_BVECS:
2867 case OBJ_REQUEST_OWN_BVECS:
2868 ret = __rbd_img_fill_from_bvecs(child_img_req,
2869 obj_req->img_extents,
2870 obj_req->num_img_extents,
2871 &obj_req->bvec_pos);
2877 ret = rbd_img_fill_from_bvecs(child_img_req,
2878 obj_req->img_extents,
2879 obj_req->num_img_extents,
2880 obj_req->copyup_bvecs);
2883 rbd_img_request_destroy(child_img_req);
2887 /* avoid parent chain recursion */
2888 rbd_img_schedule(child_img_req, 0);
2892 static bool rbd_obj_advance_read(struct rbd_obj_request *obj_req, int *result)
2894 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
2898 switch (obj_req->read_state) {
2899 case RBD_OBJ_READ_START:
2900 rbd_assert(!*result);
2902 if (!rbd_obj_may_exist(obj_req)) {
2904 obj_req->read_state = RBD_OBJ_READ_OBJECT;
2908 ret = rbd_obj_read_object(obj_req);
2913 obj_req->read_state = RBD_OBJ_READ_OBJECT;
2915 case RBD_OBJ_READ_OBJECT:
2916 if (*result == -ENOENT && rbd_dev->parent_overlap) {
2917 /* reverse map this object extent onto the parent */
2918 ret = rbd_obj_calc_img_extents(obj_req, false);
2923 if (obj_req->num_img_extents) {
2924 ret = rbd_obj_read_from_parent(obj_req);
2929 obj_req->read_state = RBD_OBJ_READ_PARENT;
2935 * -ENOENT means a hole in the image -- zero-fill the entire
2936 * length of the request. A short read also implies zero-fill
2937 * to the end of the request.
2939 if (*result == -ENOENT) {
2940 rbd_obj_zero_range(obj_req, 0, obj_req->ex.oe_len);
2942 } else if (*result >= 0) {
2943 if (*result < obj_req->ex.oe_len)
2944 rbd_obj_zero_range(obj_req, *result,
2945 obj_req->ex.oe_len - *result);
2947 rbd_assert(*result == obj_req->ex.oe_len);
2951 case RBD_OBJ_READ_PARENT:
2953 * The parent image is read only up to the overlap -- zero-fill
2954 * from the overlap to the end of the request.
2957 u32 obj_overlap = rbd_obj_img_extents_bytes(obj_req);
2959 if (obj_overlap < obj_req->ex.oe_len)
2960 rbd_obj_zero_range(obj_req, obj_overlap,
2961 obj_req->ex.oe_len - obj_overlap);
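/*
 * Editorial sketch of the read state machine above:
 *
 *     RBD_OBJ_READ_START
 *         -> RBD_OBJ_READ_OBJECT    read the object; skipped (with
 *                                   -ENOENT injected) when the object
 *                                   map says it can't exist
 *         -> RBD_OBJ_READ_PARENT    only on -ENOENT with a parent
 *                                   overlap
 *         -> done                   holes and short reads zero-filled
 */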
2969 static bool rbd_obj_write_is_noop(struct rbd_obj_request *obj_req)
2971 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
2973 if (rbd_object_map_may_exist(rbd_dev, obj_req->ex.oe_objno))
2974 obj_req->flags |= RBD_OBJ_FLAG_MAY_EXIST;
2976 if (!(obj_req->flags & RBD_OBJ_FLAG_MAY_EXIST) &&
2977 (obj_req->flags & RBD_OBJ_FLAG_NOOP_FOR_NONEXISTENT)) {
2978 dout("%s %p noop for nonexistent\n", __func__, obj_req);
2986 * Return:
2987 * 0 - object map update sent
2988 * 1 - object map update isn't needed
2989 * <0 - error
2991 static int rbd_obj_write_pre_object_map(struct rbd_obj_request *obj_req)
2993 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
2996 if (!(rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP))
2999 if (obj_req->flags & RBD_OBJ_FLAG_DELETION)
3000 new_state = OBJECT_PENDING;
3002 new_state = OBJECT_EXISTS;
3004 return rbd_object_map_update(obj_req, CEPH_NOSNAP, new_state, NULL);
3007 static int rbd_obj_write_object(struct rbd_obj_request *obj_req)
3009 struct ceph_osd_request *osd_req;
3010 int num_ops = count_write_ops(obj_req);
3014 if (obj_req->flags & RBD_OBJ_FLAG_COPYUP_ENABLED)
3015 num_ops++; /* stat */
3017 osd_req = rbd_obj_add_osd_request(obj_req, num_ops);
3018 if (IS_ERR(osd_req))
3019 return PTR_ERR(osd_req);
3021 if (obj_req->flags & RBD_OBJ_FLAG_COPYUP_ENABLED) {
3022 ret = rbd_osd_setup_stat(osd_req, which++);
3027 rbd_osd_setup_write_ops(osd_req, which);
3028 rbd_osd_format_write(osd_req);
3030 ret = ceph_osdc_alloc_messages(osd_req, GFP_NOIO);
3034 rbd_osd_submit(osd_req);
3039 * copyup_bvecs pages are never highmem pages
3041 static bool is_zero_bvecs(struct bio_vec *bvecs, u32 bytes)
3043 struct ceph_bvec_iter it = {
3045 .iter = { .bi_size = bytes },
3048 ceph_bvec_iter_advance_step(&it, bytes, ({
3049 if (memchr_inv(page_address(bv.bv_page) + bv.bv_offset, 0,
3050 bv.bv_len))
3051 return false;
3056 #define MODS_ONLY U32_MAX
3058 static int rbd_obj_copyup_empty_snapc(struct rbd_obj_request *obj_req,
3061 struct ceph_osd_request *osd_req;
3064 dout("%s obj_req %p bytes %u\n", __func__, obj_req, bytes);
3065 rbd_assert(bytes > 0 && bytes != MODS_ONLY);
3067 osd_req = __rbd_obj_add_osd_request(obj_req, &rbd_empty_snapc, 1);
3068 if (IS_ERR(osd_req))
3069 return PTR_ERR(osd_req);
3071 ret = rbd_osd_setup_copyup(osd_req, 0, bytes);
3075 rbd_osd_format_write(osd_req);
3077 ret = ceph_osdc_alloc_messages(osd_req, GFP_NOIO);
3081 rbd_osd_submit(osd_req);
3085 static int rbd_obj_copyup_current_snapc(struct rbd_obj_request *obj_req,
3088 struct ceph_osd_request *osd_req;
3089 int num_ops = count_write_ops(obj_req);
3093 dout("%s obj_req %p bytes %u\n", __func__, obj_req, bytes);
3095 if (bytes != MODS_ONLY)
3096 num_ops++; /* copyup */
3098 osd_req = rbd_obj_add_osd_request(obj_req, num_ops);
3099 if (IS_ERR(osd_req))
3100 return PTR_ERR(osd_req);
3102 if (bytes != MODS_ONLY) {
3103 ret = rbd_osd_setup_copyup(osd_req, which++, bytes);
3108 rbd_osd_setup_write_ops(osd_req, which);
3109 rbd_osd_format_write(osd_req);
3111 ret = ceph_osdc_alloc_messages(osd_req, GFP_NOIO);
3115 rbd_osd_submit(osd_req);
3119 static int setup_copyup_bvecs(struct rbd_obj_request *obj_req, u64 obj_overlap)
3123 rbd_assert(!obj_req->copyup_bvecs);
3124 obj_req->copyup_bvec_count = calc_pages_for(0, obj_overlap);
3125 obj_req->copyup_bvecs = kcalloc(obj_req->copyup_bvec_count,
3126 sizeof(*obj_req->copyup_bvecs),
3128 if (!obj_req->copyup_bvecs)
3131 for (i = 0; i < obj_req->copyup_bvec_count; i++) {
3132 unsigned int len = min(obj_overlap, (u64)PAGE_SIZE);
3134 obj_req->copyup_bvecs[i].bv_page = alloc_page(GFP_NOIO);
3135 if (!obj_req->copyup_bvecs[i].bv_page)
3138 obj_req->copyup_bvecs[i].bv_offset = 0;
3139 obj_req->copyup_bvecs[i].bv_len = len;
3143 rbd_assert(!obj_overlap);
3148 * The target object doesn't exist. Read the data for the entire
3149 * target object up to the overlap point (if any) from the parent,
3150 * so we can use it for a copyup.
3152 static int rbd_obj_copyup_read_parent(struct rbd_obj_request *obj_req)
3154 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
3157 rbd_assert(obj_req->num_img_extents);
3158 prune_extents(obj_req->img_extents, &obj_req->num_img_extents,
3159 rbd_dev->parent_overlap);
3160 if (!obj_req->num_img_extents) {
3162 * The overlap has become 0 (most likely because the
3163 * image has been flattened). Re-submit the original write
3164 * request -- pass MODS_ONLY since the copyup isn't needed
3165 * anymore.
3167 return rbd_obj_copyup_current_snapc(obj_req, MODS_ONLY);
3170 ret = setup_copyup_bvecs(obj_req, rbd_obj_img_extents_bytes(obj_req));
3174 return rbd_obj_read_from_parent(obj_req);
3177 static void rbd_obj_copyup_object_maps(struct rbd_obj_request *obj_req)
3179 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
3180 struct ceph_snap_context *snapc = obj_req->img_request->snapc;
3185 rbd_assert(!obj_req->pending.result && !obj_req->pending.num_pending);
3187 if (!(rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP))
3190 if (obj_req->flags & RBD_OBJ_FLAG_COPYUP_ZEROS)
3193 for (i = 0; i < snapc->num_snaps; i++) {
3194 if ((rbd_dev->header.features & RBD_FEATURE_FAST_DIFF) &&
3195 i + 1 < snapc->num_snaps)
3196 new_state = OBJECT_EXISTS_CLEAN;
3198 new_state = OBJECT_EXISTS;
3200 ret = rbd_object_map_update(obj_req, snapc->snaps[i],
3203 obj_req->pending.result = ret;
3208 obj_req->pending.num_pending++;
3212 static void rbd_obj_copyup_write_object(struct rbd_obj_request *obj_req)
3214 u32 bytes = rbd_obj_img_extents_bytes(obj_req);
3217 rbd_assert(!obj_req->pending.result && !obj_req->pending.num_pending);
3220 * Only send non-zero copyup data to save some I/O and network
3221 * bandwidth -- zero copyup data is equivalent to the object not
3222 * existing.
3224 if (obj_req->flags & RBD_OBJ_FLAG_COPYUP_ZEROS)
3227 if (obj_req->img_request->snapc->num_snaps && bytes > 0) {
3229 * Send a copyup request with an empty snapshot context to
3230 * deep-copyup the object through all existing snapshots.
3231 * A second request with the current snapshot context will be
3232 * sent for the actual modification.
3234 ret = rbd_obj_copyup_empty_snapc(obj_req, bytes);
3236 obj_req->pending.result = ret;
3240 obj_req->pending.num_pending++;
3241 bytes = MODS_ONLY;
3244 ret = rbd_obj_copyup_current_snapc(obj_req, bytes);
3246 obj_req->pending.result = ret;
3250 obj_req->pending.num_pending++;
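/*
 * Editorial sketch of the two-request copyup issued above when
 * snapshots exist and the parent data is not all zeros:
 *
 *     1. copyup(data) with an empty snapc -- the deep-copyup that
 *        populates all existing snapshots
 *     2. copyup(MODS_ONLY) + write ops with the current snapc -- the
 *        actual modification
 *
 * With no snapshots, a single request carries both the copyup data
 * and the write ops.
 */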
3253 static bool rbd_obj_advance_copyup(struct rbd_obj_request *obj_req, int *result)
3255 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
3259 switch (obj_req->copyup_state) {
3260 case RBD_OBJ_COPYUP_START:
3261 rbd_assert(!*result);
3263 ret = rbd_obj_copyup_read_parent(obj_req);
3268 if (obj_req->num_img_extents)
3269 obj_req->copyup_state = RBD_OBJ_COPYUP_READ_PARENT;
3271 obj_req->copyup_state = RBD_OBJ_COPYUP_WRITE_OBJECT;
3273 case RBD_OBJ_COPYUP_READ_PARENT:
3277 if (is_zero_bvecs(obj_req->copyup_bvecs,
3278 rbd_obj_img_extents_bytes(obj_req))) {
3279 dout("%s %p detected zeros\n", __func__, obj_req);
3280 obj_req->flags |= RBD_OBJ_FLAG_COPYUP_ZEROS;
3283 rbd_obj_copyup_object_maps(obj_req);
3284 if (!obj_req->pending.num_pending) {
3285 *result = obj_req->pending.result;
3286 obj_req->copyup_state = RBD_OBJ_COPYUP_OBJECT_MAPS;
3289 obj_req->copyup_state = __RBD_OBJ_COPYUP_OBJECT_MAPS;
3291 case __RBD_OBJ_COPYUP_OBJECT_MAPS:
3292 if (!pending_result_dec(&obj_req->pending, result))
3295 case RBD_OBJ_COPYUP_OBJECT_MAPS:
3297 rbd_warn(rbd_dev, "snap object map update failed: %d",
3298 *result);
3302 rbd_obj_copyup_write_object(obj_req);
3303 if (!obj_req->pending.num_pending) {
3304 *result = obj_req->pending.result;
3305 obj_req->copyup_state = RBD_OBJ_COPYUP_WRITE_OBJECT;
3308 obj_req->copyup_state = __RBD_OBJ_COPYUP_WRITE_OBJECT;
3310 case __RBD_OBJ_COPYUP_WRITE_OBJECT:
3311 if (!pending_result_dec(&obj_req->pending, result))
3314 case RBD_OBJ_COPYUP_WRITE_OBJECT:
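/*
 * Editorial sketch of the copyup state machine above:
 *
 *     RBD_OBJ_COPYUP_START
 *         -> RBD_OBJ_COPYUP_READ_PARENT
 *         -> (__)RBD_OBJ_COPYUP_OBJECT_MAPS    snap object map updates
 *         -> (__)RBD_OBJ_COPYUP_WRITE_OBJECT   copyup + write ops
 *         -> done
 *
 * The __-prefixed states are the wait-for-pending-replies halves of
 * their unprefixed counterparts, drained via pending_result_dec().
 */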
3322 * Return:
3323 * 0 - object map update sent
3324 * 1 - object map update isn't needed
3325 * <0 - error
3327 static int rbd_obj_write_post_object_map(struct rbd_obj_request *obj_req)
3329 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
3330 u8 current_state = OBJECT_PENDING;
3332 if (!(rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP))
3335 if (!(obj_req->flags & RBD_OBJ_FLAG_DELETION))
3338 return rbd_object_map_update(obj_req, CEPH_NOSNAP, OBJECT_NONEXISTENT,
3342 static bool rbd_obj_advance_write(struct rbd_obj_request *obj_req, int *result)
3344 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
3348 switch (obj_req->write_state) {
3349 case RBD_OBJ_WRITE_START:
3350 rbd_assert(!*result);
3352 if (rbd_obj_write_is_noop(obj_req))
3355 ret = rbd_obj_write_pre_object_map(obj_req);
3360 obj_req->write_state = RBD_OBJ_WRITE_PRE_OBJECT_MAP;
3364 case RBD_OBJ_WRITE_PRE_OBJECT_MAP:
3366 rbd_warn(rbd_dev, "pre object map update failed: %d",
3367 *result);
3370 ret = rbd_obj_write_object(obj_req);
3375 obj_req->write_state = RBD_OBJ_WRITE_OBJECT;
3377 case RBD_OBJ_WRITE_OBJECT:
3378 if (*result == -ENOENT) {
3379 if (obj_req->flags & RBD_OBJ_FLAG_COPYUP_ENABLED) {
3381 obj_req->copyup_state = RBD_OBJ_COPYUP_START;
3382 obj_req->write_state = __RBD_OBJ_WRITE_COPYUP;
3386 * On a non-existent object:
3387 * delete - -ENOENT, truncate/zero - 0
3389 if (obj_req->flags & RBD_OBJ_FLAG_DELETION)
3395 obj_req->write_state = RBD_OBJ_WRITE_COPYUP;
3397 case __RBD_OBJ_WRITE_COPYUP:
3398 if (!rbd_obj_advance_copyup(obj_req, result))
3401 case RBD_OBJ_WRITE_COPYUP:
3403 rbd_warn(rbd_dev, "copyup failed: %d", *result);
3406 ret = rbd_obj_write_post_object_map(obj_req);
3411 obj_req->write_state = RBD_OBJ_WRITE_POST_OBJECT_MAP;
3415 case RBD_OBJ_WRITE_POST_OBJECT_MAP:
3417 rbd_warn(rbd_dev, "post object map update failed: %d",
3418 *result);
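/*
 * Editorial sketch of the write state machine above:
 *
 *     RBD_OBJ_WRITE_START
 *         -> RBD_OBJ_WRITE_PRE_OBJECT_MAP    mark EXISTS (or PENDING
 *                                            for a deletion)
 *         -> RBD_OBJ_WRITE_OBJECT            issue the write ops
 *         -> (__)RBD_OBJ_WRITE_COPYUP        on -ENOENT with copyup
 *                                            enabled
 *         -> RBD_OBJ_WRITE_POST_OBJECT_MAP   mark NONEXISTENT after a
 *                                            deletion
 *         -> done
 */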
3426 * Return true if @obj_req is completed.
3428 static bool __rbd_obj_handle_request(struct rbd_obj_request *obj_req,
3431 struct rbd_img_request *img_req = obj_req->img_request;
3432 struct rbd_device *rbd_dev = img_req->rbd_dev;
3435 mutex_lock(&obj_req->state_mutex);
3436 if (!rbd_img_is_write(img_req))
3437 done = rbd_obj_advance_read(obj_req, result);
3439 done = rbd_obj_advance_write(obj_req, result);
3440 mutex_unlock(&obj_req->state_mutex);
3442 if (done && *result) {
3443 rbd_assert(*result < 0);
3444 rbd_warn(rbd_dev, "%s at objno %llu %llu~%llu result %d",
3445 obj_op_name(img_req->op_type), obj_req->ex.oe_objno,
3446 obj_req->ex.oe_off, obj_req->ex.oe_len, *result);
3452 * This is open-coded in rbd_img_handle_request() to avoid parent chain
3453 * recursion.
3455 static void rbd_obj_handle_request(struct rbd_obj_request *obj_req, int result)
3457 if (__rbd_obj_handle_request(obj_req, &result))
3458 rbd_img_handle_request(obj_req->img_request, result);
3461 static bool need_exclusive_lock(struct rbd_img_request *img_req)
3463 struct rbd_device *rbd_dev = img_req->rbd_dev;
3465 if (!(rbd_dev->header.features & RBD_FEATURE_EXCLUSIVE_LOCK))
3468 if (rbd_is_ro(rbd_dev))
3471 rbd_assert(!test_bit(IMG_REQ_CHILD, &img_req->flags));
3472 if (rbd_dev->opts->lock_on_read ||
3473 (rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP))
3476 return rbd_img_is_write(img_req);
3479 static bool rbd_lock_add_request(struct rbd_img_request *img_req)
3481 struct rbd_device *rbd_dev = img_req->rbd_dev;
3484 lockdep_assert_held(&rbd_dev->lock_rwsem);
3485 locked = rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED;
3486 spin_lock(&rbd_dev->lock_lists_lock);
3487 rbd_assert(list_empty(&img_req->lock_item));
3489 list_add_tail(&img_req->lock_item, &rbd_dev->acquiring_list);
3491 list_add_tail(&img_req->lock_item, &rbd_dev->running_list);
3492 spin_unlock(&rbd_dev->lock_lists_lock);
3496 static void rbd_lock_del_request(struct rbd_img_request *img_req)
3498 struct rbd_device *rbd_dev = img_req->rbd_dev;
3501 lockdep_assert_held(&rbd_dev->lock_rwsem);
3502 spin_lock(&rbd_dev->lock_lists_lock);
3503 rbd_assert(!list_empty(&img_req->lock_item));
3504 list_del_init(&img_req->lock_item);
3505 need_wakeup = (rbd_dev->lock_state == RBD_LOCK_STATE_RELEASING &&
3506 list_empty(&rbd_dev->running_list));
3507 spin_unlock(&rbd_dev->lock_lists_lock);
3509 complete(&rbd_dev->releasing_wait);
3512 static int rbd_img_exclusive_lock(struct rbd_img_request *img_req)
3514 struct rbd_device *rbd_dev = img_req->rbd_dev;
3516 if (!need_exclusive_lock(img_req))
3519 if (rbd_lock_add_request(img_req))
3522 if (rbd_dev->opts->exclusive) {
3523 WARN_ON(1); /* lock got released? */
3528 * Note the use of mod_delayed_work() in rbd_acquire_lock()
3529 * and cancel_delayed_work() in wake_lock_waiters().
3531 dout("%s rbd_dev %p queueing lock_dwork\n", __func__, rbd_dev);
3532 queue_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0);
3536 static void rbd_img_object_requests(struct rbd_img_request *img_req)
3538 struct rbd_obj_request *obj_req;
3540 rbd_assert(!img_req->pending.result && !img_req->pending.num_pending);
3542 for_each_obj_request(img_req, obj_req) {
3545 if (__rbd_obj_handle_request(obj_req, &result)) {
3547 img_req->pending.result = result;
3551 img_req->pending.num_pending++;
3556 static bool rbd_img_advance(struct rbd_img_request *img_req, int *result)
3558 struct rbd_device *rbd_dev = img_req->rbd_dev;
3562 switch (img_req->state) {
3564 rbd_assert(!*result);
3566 ret = rbd_img_exclusive_lock(img_req);
3571 img_req->state = RBD_IMG_EXCLUSIVE_LOCK;
3575 case RBD_IMG_EXCLUSIVE_LOCK:
3579 rbd_assert(!need_exclusive_lock(img_req) ||
3580 __rbd_is_lock_owner(rbd_dev));
3582 rbd_img_object_requests(img_req);
3583 if (!img_req->pending.num_pending) {
3584 *result = img_req->pending.result;
3585 img_req->state = RBD_IMG_OBJECT_REQUESTS;
3588 img_req->state = __RBD_IMG_OBJECT_REQUESTS;
3590 case __RBD_IMG_OBJECT_REQUESTS:
3591 if (!pending_result_dec(&img_req->pending, result))
3594 case RBD_IMG_OBJECT_REQUESTS:
3602 * Return true if @img_req is completed.
3604 static bool __rbd_img_handle_request(struct rbd_img_request *img_req,
3607 struct rbd_device *rbd_dev = img_req->rbd_dev;
3610 if (need_exclusive_lock(img_req)) {
3611 down_read(&rbd_dev->lock_rwsem);
3612 mutex_lock(&img_req->state_mutex);
3613 done = rbd_img_advance(img_req, result);
3615 rbd_lock_del_request(img_req);
3616 mutex_unlock(&img_req->state_mutex);
3617 up_read(&rbd_dev->lock_rwsem);
3619 mutex_lock(&img_req->state_mutex);
3620 done = rbd_img_advance(img_req, result);
3621 mutex_unlock(&img_req->state_mutex);
3624 if (done && *result) {
3625 rbd_assert(*result < 0);
3626 rbd_warn(rbd_dev, "%s%s result %d",
3627 test_bit(IMG_REQ_CHILD, &img_req->flags) ? "child " : "",
3628 obj_op_name(img_req->op_type), *result);
3633 static void rbd_img_handle_request(struct rbd_img_request *img_req, int result)
3636 if (!__rbd_img_handle_request(img_req, &result))
3639 if (test_bit(IMG_REQ_CHILD, &img_req->flags)) {
3640 struct rbd_obj_request *obj_req = img_req->obj_request;
3642 rbd_img_request_destroy(img_req);
3643 if (__rbd_obj_handle_request(obj_req, &result)) {
3644 img_req = obj_req->img_request;
3648 struct request *rq = blk_mq_rq_from_pdu(img_req);
3650 rbd_img_request_destroy(img_req);
3651 blk_mq_end_request(rq, errno_to_blk_status(result));
3655 static const struct rbd_client_id rbd_empty_cid;
3657 static bool rbd_cid_equal(const struct rbd_client_id *lhs,
3658 const struct rbd_client_id *rhs)
3660 return lhs->gid == rhs->gid && lhs->handle == rhs->handle;
3663 static struct rbd_client_id rbd_get_cid(struct rbd_device *rbd_dev)
3665 struct rbd_client_id cid;
3667 mutex_lock(&rbd_dev->watch_mutex);
3668 cid.gid = ceph_client_gid(rbd_dev->rbd_client->client);
3669 cid.handle = rbd_dev->watch_cookie;
3670 mutex_unlock(&rbd_dev->watch_mutex);
3675 * lock_rwsem must be held for write
3677 static void rbd_set_owner_cid(struct rbd_device *rbd_dev,
3678 const struct rbd_client_id *cid)
3680 dout("%s rbd_dev %p %llu-%llu -> %llu-%llu\n", __func__, rbd_dev,
3681 rbd_dev->owner_cid.gid, rbd_dev->owner_cid.handle,
3682 cid->gid, cid->handle);
3683 rbd_dev->owner_cid = *cid; /* struct */
3686 static void format_lock_cookie(struct rbd_device *rbd_dev, char *buf)
3688 mutex_lock(&rbd_dev->watch_mutex);
3689 sprintf(buf, "%s %llu", RBD_LOCK_COOKIE_PREFIX, rbd_dev->watch_cookie);
3690 mutex_unlock(&rbd_dev->watch_mutex);
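/*
 * Editorial example: the cookie produced above is the prefix followed
 * by the watch cookie, e.g. "auto 123456789" (value illustrative).
 * find_watcher() parses this exact format back with sscanf() to match
 * the lock owner against the watcher list.
 */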
3693 static void __rbd_lock(struct rbd_device *rbd_dev, const char *cookie)
3695 struct rbd_client_id cid = rbd_get_cid(rbd_dev);
3697 rbd_dev->lock_state = RBD_LOCK_STATE_LOCKED;
3698 strcpy(rbd_dev->lock_cookie, cookie);
3699 rbd_set_owner_cid(rbd_dev, &cid);
3700 queue_work(rbd_dev->task_wq, &rbd_dev->acquired_lock_work);
3704 * lock_rwsem must be held for write
3706 static int rbd_lock(struct rbd_device *rbd_dev)
3708 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3712 WARN_ON(__rbd_is_lock_owner(rbd_dev) ||
3713 rbd_dev->lock_cookie[0] != '\0');
3715 format_lock_cookie(rbd_dev, cookie);
3716 ret = ceph_cls_lock(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
3717 RBD_LOCK_NAME, CEPH_CLS_LOCK_EXCLUSIVE, cookie,
3718 RBD_LOCK_TAG, "", 0);
3722 __rbd_lock(rbd_dev, cookie);
3727 * lock_rwsem must be held for write
3729 static void rbd_unlock(struct rbd_device *rbd_dev)
3731 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3734 WARN_ON(!__rbd_is_lock_owner(rbd_dev) ||
3735 rbd_dev->lock_cookie[0] == '\0');
3737 ret = ceph_cls_unlock(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
3738 RBD_LOCK_NAME, rbd_dev->lock_cookie);
3739 if (ret && ret != -ENOENT)
3740 rbd_warn(rbd_dev, "failed to unlock header: %d", ret);
3742 /* treat errors as the image is unlocked */
3743 rbd_dev->lock_state = RBD_LOCK_STATE_UNLOCKED;
3744 rbd_dev->lock_cookie[0] = '\0';
3745 rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
3746 queue_work(rbd_dev->task_wq, &rbd_dev->released_lock_work);
3749 static int __rbd_notify_op_lock(struct rbd_device *rbd_dev,
3750 enum rbd_notify_op notify_op,
3751 struct page ***preply_pages,
3754 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3755 struct rbd_client_id cid = rbd_get_cid(rbd_dev);
3756 char buf[4 + 8 + 8 + CEPH_ENCODING_START_BLK_LEN];
3757 int buf_size = sizeof(buf);
3760 dout("%s rbd_dev %p notify_op %d\n", __func__, rbd_dev, notify_op);
3762 /* encode *LockPayload NotifyMessage (op + ClientId) */
3763 ceph_start_encoding(&p, 2, 1, buf_size - CEPH_ENCODING_START_BLK_LEN);
3764 ceph_encode_32(&p, notify_op);
3765 ceph_encode_64(&p, cid.gid);
3766 ceph_encode_64(&p, cid.handle);
3768 return ceph_osdc_notify(osdc, &rbd_dev->header_oid,
3769 &rbd_dev->header_oloc, buf, buf_size,
3770 RBD_NOTIFY_TIMEOUT, preply_pages, preply_len);
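/*
 * Editorial sketch of the payload encoded above: after the encoding
 * header come
 *
 *     le32 notify_op
 *     le64 cid.gid
 *     le64 cid.handle
 *
 * which is exactly the 4 + 8 + 8 bytes accounted for in the buf[]
 * declaration.
 */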
3773 static void rbd_notify_op_lock(struct rbd_device *rbd_dev,
3774 enum rbd_notify_op notify_op)
3776 __rbd_notify_op_lock(rbd_dev, notify_op, NULL, NULL);
3779 static void rbd_notify_acquired_lock(struct work_struct *work)
3781 struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
3782 acquired_lock_work);
3784 rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_ACQUIRED_LOCK);
3787 static void rbd_notify_released_lock(struct work_struct *work)
3789 struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
3790 released_lock_work);
3792 rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_RELEASED_LOCK);
3795 static int rbd_request_lock(struct rbd_device *rbd_dev)
3797 struct page **reply_pages;
3799 bool lock_owner_responded = false;
3802 dout("%s rbd_dev %p\n", __func__, rbd_dev);
3804 ret = __rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_REQUEST_LOCK,
3805 &reply_pages, &reply_len);
3806 if (ret && ret != -ETIMEDOUT) {
3807 rbd_warn(rbd_dev, "failed to request lock: %d", ret);
3811 if (reply_len > 0 && reply_len <= PAGE_SIZE) {
3812 void *p = page_address(reply_pages[0]);
3813 void *const end = p + reply_len;
3816 ceph_decode_32_safe(&p, end, n, e_inval); /* num_acks */
3821 ceph_decode_need(&p, end, 8 + 8, e_inval);
3822 p += 8 + 8; /* skip gid and cookie */
3824 ceph_decode_32_safe(&p, end, len, e_inval);
3828 if (lock_owner_responded) {
3830 "duplicate lock owners detected");
3835 lock_owner_responded = true;
3836 ret = ceph_start_decoding(&p, end, 1, "ResponseMessage",
3837 &struct_v, &len);
3838 if (ret) {
3839 rbd_warn(rbd_dev,
3840 "failed to decode ResponseMessage: %d",
3841 ret);
3845 ret = ceph_decode_32(&p);
3849 if (!lock_owner_responded) {
3850 rbd_warn(rbd_dev, "no lock owners detected");
3855 ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len));
3864 * Either image request state machine(s) or rbd_add_acquire_lock()
3865 * (i.e. "rbd map").
3867 static void wake_lock_waiters(struct rbd_device *rbd_dev, int result)
3869 struct rbd_img_request *img_req;
3871 dout("%s rbd_dev %p result %d\n", __func__, rbd_dev, result);
3872 lockdep_assert_held_write(&rbd_dev->lock_rwsem);
3874 cancel_delayed_work(&rbd_dev->lock_dwork);
3875 if (!completion_done(&rbd_dev->acquire_wait)) {
3876 rbd_assert(list_empty(&rbd_dev->acquiring_list) &&
3877 list_empty(&rbd_dev->running_list));
3878 rbd_dev->acquire_err = result;
3879 complete_all(&rbd_dev->acquire_wait);
3883 list_for_each_entry(img_req, &rbd_dev->acquiring_list, lock_item) {
3884 mutex_lock(&img_req->state_mutex);
3885 rbd_assert(img_req->state == RBD_IMG_EXCLUSIVE_LOCK);
3886 rbd_img_schedule(img_req, result);
3887 mutex_unlock(&img_req->state_mutex);
3890 list_splice_tail_init(&rbd_dev->acquiring_list, &rbd_dev->running_list);
3893 static int get_lock_owner_info(struct rbd_device *rbd_dev,
3894 struct ceph_locker **lockers, u32 *num_lockers)
3896 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3901 dout("%s rbd_dev %p\n", __func__, rbd_dev);
3903 ret = ceph_cls_lock_info(osdc, &rbd_dev->header_oid,
3904 &rbd_dev->header_oloc, RBD_LOCK_NAME,
3905 &lock_type, &lock_tag, lockers, num_lockers);
3909 if (*num_lockers == 0) {
3910 dout("%s rbd_dev %p no lockers detected\n", __func__, rbd_dev);
3914 if (strcmp(lock_tag, RBD_LOCK_TAG)) {
3915 rbd_warn(rbd_dev, "locked by external mechanism, tag %s",
3921 if (lock_type == CEPH_CLS_LOCK_SHARED) {
3922 rbd_warn(rbd_dev, "shared lock type detected");
3927 if (strncmp((*lockers)[0].id.cookie, RBD_LOCK_COOKIE_PREFIX,
3928 strlen(RBD_LOCK_COOKIE_PREFIX))) {
3929 rbd_warn(rbd_dev, "locked by external mechanism, cookie %s",
3930 (*lockers)[0].id.cookie);
3940 static int find_watcher(struct rbd_device *rbd_dev,
3941 const struct ceph_locker *locker)
3943 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3944 struct ceph_watch_item *watchers;
3950 ret = ceph_osdc_list_watchers(osdc, &rbd_dev->header_oid,
3951 &rbd_dev->header_oloc, &watchers,
3956 sscanf(locker->id.cookie, RBD_LOCK_COOKIE_PREFIX " %llu", &cookie);
3957 for (i = 0; i < num_watchers; i++) {
3958 if (!memcmp(&watchers[i].addr, &locker->info.addr,
3959 sizeof(locker->info.addr)) &&
3960 watchers[i].cookie == cookie) {
3961 struct rbd_client_id cid = {
3962 .gid = le64_to_cpu(watchers[i].name.num),
3963 .handle = cookie,
3966 dout("%s rbd_dev %p found cid %llu-%llu\n", __func__,
3967 rbd_dev, cid.gid, cid.handle);
3968 rbd_set_owner_cid(rbd_dev, &cid);
3974 dout("%s rbd_dev %p no watchers\n", __func__, rbd_dev);
3982 * lock_rwsem must be held for write
3984 static int rbd_try_lock(struct rbd_device *rbd_dev)
3986 struct ceph_client *client = rbd_dev->rbd_client->client;
3987 struct ceph_locker *lockers;
3992 ret = rbd_lock(rbd_dev);
3996 /* determine if the current lock holder is still alive */
3997 ret = get_lock_owner_info(rbd_dev, &lockers, &num_lockers);
4001 if (num_lockers == 0)
4004 ret = find_watcher(rbd_dev, lockers);
4006 goto out; /* request lock or error */
4008 rbd_warn(rbd_dev, "breaking header lock owned by %s%llu",
4009 ENTITY_NAME(lockers[0].id.name));
4011 ret = ceph_monc_blacklist_add(&client->monc,
4012 &lockers[0].info.addr);
4014 rbd_warn(rbd_dev, "blacklist of %s%llu failed: %d",
4015 ENTITY_NAME(lockers[0].id.name), ret);
4019 ret = ceph_cls_break_lock(&client->osdc, &rbd_dev->header_oid,
4020 &rbd_dev->header_oloc, RBD_LOCK_NAME,
4021 lockers[0].id.cookie,
4022 &lockers[0].id.name);
4023 if (ret && ret != -ENOENT)
4027 ceph_free_lockers(lockers, num_lockers);
4031 ceph_free_lockers(lockers, num_lockers);
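/*
 * Editorial summary of rbd_try_lock(): if the lock is held by someone
 * else, check whether that holder still has a watch established. A
 * holder without a watch is presumed dead, so it is blacklisted, its
 * lock broken, and the lock is attempted again.
 */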
4035 static int rbd_post_acquire_action(struct rbd_device *rbd_dev)
4039 if (rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP) {
4040 ret = rbd_object_map_open(rbd_dev);
4049 * Return:
4050 * 0 - lock acquired
4051 * 1 - caller should call rbd_request_lock()
4052 * <0 - error
4054 static int rbd_try_acquire_lock(struct rbd_device *rbd_dev)
4058 down_read(&rbd_dev->lock_rwsem);
4059 dout("%s rbd_dev %p read lock_state %d\n", __func__, rbd_dev,
4060 rbd_dev->lock_state);
4061 if (__rbd_is_lock_owner(rbd_dev)) {
4062 up_read(&rbd_dev->lock_rwsem);
4066 up_read(&rbd_dev->lock_rwsem);
4067 down_write(&rbd_dev->lock_rwsem);
4068 dout("%s rbd_dev %p write lock_state %d\n", __func__, rbd_dev,
4069 rbd_dev->lock_state);
4070 if (__rbd_is_lock_owner(rbd_dev)) {
4071 up_write(&rbd_dev->lock_rwsem);
4075 ret = rbd_try_lock(rbd_dev);
4077 rbd_warn(rbd_dev, "failed to lock header: %d", ret);
4078 if (ret == -EBLACKLISTED)
4081 ret = 1; /* request lock anyway */
4084 up_write(&rbd_dev->lock_rwsem);
4088 rbd_assert(rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED);
4089 rbd_assert(list_empty(&rbd_dev->running_list));
4091 ret = rbd_post_acquire_action(rbd_dev);
4093 rbd_warn(rbd_dev, "post-acquire action failed: %d", ret);
4095 * Can't stay in RBD_LOCK_STATE_LOCKED because
4096 * rbd_lock_add_request() would let the request through,
4097 * assuming that e.g. object map is locked and loaded.
4099 rbd_unlock(rbd_dev);
4103 wake_lock_waiters(rbd_dev, ret);
4104 up_write(&rbd_dev->lock_rwsem);
4108 static void rbd_acquire_lock(struct work_struct *work)
4110 struct rbd_device *rbd_dev = container_of(to_delayed_work(work),
4111 struct rbd_device, lock_dwork);
4114 dout("%s rbd_dev %p\n", __func__, rbd_dev);
4116 ret = rbd_try_acquire_lock(rbd_dev);
4118 dout("%s rbd_dev %p ret %d - done\n", __func__, rbd_dev, ret);
4122 ret = rbd_request_lock(rbd_dev);
4123 if (ret == -ETIMEDOUT) {
4124 goto again; /* treat this as a dead client */
4125 } else if (ret == -EROFS) {
4126 rbd_warn(rbd_dev, "peer will not release lock");
4127 down_write(&rbd_dev->lock_rwsem);
4128 wake_lock_waiters(rbd_dev, ret);
4129 up_write(&rbd_dev->lock_rwsem);
4130 } else if (ret < 0) {
4131 rbd_warn(rbd_dev, "error requesting lock: %d", ret);
4132 mod_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork,
4133 RBD_RETRY_DELAY);
4136 * lock owner acked, but resend if we don't see them
4137 * release the lock
4139 dout("%s rbd_dev %p requeuing lock_dwork\n", __func__,
4141 mod_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork,
4142 msecs_to_jiffies(2 * RBD_NOTIFY_TIMEOUT * MSEC_PER_SEC));
4146 static bool rbd_quiesce_lock(struct rbd_device *rbd_dev)
4150 dout("%s rbd_dev %p\n", __func__, rbd_dev);
4151 lockdep_assert_held_write(&rbd_dev->lock_rwsem);
4153 if (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED)
4157 * Ensure that all in-flight IO is flushed.
4159 rbd_dev->lock_state = RBD_LOCK_STATE_RELEASING;
4160 rbd_assert(!completion_done(&rbd_dev->releasing_wait));
4161 need_wait = !list_empty(&rbd_dev->running_list);
4162 downgrade_write(&rbd_dev->lock_rwsem);
4164 wait_for_completion(&rbd_dev->releasing_wait);
4165 up_read(&rbd_dev->lock_rwsem);
4167 down_write(&rbd_dev->lock_rwsem);
4168 if (rbd_dev->lock_state != RBD_LOCK_STATE_RELEASING)
4171 rbd_assert(list_empty(&rbd_dev->running_list));
4175 static void rbd_pre_release_action(struct rbd_device *rbd_dev)
4177 if (rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP)
4178 rbd_object_map_close(rbd_dev);
4181 static void __rbd_release_lock(struct rbd_device *rbd_dev)
4183 rbd_assert(list_empty(&rbd_dev->running_list));
4185 rbd_pre_release_action(rbd_dev);
4186 rbd_unlock(rbd_dev);
4190 * lock_rwsem must be held for write
4192 static void rbd_release_lock(struct rbd_device *rbd_dev)
4194 if (!rbd_quiesce_lock(rbd_dev))
4197 __rbd_release_lock(rbd_dev);
4200 * Give others a chance to grab the lock - we would re-acquire
4201 * almost immediately if we got new IO while draining the running
4202 * list otherwise. We need to ack our own notifications, so this
4203 * lock_dwork will be requeued from rbd_handle_released_lock() by
4204 * way of maybe_kick_acquire().
4206 cancel_delayed_work(&rbd_dev->lock_dwork);
4209 static void rbd_release_lock_work(struct work_struct *work)
4211 struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
4214 down_write(&rbd_dev->lock_rwsem);
4215 rbd_release_lock(rbd_dev);
4216 up_write(&rbd_dev->lock_rwsem);
4219 static void maybe_kick_acquire(struct rbd_device *rbd_dev)
4223 dout("%s rbd_dev %p\n", __func__, rbd_dev);
4224 if (__rbd_is_lock_owner(rbd_dev))
4227 spin_lock(&rbd_dev->lock_lists_lock);
4228 have_requests = !list_empty(&rbd_dev->acquiring_list);
4229 spin_unlock(&rbd_dev->lock_lists_lock);
4230 if (have_requests || delayed_work_pending(&rbd_dev->lock_dwork)) {
4231 dout("%s rbd_dev %p kicking lock_dwork\n", __func__, rbd_dev);
4232 mod_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0);
4236 static void rbd_handle_acquired_lock(struct rbd_device *rbd_dev, u8 struct_v,
4239 struct rbd_client_id cid = { 0 };
4241 if (struct_v >= 2) {
4242 cid.gid = ceph_decode_64(p);
4243 cid.handle = ceph_decode_64(p);
4246 dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
4248 if (!rbd_cid_equal(&cid, &rbd_empty_cid)) {
4249 down_write(&rbd_dev->lock_rwsem);
4250 if (rbd_cid_equal(&cid, &rbd_dev->owner_cid)) {
4252 * we already know that the remote client is
4253 * the owner
4255 up_write(&rbd_dev->lock_rwsem);
4259 rbd_set_owner_cid(rbd_dev, &cid);
4260 downgrade_write(&rbd_dev->lock_rwsem);
4262 down_read(&rbd_dev->lock_rwsem);
4265 maybe_kick_acquire(rbd_dev);
4266 up_read(&rbd_dev->lock_rwsem);
4269 static void rbd_handle_released_lock(struct rbd_device *rbd_dev, u8 struct_v,
4272 struct rbd_client_id cid = { 0 };
4274 if (struct_v >= 2) {
4275 cid.gid = ceph_decode_64(p);
4276 cid.handle = ceph_decode_64(p);
4279 dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
4281 if (!rbd_cid_equal(&cid, &rbd_empty_cid)) {
4282 down_write(&rbd_dev->lock_rwsem);
4283 if (!rbd_cid_equal(&cid, &rbd_dev->owner_cid)) {
4284 dout("%s rbd_dev %p unexpected owner, cid %llu-%llu != owner_cid %llu-%llu\n",
4285 __func__, rbd_dev, cid.gid, cid.handle,
4286 rbd_dev->owner_cid.gid, rbd_dev->owner_cid.handle);
4287 up_write(&rbd_dev->lock_rwsem);
4291 rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
4292 downgrade_write(&rbd_dev->lock_rwsem);
4294 down_read(&rbd_dev->lock_rwsem);
4297 maybe_kick_acquire(rbd_dev);
4298 up_read(&rbd_dev->lock_rwsem);
4302 * Returns result for ResponseMessage to be encoded (<= 0), or 1 if no
4303 * ResponseMessage is needed.
4305 static int rbd_handle_request_lock(struct rbd_device *rbd_dev, u8 struct_v,
4308 struct rbd_client_id my_cid = rbd_get_cid(rbd_dev);
4309 struct rbd_client_id cid = { 0 };
4312 if (struct_v >= 2) {
4313 cid.gid = ceph_decode_64(p);
4314 cid.handle = ceph_decode_64(p);
4317 dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
4319 if (rbd_cid_equal(&cid, &my_cid))
4322 down_read(&rbd_dev->lock_rwsem);
4323 if (__rbd_is_lock_owner(rbd_dev)) {
4324 if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED &&
4325 rbd_cid_equal(&rbd_dev->owner_cid, &rbd_empty_cid))
4329 * encode ResponseMessage(0) so the peer can detect
4330 * a missing owner
4332 result = 0;
4334 if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED) {
4335 if (!rbd_dev->opts->exclusive) {
4336 dout("%s rbd_dev %p queueing unlock_work\n",
4338 queue_work(rbd_dev->task_wq,
4339 &rbd_dev->unlock_work);
4341 /* refuse to release the lock */
4348 up_read(&rbd_dev->lock_rwsem);
4352 static void __rbd_acknowledge_notify(struct rbd_device *rbd_dev,
4353 u64 notify_id, u64 cookie, s32 *result)
4355 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4356 char buf[4 + CEPH_ENCODING_START_BLK_LEN];
4357 int buf_size = sizeof(buf);
4363 /* encode ResponseMessage */
4364 ceph_start_encoding(&p, 1, 1,
4365 buf_size - CEPH_ENCODING_START_BLK_LEN);
4366 ceph_encode_32(&p, *result);
4371 ret = ceph_osdc_notify_ack(osdc, &rbd_dev->header_oid,
4372 &rbd_dev->header_oloc, notify_id, cookie,
4375 rbd_warn(rbd_dev, "acknowledge_notify failed: %d", ret);
4378 static void rbd_acknowledge_notify(struct rbd_device *rbd_dev, u64 notify_id,
4381 dout("%s rbd_dev %p\n", __func__, rbd_dev);
4382 __rbd_acknowledge_notify(rbd_dev, notify_id, cookie, NULL);
4385 static void rbd_acknowledge_notify_result(struct rbd_device *rbd_dev,
4386 u64 notify_id, u64 cookie, s32 result)
4388 dout("%s rbd_dev %p result %d\n", __func__, rbd_dev, result);
4389 __rbd_acknowledge_notify(rbd_dev, notify_id, cookie, &result);
4392 static void rbd_watch_cb(void *arg, u64 notify_id, u64 cookie,
4393 u64 notifier_id, void *data, size_t data_len)
4395 struct rbd_device *rbd_dev = arg;
4396 void *p = data;
4397 void *const end = p + data_len;
4403 dout("%s rbd_dev %p cookie %llu notify_id %llu data_len %zu\n",
4404 __func__, rbd_dev, cookie, notify_id, data_len);
4406 ret = ceph_start_decoding(&p, end, 1, "NotifyMessage",
4407 &struct_v, &len);
4408 if (ret) {
4409 rbd_warn(rbd_dev, "failed to decode NotifyMessage: %d",
4410 ret);
4414 notify_op = ceph_decode_32(&p);
4416 /* legacy notification for header updates */
4417 notify_op = RBD_NOTIFY_OP_HEADER_UPDATE;
4421 dout("%s rbd_dev %p notify_op %u\n", __func__, rbd_dev, notify_op);
4422 switch (notify_op) {
4423 case RBD_NOTIFY_OP_ACQUIRED_LOCK:
4424 rbd_handle_acquired_lock(rbd_dev, struct_v, &p);
4425 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
4427 case RBD_NOTIFY_OP_RELEASED_LOCK:
4428 rbd_handle_released_lock(rbd_dev, struct_v, &p);
4429 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
4431 case RBD_NOTIFY_OP_REQUEST_LOCK:
4432 ret = rbd_handle_request_lock(rbd_dev, struct_v, &p);
4434 rbd_acknowledge_notify_result(rbd_dev, notify_id,
4435 cookie, ret);
4436 else
4437 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
4439 case RBD_NOTIFY_OP_HEADER_UPDATE:
4440 ret = rbd_dev_refresh(rbd_dev);
4442 rbd_warn(rbd_dev, "refresh failed: %d", ret);
4444 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
4447 if (rbd_is_lock_owner(rbd_dev))
4448 rbd_acknowledge_notify_result(rbd_dev, notify_id,
4449 cookie, -EOPNOTSUPP);
4451 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
4456 static void __rbd_unregister_watch(struct rbd_device *rbd_dev);
4458 static void rbd_watch_errcb(void *arg, u64 cookie, int err)
4460 struct rbd_device *rbd_dev = arg;
4462 rbd_warn(rbd_dev, "encountered watch error: %d", err);
4464 down_write(&rbd_dev->lock_rwsem);
4465 rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
4466 up_write(&rbd_dev->lock_rwsem);
4468 mutex_lock(&rbd_dev->watch_mutex);
4469 if (rbd_dev->watch_state == RBD_WATCH_STATE_REGISTERED) {
4470 __rbd_unregister_watch(rbd_dev);
4471 rbd_dev->watch_state = RBD_WATCH_STATE_ERROR;
4473 queue_delayed_work(rbd_dev->task_wq, &rbd_dev->watch_dwork, 0);
4475 mutex_unlock(&rbd_dev->watch_mutex);
4479 * watch_mutex must be locked
4481 static int __rbd_register_watch(struct rbd_device *rbd_dev)
4483 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4484 struct ceph_osd_linger_request *handle;
4486 rbd_assert(!rbd_dev->watch_handle);
4487 dout("%s rbd_dev %p\n", __func__, rbd_dev);
4489 handle = ceph_osdc_watch(osdc, &rbd_dev->header_oid,
4490 &rbd_dev->header_oloc, rbd_watch_cb,
4491 rbd_watch_errcb, rbd_dev);
4493 return PTR_ERR(handle);
4495 rbd_dev->watch_handle = handle;
4500 * watch_mutex must be locked
4502 static void __rbd_unregister_watch(struct rbd_device *rbd_dev)
4504 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4507 rbd_assert(rbd_dev->watch_handle);
4508 dout("%s rbd_dev %p\n", __func__, rbd_dev);
4510 ret = ceph_osdc_unwatch(osdc, rbd_dev->watch_handle);
4512 rbd_warn(rbd_dev, "failed to unwatch: %d", ret);
4514 rbd_dev->watch_handle = NULL;
4517 static int rbd_register_watch(struct rbd_device *rbd_dev)
4521 mutex_lock(&rbd_dev->watch_mutex);
4522 rbd_assert(rbd_dev->watch_state == RBD_WATCH_STATE_UNREGISTERED);
4523 ret = __rbd_register_watch(rbd_dev);
4527 rbd_dev->watch_state = RBD_WATCH_STATE_REGISTERED;
4528 rbd_dev->watch_cookie = rbd_dev->watch_handle->linger_id;
4531 mutex_unlock(&rbd_dev->watch_mutex);
4535 static void cancel_tasks_sync(struct rbd_device *rbd_dev)
4537 dout("%s rbd_dev %p\n", __func__, rbd_dev);
4539 cancel_work_sync(&rbd_dev->acquired_lock_work);
4540 cancel_work_sync(&rbd_dev->released_lock_work);
4541 cancel_delayed_work_sync(&rbd_dev->lock_dwork);
4542 cancel_work_sync(&rbd_dev->unlock_work);
4546 * header_rwsem must not be held to avoid a deadlock with
4547 * rbd_dev_refresh() when flushing notifies.
4549 static void rbd_unregister_watch(struct rbd_device *rbd_dev)
4551 cancel_tasks_sync(rbd_dev);
4553 mutex_lock(&rbd_dev->watch_mutex);
4554 if (rbd_dev->watch_state == RBD_WATCH_STATE_REGISTERED)
4555 __rbd_unregister_watch(rbd_dev);
4556 rbd_dev->watch_state = RBD_WATCH_STATE_UNREGISTERED;
4557 mutex_unlock(&rbd_dev->watch_mutex);
4559 cancel_delayed_work_sync(&rbd_dev->watch_dwork);
4560 ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);
4564 * lock_rwsem must be held for write
4566 static void rbd_reacquire_lock(struct rbd_device *rbd_dev)
4568 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4572 if (!rbd_quiesce_lock(rbd_dev))
4575 format_lock_cookie(rbd_dev, cookie);
4576 ret = ceph_cls_set_cookie(osdc, &rbd_dev->header_oid,
4577 &rbd_dev->header_oloc, RBD_LOCK_NAME,
4578 CEPH_CLS_LOCK_EXCLUSIVE, rbd_dev->lock_cookie,
4579 RBD_LOCK_TAG, cookie);
4581 if (ret != -EOPNOTSUPP)
4582 rbd_warn(rbd_dev, "failed to update lock cookie: %d",
4583 ret);
4586 * Lock cookie cannot be updated on older OSDs, so do
4587 * a manual release and queue an acquire.
4589 __rbd_release_lock(rbd_dev);
4590 queue_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0);
4592 __rbd_lock(rbd_dev, cookie);
4593 wake_lock_waiters(rbd_dev, 0);
4597 static void rbd_reregister_watch(struct work_struct *work)
4599 struct rbd_device *rbd_dev = container_of(to_delayed_work(work),
4600 struct rbd_device, watch_dwork);
4603 dout("%s rbd_dev %p\n", __func__, rbd_dev);
4605 mutex_lock(&rbd_dev->watch_mutex);
4606 if (rbd_dev->watch_state != RBD_WATCH_STATE_ERROR) {
4607 mutex_unlock(&rbd_dev->watch_mutex);
4611 ret = __rbd_register_watch(rbd_dev);
4613 rbd_warn(rbd_dev, "failed to reregister watch: %d", ret);
4614 if (ret != -EBLACKLISTED && ret != -ENOENT) {
4615 queue_delayed_work(rbd_dev->task_wq,
4616 &rbd_dev->watch_dwork,
4617 RBD_RETRY_DELAY);
4618 mutex_unlock(&rbd_dev->watch_mutex);
4622 mutex_unlock(&rbd_dev->watch_mutex);
4623 down_write(&rbd_dev->lock_rwsem);
4624 wake_lock_waiters(rbd_dev, ret);
4625 up_write(&rbd_dev->lock_rwsem);
4629 rbd_dev->watch_state = RBD_WATCH_STATE_REGISTERED;
4630 rbd_dev->watch_cookie = rbd_dev->watch_handle->linger_id;
4631 mutex_unlock(&rbd_dev->watch_mutex);
4633 down_write(&rbd_dev->lock_rwsem);
4634 if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED)
4635 rbd_reacquire_lock(rbd_dev);
4636 up_write(&rbd_dev->lock_rwsem);
4638 ret = rbd_dev_refresh(rbd_dev);
4640 rbd_warn(rbd_dev, "reregistration refresh failed: %d", ret);
4644 * Synchronous osd object method call. Returns the number of bytes
4645 * returned in the inbound (reply) buffer, or a negative error code.
4647 static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
4648 struct ceph_object_id *oid,
4649 struct ceph_object_locator *oloc,
4650 const char *method_name,
4651 const void *outbound,
4652 size_t outbound_size,
4654 size_t inbound_size)
4656 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4657 struct page *req_page = NULL;
4658 struct page *reply_page;
4662 * Method calls are ultimately read operations. The result
4663 * should be placed into the inbound buffer provided. They
4664 * also supply outbound data--parameters for the object
4665 * method. Currently if this is present it will be a
4666 * snapshot id.
4669 if (outbound_size > PAGE_SIZE)
4672 req_page = alloc_page(GFP_KERNEL);
4676 memcpy(page_address(req_page), outbound, outbound_size);
4679 reply_page = alloc_page(GFP_KERNEL);
4682 __free_page(req_page);
4686 ret = ceph_osdc_call(osdc, oid, oloc, RBD_DRV_NAME, method_name,
4687 CEPH_OSD_FLAG_READ, req_page, outbound_size,
4688 &reply_page, &inbound_size);
4690 memcpy(inbound, page_address(reply_page), inbound_size);
4695 __free_page(req_page);
4696 __free_page(reply_page);
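/*
 * Hypothetical usage sketch (illustrative only; the buffer and the
 * method name are assumptions):
 *
 *     char id_buf[RBD_IMAGE_ID_LEN_MAX];
 *     int ret;
 *
 *     ret = rbd_obj_method_sync(rbd_dev, &oid, &oloc, "get_id",
 *                               NULL, 0, id_buf, sizeof(id_buf));
 *
 * On success ret is the number of reply bytes copied into id_buf; a
 * negative value is the error from the OSD call.
 */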
4700 static void rbd_queue_workfn(struct work_struct *work)
4702 struct rbd_img_request *img_request =
4703 container_of(work, struct rbd_img_request, work);
4704 struct rbd_device *rbd_dev = img_request->rbd_dev;
4705 enum obj_operation_type op_type = img_request->op_type;
4706 struct request *rq = blk_mq_rq_from_pdu(img_request);
4707 u64 offset = (u64)blk_rq_pos(rq) << SECTOR_SHIFT;
4708 u64 length = blk_rq_bytes(rq);
4712 /* Ignore/skip any zero-length requests */
4713 if (!length) {
4714 dout("%s: zero-length request\n", __func__);
4715 result = 0;
4716 goto err_img_request;
4719 blk_mq_start_request(rq);
4721 down_read(&rbd_dev->header_rwsem);
4722 mapping_size = rbd_dev->mapping.size;
4723 rbd_img_capture_header(img_request);
4724 up_read(&rbd_dev->header_rwsem);
4726 if (offset + length > mapping_size) {
4727 rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)", offset,
4728 length, mapping_size);
4729 result = -EIO;
4730 goto err_img_request;
4733 dout("%s rbd_dev %p img_req %p %s %llu~%llu\n", __func__, rbd_dev,
4734 img_request, obj_op_name(op_type), offset, length);
4736 if (op_type == OBJ_OP_DISCARD || op_type == OBJ_OP_ZEROOUT)
4737 result = rbd_img_fill_nodata(img_request, offset, length);
4739 result = rbd_img_fill_from_bio(img_request, offset, length,
4742 goto err_img_request;
4744 rbd_img_handle_request(img_request, 0);
4748 rbd_img_request_destroy(img_request);
4750 rbd_warn(rbd_dev, "%s %llx at %llx result %d",
4751 obj_op_name(op_type), length, offset, result);
4752 blk_mq_end_request(rq, errno_to_blk_status(result));
4755 static blk_status_t rbd_queue_rq(struct blk_mq_hw_ctx *hctx,
4756 const struct blk_mq_queue_data *bd)
4758 struct rbd_device *rbd_dev = hctx->queue->queuedata;
4759 struct rbd_img_request *img_req = blk_mq_rq_to_pdu(bd->rq);
4760 enum obj_operation_type op_type;
4762 switch (req_op(bd->rq)) {
4763 case REQ_OP_DISCARD:
4764 op_type = OBJ_OP_DISCARD;
4766 case REQ_OP_WRITE_ZEROES:
4767 op_type = OBJ_OP_ZEROOUT;
4770 op_type = OBJ_OP_WRITE;
4773 op_type = OBJ_OP_READ;
4776 rbd_warn(rbd_dev, "unknown req_op %d", req_op(bd->rq));
4777 return BLK_STS_IOERR;
4780 rbd_img_request_init(img_req, rbd_dev, op_type);
4782 if (rbd_img_is_write(img_req)) {
4783 if (rbd_is_ro(rbd_dev)) {
4784 rbd_warn(rbd_dev, "%s on read-only mapping",
4785 obj_op_name(img_req->op_type));
4786 return BLK_STS_IOERR;
4788 rbd_assert(!rbd_is_snap(rbd_dev));
4791 INIT_WORK(&img_req->work, rbd_queue_workfn);
4792 queue_work(rbd_wq, &img_req->work);
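/*
 * Editorial note on the dispatch above: REQ_OP_DISCARD maps to
 * OBJ_OP_DISCARD, REQ_OP_WRITE_ZEROES to OBJ_OP_ZEROOUT, and plain
 * writes/reads to OBJ_OP_WRITE/OBJ_OP_READ. The request is then
 * handed to rbd_queue_workfn() on rbd_wq so that work that may sleep
 * (e.g. taking header_rwsem) stays out of the blk-mq dispatch path.
 */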
4796 static void rbd_free_disk(struct rbd_device *rbd_dev)
4798 blk_cleanup_queue(rbd_dev->disk->queue);
4799 blk_mq_free_tag_set(&rbd_dev->tag_set);
4800 put_disk(rbd_dev->disk);
4801 rbd_dev->disk = NULL;
4804 static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
4805 struct ceph_object_id *oid,
4806 struct ceph_object_locator *oloc,
4807 void *buf, int buf_len)
4810 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4811 struct ceph_osd_request *req;
4812 struct page **pages;
4813 int num_pages = calc_pages_for(0, buf_len);
4816 req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_KERNEL);
4820 ceph_oid_copy(&req->r_base_oid, oid);
4821 ceph_oloc_copy(&req->r_base_oloc, oloc);
4822 req->r_flags = CEPH_OSD_FLAG_READ;
4824 pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
4825 if (IS_ERR(pages)) {
4826 ret = PTR_ERR(pages);
4830 osd_req_op_extent_init(req, 0, CEPH_OSD_OP_READ, 0, buf_len, 0, 0);
4831 osd_req_op_extent_osd_data_pages(req, 0, pages, buf_len, 0, false,
4834 ret = ceph_osdc_alloc_messages(req, GFP_KERNEL);
4838 ceph_osdc_start_request(osdc, req, false);
4839 ret = ceph_osdc_wait_request(osdc, req);
4841 ceph_copy_from_page_vector(pages, buf, 0, ret);
4844 ceph_osdc_put_request(req);
4849 * Read the complete header for the given rbd device. On successful
4850 * return, the rbd_dev->header field will contain up-to-date
4851 * information about the image.
4853 static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev)
4855 struct rbd_image_header_ondisk *ondisk = NULL;
4862 * The complete header will include an array of its 64-bit
4863 * snapshot ids, followed by the names of those snapshots as
4864 * a contiguous block of NUL-terminated strings. Note that
4865 * the number of snapshots could change by the time we read
4866 * it in, in which case we re-read it.
4873 size = sizeof (*ondisk);
4874 size += snap_count * sizeof (struct rbd_image_snap_ondisk);
4876 ondisk = kmalloc(size, GFP_KERNEL);
4880 ret = rbd_obj_read_sync(rbd_dev, &rbd_dev->header_oid,
4881 &rbd_dev->header_oloc, ondisk, size);
4884 if ((size_t)ret < size) {
4886 rbd_warn(rbd_dev, "short header read (want %zd got %d)",
4890 if (!rbd_dev_ondisk_valid(ondisk)) {
4892 rbd_warn(rbd_dev, "invalid header");
4896 names_size = le64_to_cpu(ondisk->snap_names_len);
4897 want_count = snap_count;
4898 snap_count = le32_to_cpu(ondisk->snap_count);
4899 } while (snap_count != want_count);
4901 ret = rbd_header_from_disk(rbd_dev, ondisk);
4908 static void rbd_dev_update_size(struct rbd_device *rbd_dev)
4913 * If EXISTS is not set, rbd_dev->disk may be NULL, so don't
4914 * try to update its size. If REMOVING is set, updating size
4915 * is just useless work since the device can't be opened.
4917 if (test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags) &&
4918 !test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags)) {
4919 size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
4920 dout("setting size to %llu sectors", (unsigned long long)size);
4921 set_capacity(rbd_dev->disk, size);
4922 revalidate_disk(rbd_dev->disk);
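/*
 * Re-read the image header and, for layered images, the parent
 * information (the parent may have disappeared if the image was
 * flattened). If the mapping size changed, propagate the new size to
 * the block device.
 */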
4926 static int rbd_dev_refresh(struct rbd_device *rbd_dev)
4931 down_write(&rbd_dev->header_rwsem);
4932 mapping_size = rbd_dev->mapping.size;
4934 ret = rbd_dev_header_info(rbd_dev);
4939 * If there is a parent, see if it has disappeared due to the
4940 * mapped image getting flattened.
4942 if (rbd_dev->parent) {
4943 ret = rbd_dev_v2_parent_info(rbd_dev);
4948 rbd_assert(!rbd_is_snap(rbd_dev));
4949 rbd_dev->mapping.size = rbd_dev->header.image_size;
4952 up_write(&rbd_dev->header_rwsem);
4953 if (!ret && mapping_size != rbd_dev->mapping.size)
4954 rbd_dev_update_size(rbd_dev);
4959 static const struct blk_mq_ops rbd_mq_ops = {
4960 .queue_rq = rbd_queue_rq,
4963 static int rbd_init_disk(struct rbd_device *rbd_dev)
4965 struct gendisk *disk;
4966 struct request_queue *q;
4967 unsigned int objset_bytes =
4968 rbd_dev->layout.object_size * rbd_dev->layout.stripe_count;
4971 /* create gendisk info */
4972 disk = alloc_disk(single_major ?
4973 (1 << RBD_SINGLE_MAJOR_PART_SHIFT) :
4974 RBD_MINORS_PER_MAJOR);
4978 snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
4980 disk->major = rbd_dev->major;
4981 disk->first_minor = rbd_dev->minor;
4983 disk->flags |= GENHD_FL_EXT_DEVT;
4984 disk->fops = &rbd_bd_ops;
4985 disk->private_data = rbd_dev;
4987 memset(&rbd_dev->tag_set, 0, sizeof(rbd_dev->tag_set));
4988 rbd_dev->tag_set.ops = &rbd_mq_ops;
4989 rbd_dev->tag_set.queue_depth = rbd_dev->opts->queue_depth;
4990 rbd_dev->tag_set.numa_node = NUMA_NO_NODE;
4991 rbd_dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
4992 rbd_dev->tag_set.nr_hw_queues = num_present_cpus();
4993 rbd_dev->tag_set.cmd_size = sizeof(struct rbd_img_request);
4995 err = blk_mq_alloc_tag_set(&rbd_dev->tag_set);
4999 q = blk_mq_init_queue(&rbd_dev->tag_set);
5005 blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
5006 /* QUEUE_FLAG_ADD_RANDOM is off by default for blk-mq */
5008 blk_queue_max_hw_sectors(q, objset_bytes >> SECTOR_SHIFT);
5009 q->limits.max_sectors = queue_max_hw_sectors(q);
5010 blk_queue_max_segments(q, USHRT_MAX);
5011 blk_queue_max_segment_size(q, UINT_MAX);
5012 blk_queue_io_min(q, rbd_dev->opts->alloc_size);
5013 blk_queue_io_opt(q, rbd_dev->opts->alloc_size);
5015 if (rbd_dev->opts->trim) {
5016 blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
5017 q->limits.discard_granularity = rbd_dev->opts->alloc_size;
5018 blk_queue_max_discard_sectors(q, objset_bytes >> SECTOR_SHIFT);
5019 blk_queue_max_write_zeroes_sectors(q, objset_bytes >> SECTOR_SHIFT);
5022 if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC))
5023 q->backing_dev_info->capabilities |= BDI_CAP_STABLE_WRITES;
5026 * disk_release() expects a queue ref from add_disk() and will
5027 * put it. Hold an extra ref until add_disk() is called.
5029 WARN_ON(!blk_get_queue(q));
5031 q->queuedata = rbd_dev;
5033 rbd_dev->disk = disk;
5037 blk_mq_free_tag_set(&rbd_dev->tag_set);
5047 static struct rbd_device *dev_to_rbd_dev(struct device *dev)
5049 return container_of(dev, struct rbd_device, dev);
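/*
 * Device attributes, exported under /sys/bus/rbd/devices/<dev-id>/
 * (e.g. "size", "features", "parent", "refresh").
 */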
5052 static ssize_t rbd_size_show(struct device *dev,
5053 struct device_attribute *attr, char *buf)
5055 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5057 return sprintf(buf, "%llu\n",
5058 (unsigned long long)rbd_dev->mapping.size);
5061 static ssize_t rbd_features_show(struct device *dev,
5062 struct device_attribute *attr, char *buf)
5064 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5066 return sprintf(buf, "0x%016llx\n", rbd_dev->header.features);
5069 static ssize_t rbd_major_show(struct device *dev,
5070 struct device_attribute *attr, char *buf)
5072 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5075 return sprintf(buf, "%d\n", rbd_dev->major);
5077 return sprintf(buf, "(none)\n");
5080 static ssize_t rbd_minor_show(struct device *dev,
5081 struct device_attribute *attr, char *buf)
5083 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5085 return sprintf(buf, "%d\n", rbd_dev->minor);
5088 static ssize_t rbd_client_addr_show(struct device *dev,
5089 struct device_attribute *attr, char *buf)
5091 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5092 struct ceph_entity_addr *client_addr =
5093 ceph_client_addr(rbd_dev->rbd_client->client);
5095 return sprintf(buf, "%pISpc/%u\n", &client_addr->in_addr,
5096 le32_to_cpu(client_addr->nonce));
5099 static ssize_t rbd_client_id_show(struct device *dev,
5100 struct device_attribute *attr, char *buf)
5102 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5104 return sprintf(buf, "client%lld\n",
5105 ceph_client_gid(rbd_dev->rbd_client->client));
5108 static ssize_t rbd_cluster_fsid_show(struct device *dev,
5109 struct device_attribute *attr, char *buf)
5111 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5113 return sprintf(buf, "%pU\n", &rbd_dev->rbd_client->client->fsid);
5116 static ssize_t rbd_config_info_show(struct device *dev,
5117 struct device_attribute *attr, char *buf)
5119 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5121 return sprintf(buf, "%s\n", rbd_dev->config_info);
5124 static ssize_t rbd_pool_show(struct device *dev,
5125 struct device_attribute *attr, char *buf)
5127 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5129 return sprintf(buf, "%s\n", rbd_dev->spec->pool_name);
5132 static ssize_t rbd_pool_id_show(struct device *dev,
5133 struct device_attribute *attr, char *buf)
5135 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5137 return sprintf(buf, "%llu\n",
5138 (unsigned long long) rbd_dev->spec->pool_id);
5141 static ssize_t rbd_pool_ns_show(struct device *dev,
5142 struct device_attribute *attr, char *buf)
5144 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5146 return sprintf(buf, "%s\n", rbd_dev->spec->pool_ns ?: "");
5149 static ssize_t rbd_name_show(struct device *dev,
5150 struct device_attribute *attr, char *buf)
5152 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5154 if (rbd_dev->spec->image_name)
5155 return sprintf(buf, "%s\n", rbd_dev->spec->image_name);
5157 return sprintf(buf, "(unknown)\n");
5160 static ssize_t rbd_image_id_show(struct device *dev,
5161 struct device_attribute *attr, char *buf)
5163 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5165 return sprintf(buf, "%s\n", rbd_dev->spec->image_id);
5169 * Shows the name of the currently-mapped snapshot (or
5170 * RBD_SNAP_HEAD_NAME for the base image).
5172 static ssize_t rbd_snap_show(struct device *dev,
5173 struct device_attribute *attr,
5176 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5178 return sprintf(buf, "%s\n", rbd_dev->spec->snap_name);
5181 static ssize_t rbd_snap_id_show(struct device *dev,
5182 struct device_attribute *attr, char *buf)
5184 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5186 return sprintf(buf, "%llu\n", rbd_dev->spec->snap_id);
5190 * For a v2 image, shows the chain of parent images, separated by empty
5191 * lines. For v1 images or if there is no parent, shows "(no parent image)".
5194 static ssize_t rbd_parent_show(struct device *dev,
5195 struct device_attribute *attr,
5198 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5201 if (!rbd_dev->parent)
5202 return sprintf(buf, "(no parent image)\n");
5204 for ( ; rbd_dev->parent; rbd_dev = rbd_dev->parent) {
5205 struct rbd_spec *spec = rbd_dev->parent_spec;
5207 count += sprintf(&buf[count], "%s"
5208 "pool_id %llu\npool_name %s\n"
5210 "image_id %s\nimage_name %s\n"
5211 "snap_id %llu\nsnap_name %s\n"
5213 !count ? "" : "\n", /* first? */
5214 spec->pool_id, spec->pool_name,
5215 spec->pool_ns ?: "",
5216 spec->image_id, spec->image_name ?: "(unknown)",
5217 spec->snap_id, spec->snap_name,
5218 rbd_dev->parent_overlap);
5224 static ssize_t rbd_image_refresh(struct device *dev,
5225 struct device_attribute *attr,
5229 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5232 ret = rbd_dev_refresh(rbd_dev);
5239 static DEVICE_ATTR(size, 0444, rbd_size_show, NULL);
5240 static DEVICE_ATTR(features, 0444, rbd_features_show, NULL);
5241 static DEVICE_ATTR(major, 0444, rbd_major_show, NULL);
5242 static DEVICE_ATTR(minor, 0444, rbd_minor_show, NULL);
5243 static DEVICE_ATTR(client_addr, 0444, rbd_client_addr_show, NULL);
5244 static DEVICE_ATTR(client_id, 0444, rbd_client_id_show, NULL);
5245 static DEVICE_ATTR(cluster_fsid, 0444, rbd_cluster_fsid_show, NULL);
5246 static DEVICE_ATTR(config_info, 0400, rbd_config_info_show, NULL);
5247 static DEVICE_ATTR(pool, 0444, rbd_pool_show, NULL);
5248 static DEVICE_ATTR(pool_id, 0444, rbd_pool_id_show, NULL);
5249 static DEVICE_ATTR(pool_ns, 0444, rbd_pool_ns_show, NULL);
5250 static DEVICE_ATTR(name, 0444, rbd_name_show, NULL);
5251 static DEVICE_ATTR(image_id, 0444, rbd_image_id_show, NULL);
5252 static DEVICE_ATTR(refresh, 0200, NULL, rbd_image_refresh);
5253 static DEVICE_ATTR(current_snap, 0444, rbd_snap_show, NULL);
5254 static DEVICE_ATTR(snap_id, 0444, rbd_snap_id_show, NULL);
5255 static DEVICE_ATTR(parent, 0444, rbd_parent_show, NULL);
5257 static struct attribute *rbd_attrs[] = {
5258 &dev_attr_size.attr,
5259 &dev_attr_features.attr,
5260 &dev_attr_major.attr,
5261 &dev_attr_minor.attr,
5262 &dev_attr_client_addr.attr,
5263 &dev_attr_client_id.attr,
5264 &dev_attr_cluster_fsid.attr,
5265 &dev_attr_config_info.attr,
5266 &dev_attr_pool.attr,
5267 &dev_attr_pool_id.attr,
5268 &dev_attr_pool_ns.attr,
5269 &dev_attr_name.attr,
5270 &dev_attr_image_id.attr,
5271 &dev_attr_current_snap.attr,
5272 &dev_attr_snap_id.attr,
5273 &dev_attr_parent.attr,
5274 &dev_attr_refresh.attr,
5278 static struct attribute_group rbd_attr_group = {
5282 static const struct attribute_group *rbd_attr_groups[] = {
5287 static void rbd_dev_release(struct device *dev);
5289 static const struct device_type rbd_device_type = {
5291 .groups = rbd_attr_groups,
5292 .release = rbd_dev_release,
5295 static struct rbd_spec *rbd_spec_get(struct rbd_spec *spec)
5297 kref_get(&spec->kref);
5302 static void rbd_spec_free(struct kref *kref);
5303 static void rbd_spec_put(struct rbd_spec *spec)
5306 kref_put(&spec->kref, rbd_spec_free);
5309 static struct rbd_spec *rbd_spec_alloc(void)
5311 struct rbd_spec *spec;
5313 spec = kzalloc(sizeof (*spec), GFP_KERNEL);
5317 spec->pool_id = CEPH_NOPOOL;
5318 spec->snap_id = CEPH_NOSNAP;
5319 kref_init(&spec->kref);
5324 static void rbd_spec_free(struct kref *kref)
5326 struct rbd_spec *spec = container_of(kref, struct rbd_spec, kref);
5328 kfree(spec->pool_name);
5329 kfree(spec->pool_ns);
5330 kfree(spec->image_id);
5331 kfree(spec->image_name);
5332 kfree(spec->snap_name);
5336 static void rbd_dev_free(struct rbd_device *rbd_dev)
5338 WARN_ON(rbd_dev->watch_state != RBD_WATCH_STATE_UNREGISTERED);
5339 WARN_ON(rbd_dev->lock_state != RBD_LOCK_STATE_UNLOCKED);
5341 ceph_oid_destroy(&rbd_dev->header_oid);
5342 ceph_oloc_destroy(&rbd_dev->header_oloc);
5343 kfree(rbd_dev->config_info);
5345 rbd_put_client(rbd_dev->rbd_client);
5346 rbd_spec_put(rbd_dev->spec);
5347 kfree(rbd_dev->opts);
5351 static void rbd_dev_release(struct device *dev)
5353 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5354 bool need_put = !!rbd_dev->opts;
5357 destroy_workqueue(rbd_dev->task_wq);
5358 ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
5361 rbd_dev_free(rbd_dev);
5364 * This is racy, but way better than doing the module_put() outside
5365 * of the release callback. The race window is pretty small, so
5366 * doing something similar to dm (dm-builtin.c) is overkill.
5369 module_put(THIS_MODULE);
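/*
 * Allocate and initialize a bare rbd_device: locks, lists, work items
 * and the embedded struct device. References on @rbdc and @spec are
 * the caller's responsibility (see rbd_dev_create() and
 * rbd_dev_probe_parent()).
 */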
5372 static struct rbd_device *__rbd_dev_create(struct rbd_client *rbdc,
5373 struct rbd_spec *spec)
5375 struct rbd_device *rbd_dev;
5377 rbd_dev = kzalloc(sizeof(*rbd_dev), GFP_KERNEL);
5381 spin_lock_init(&rbd_dev->lock);
5382 INIT_LIST_HEAD(&rbd_dev->node);
5383 init_rwsem(&rbd_dev->header_rwsem);
5385 rbd_dev->header.data_pool_id = CEPH_NOPOOL;
5386 ceph_oid_init(&rbd_dev->header_oid);
5387 rbd_dev->header_oloc.pool = spec->pool_id;
5388 if (spec->pool_ns) {
5389 WARN_ON(!*spec->pool_ns);
5390 rbd_dev->header_oloc.pool_ns =
5391 ceph_find_or_create_string(spec->pool_ns,
5392 strlen(spec->pool_ns));
5395 mutex_init(&rbd_dev->watch_mutex);
5396 rbd_dev->watch_state = RBD_WATCH_STATE_UNREGISTERED;
5397 INIT_DELAYED_WORK(&rbd_dev->watch_dwork, rbd_reregister_watch);
5399 init_rwsem(&rbd_dev->lock_rwsem);
5400 rbd_dev->lock_state = RBD_LOCK_STATE_UNLOCKED;
5401 INIT_WORK(&rbd_dev->acquired_lock_work, rbd_notify_acquired_lock);
5402 INIT_WORK(&rbd_dev->released_lock_work, rbd_notify_released_lock);
5403 INIT_DELAYED_WORK(&rbd_dev->lock_dwork, rbd_acquire_lock);
5404 INIT_WORK(&rbd_dev->unlock_work, rbd_release_lock_work);
5405 spin_lock_init(&rbd_dev->lock_lists_lock);
5406 INIT_LIST_HEAD(&rbd_dev->acquiring_list);
5407 INIT_LIST_HEAD(&rbd_dev->running_list);
5408 init_completion(&rbd_dev->acquire_wait);
5409 init_completion(&rbd_dev->releasing_wait);
5411 spin_lock_init(&rbd_dev->object_map_lock);
5413 rbd_dev->dev.bus = &rbd_bus_type;
5414 rbd_dev->dev.type = &rbd_device_type;
5415 rbd_dev->dev.parent = &rbd_root_dev;
5416 device_initialize(&rbd_dev->dev);
5418 rbd_dev->rbd_client = rbdc;
5419 rbd_dev->spec = spec;
5425 * Create a mapping rbd_dev.
5427 static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
5428 struct rbd_spec *spec,
5429 struct rbd_options *opts)
5431 struct rbd_device *rbd_dev;
5433 rbd_dev = __rbd_dev_create(rbdc, spec);
5437 rbd_dev->opts = opts;
5439 /* get an id and fill in device name */
5440 rbd_dev->dev_id = ida_simple_get(&rbd_dev_id_ida, 0,
5441 minor_to_rbd_dev_id(1 << MINORBITS),
5443 if (rbd_dev->dev_id < 0)
5446 sprintf(rbd_dev->name, RBD_DRV_NAME "%d", rbd_dev->dev_id);
5447 rbd_dev->task_wq = alloc_ordered_workqueue("%s-tasks", WQ_MEM_RECLAIM,
5449 if (!rbd_dev->task_wq)
5452 /* we have a ref from do_rbd_add() */
5453 __module_get(THIS_MODULE);
5455 dout("%s rbd_dev %p dev_id %d\n", __func__, rbd_dev, rbd_dev->dev_id);
5459 ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
5461 rbd_dev_free(rbd_dev);
5465 static void rbd_dev_destroy(struct rbd_device *rbd_dev)
5468 put_device(&rbd_dev->dev);
5472 * Gets the size and object order for an image snapshot, or if
5473 * snap_id is CEPH_NOSNAP, gets this information for the base image.
5476 static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
5477 u8 *order, u64 *snap_size)
5479 __le64 snapid = cpu_to_le64(snap_id);
5484 } __attribute__ ((packed)) size_buf = { 0 };
5486 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
5487 &rbd_dev->header_oloc, "get_size",
5488 &snapid, sizeof(snapid),
5489 &size_buf, sizeof(size_buf));
5490 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
5493 if (ret < sizeof (size_buf))
5497 *order = size_buf.order;
5498 dout(" order %u", (unsigned int)*order);
5500 *snap_size = le64_to_cpu(size_buf.size);
5502 dout(" snap_id 0x%016llx snap_size = %llu\n",
5503 (unsigned long long)snap_id,
5504 (unsigned long long)*snap_size);
5509 static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev)
5511 return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
5512 &rbd_dev->header.obj_order,
5513 &rbd_dev->header.image_size);
5516 static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
5523 /* Response will be an encoded string, which includes a length */
5524 size = sizeof(__le32) + RBD_OBJ_PREFIX_LEN_MAX;
5525 reply_buf = kzalloc(size, GFP_KERNEL);
5529 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
5530 &rbd_dev->header_oloc, "get_object_prefix",
5531 NULL, 0, reply_buf, size);
5532 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
5537 rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
5538 p + ret, NULL, GFP_NOIO);
5541 if (IS_ERR(rbd_dev->header.object_prefix)) {
5542 ret = PTR_ERR(rbd_dev->header.object_prefix);
5543 rbd_dev->header.object_prefix = NULL;
5545 dout(" object_prefix = %s\n", rbd_dev->header.object_prefix);
5553 static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
5554 bool read_only, u64 *snap_features)
5563 } __attribute__ ((packed)) features_buf = { 0 };
5567 features_in.snap_id = cpu_to_le64(snap_id);
5568 features_in.read_only = read_only;
5570 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
5571 &rbd_dev->header_oloc, "get_features",
5572 &features_in, sizeof(features_in),
5573 &features_buf, sizeof(features_buf));
5574 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
5577 if (ret < sizeof (features_buf))
5580 unsup = le64_to_cpu(features_buf.incompat) & ~RBD_FEATURES_SUPPORTED;
5582 rbd_warn(rbd_dev, "image uses unsupported features: 0x%llx",
5587 *snap_features = le64_to_cpu(features_buf.features);
5589 dout(" snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n",
5590 (unsigned long long)snap_id,
5591 (unsigned long long)*snap_features,
5592 (unsigned long long)le64_to_cpu(features_buf.incompat));
5597 static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
5599 return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
5601 &rbd_dev->header.features);
5605 * These are generic image flags, but since they are used only for
5606 * object map, store them in rbd_dev->object_map_flags.
5608 * For the same reason, this function is called only on object map
5609 * (re)load and not on header refresh.
5611 static int rbd_dev_v2_get_flags(struct rbd_device *rbd_dev)
5613 __le64 snapid = cpu_to_le64(rbd_dev->spec->snap_id);
5617 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
5618 &rbd_dev->header_oloc, "get_flags",
5619 &snapid, sizeof(snapid),
5620 &flags, sizeof(flags));
5623 if (ret < sizeof(flags))
5626 rbd_dev->object_map_flags = le64_to_cpu(flags);
5630 struct parent_image_info {
5632 const char *pool_ns;
5633 const char *image_id;
5641 * The caller is responsible for @pii.
5643 static int decode_parent_image_spec(void **p, void *end,
5644 struct parent_image_info *pii)
5650 ret = ceph_start_decoding(p, end, 1, "ParentImageSpec",
5651 &struct_v, &struct_len);
5655 ceph_decode_64_safe(p, end, pii->pool_id, e_inval);
5656 pii->pool_ns = ceph_extract_encoded_string(p, end, NULL, GFP_KERNEL);
5657 if (IS_ERR(pii->pool_ns)) {
5658 ret = PTR_ERR(pii->pool_ns);
5659 pii->pool_ns = NULL;
5662 pii->image_id = ceph_extract_encoded_string(p, end, NULL, GFP_KERNEL);
5663 if (IS_ERR(pii->image_id)) {
5664 ret = PTR_ERR(pii->image_id);
5665 pii->image_id = NULL;
5668 ceph_decode_64_safe(p, end, pii->snap_id, e_inval);
5675 static int __get_parent_info(struct rbd_device *rbd_dev,
5676 struct page *req_page,
5677 struct page *reply_page,
5678 struct parent_image_info *pii)
5680 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
5681 size_t reply_len = PAGE_SIZE;
5685 ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
5686 "rbd", "parent_get", CEPH_OSD_FLAG_READ,
5687 req_page, sizeof(u64), &reply_page, &reply_len);
5689 return ret == -EOPNOTSUPP ? 1 : ret;
5691 p = page_address(reply_page);
5692 end = p + reply_len;
5693 ret = decode_parent_image_spec(&p, end, pii);
5697 ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
5698 "rbd", "parent_overlap_get", CEPH_OSD_FLAG_READ,
5699 req_page, sizeof(u64), &reply_page, &reply_len);
5703 p = page_address(reply_page);
5704 end = p + reply_len;
5705 ceph_decode_8_safe(&p, end, pii->has_overlap, e_inval);
5706 if (pii->has_overlap)
5707 ceph_decode_64_safe(&p, end, pii->overlap, e_inval);
5716 * The caller is responsible for @pii.
5718 static int __get_parent_info_legacy(struct rbd_device *rbd_dev,
5719 struct page *req_page,
5720 struct page *reply_page,
5721 struct parent_image_info *pii)
5723 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
5724 size_t reply_len = PAGE_SIZE;
5728 ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
5729 "rbd", "get_parent", CEPH_OSD_FLAG_READ,
5730 req_page, sizeof(u64), &reply_page, &reply_len);
5734 p = page_address(reply_page);
5735 end = p + reply_len;
5736 ceph_decode_64_safe(&p, end, pii->pool_id, e_inval);
5737 pii->image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
5738 if (IS_ERR(pii->image_id)) {
5739 ret = PTR_ERR(pii->image_id);
5740 pii->image_id = NULL;
5743 ceph_decode_64_safe(&p, end, pii->snap_id, e_inval);
5744 pii->has_overlap = true;
5745 ceph_decode_64_safe(&p, end, pii->overlap, e_inval);
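/*
 * Fetch parent image info for the mapped snapshot. Prefers the
 * "parent_get"/"parent_overlap_get" class methods and falls back to
 * the legacy "get_parent" method when the OSD doesn't support them
 * (__get_parent_info() returns 1 on -EOPNOTSUPP).
 */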
5753 static int get_parent_info(struct rbd_device *rbd_dev,
5754 struct parent_image_info *pii)
5756 struct page *req_page, *reply_page;
5760 req_page = alloc_page(GFP_KERNEL);
5764 reply_page = alloc_page(GFP_KERNEL);
5766 __free_page(req_page);
5770 p = page_address(req_page);
5771 ceph_encode_64(&p, rbd_dev->spec->snap_id);
5772 ret = __get_parent_info(rbd_dev, req_page, reply_page, pii);
5774 ret = __get_parent_info_legacy(rbd_dev, req_page, reply_page,
5777 __free_page(req_page);
5778 __free_page(reply_page);
5782 static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
5784 struct rbd_spec *parent_spec;
5785 struct parent_image_info pii = { 0 };
5788 parent_spec = rbd_spec_alloc();
5792 ret = get_parent_info(rbd_dev, &pii);
5796 dout("%s pool_id %llu pool_ns %s image_id %s snap_id %llu has_overlap %d overlap %llu\n",
5797 __func__, pii.pool_id, pii.pool_ns, pii.image_id, pii.snap_id,
5798 pii.has_overlap, pii.overlap);
5800 if (pii.pool_id == CEPH_NOPOOL || !pii.has_overlap) {
5802 * Either the parent never existed, or we have
5803 * record of it but the image got flattened so it no
5804 * longer has a parent. When the parent of a
5805 * layered image disappears we immediately set the
5806 * overlap to 0. The effect of this is that all new
5807 * requests will be treated as if the image had no parent.
5810 * If !pii.has_overlap, the parent image spec is not
5811 * applicable. It's there to avoid duplication in each reply message.
5814 if (rbd_dev->parent_overlap) {
5815 rbd_dev->parent_overlap = 0;
5816 rbd_dev_parent_put(rbd_dev);
5817 pr_info("%s: clone image has been flattened\n",
5818 rbd_dev->disk->disk_name);
5821 goto out; /* No parent? No problem. */
5824 /* The ceph file layout needs to fit pool id in 32 bits */
5827 if (pii.pool_id > (u64)U32_MAX) {
5828 rbd_warn(NULL, "parent pool id too large (%llu > %u)",
5829 (unsigned long long)pii.pool_id, U32_MAX);
5834 * The parent won't change (except when the clone is
5835 * flattened, which is handled above). So we only need to
5836 * record the parent spec if we have not already done so.
5838 if (!rbd_dev->parent_spec) {
5839 parent_spec->pool_id = pii.pool_id;
5840 if (pii.pool_ns && *pii.pool_ns) {
5841 parent_spec->pool_ns = pii.pool_ns;
5844 parent_spec->image_id = pii.image_id;
5845 pii.image_id = NULL;
5846 parent_spec->snap_id = pii.snap_id;
5848 rbd_dev->parent_spec = parent_spec;
5849 parent_spec = NULL; /* rbd_dev now owns this */
5853 * We always update the parent overlap. If it's zero we issue
5854 * a warning, as we will proceed as if there was no parent.
5858 /* refresh, careful to warn just once */
5859 if (rbd_dev->parent_overlap)
5861 "clone now standalone (overlap became 0)");
5864 rbd_warn(rbd_dev, "clone is standalone (overlap 0)");
5867 rbd_dev->parent_overlap = pii.overlap;
5873 kfree(pii.image_id);
5874 rbd_spec_put(parent_spec);
5878 static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
5882 __le64 stripe_count;
5883 } __attribute__ ((packed)) striping_info_buf = { 0 };
5884 size_t size = sizeof (striping_info_buf);
5888 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
5889 &rbd_dev->header_oloc, "get_stripe_unit_count",
5890 NULL, 0, &striping_info_buf, size);
5891 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
5897 p = &striping_info_buf;
5898 rbd_dev->header.stripe_unit = ceph_decode_64(&p);
5899 rbd_dev->header.stripe_count = ceph_decode_64(&p);
5903 static int rbd_dev_v2_data_pool(struct rbd_device *rbd_dev)
5905 __le64 data_pool_id;
5908 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
5909 &rbd_dev->header_oloc, "get_data_pool",
5910 NULL, 0, &data_pool_id, sizeof(data_pool_id));
5913 if (ret < sizeof(data_pool_id))
5916 rbd_dev->header.data_pool_id = le64_to_cpu(data_pool_id);
5917 WARN_ON(rbd_dev->header.data_pool_id == CEPH_NOPOOL);
5921 static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
5923 CEPH_DEFINE_OID_ONSTACK(oid);
5924 size_t image_id_size;
5929 void *reply_buf = NULL;
5931 char *image_name = NULL;
5934 rbd_assert(!rbd_dev->spec->image_name);
5936 len = strlen(rbd_dev->spec->image_id);
5937 image_id_size = sizeof (__le32) + len;
5938 image_id = kmalloc(image_id_size, GFP_KERNEL);
5943 end = image_id + image_id_size;
5944 ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32)len);
5946 size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX;
5947 reply_buf = kmalloc(size, GFP_KERNEL);
5951 ceph_oid_printf(&oid, "%s", RBD_DIRECTORY);
5952 ret = rbd_obj_method_sync(rbd_dev, &oid, &rbd_dev->header_oloc,
5953 "dir_get_name", image_id, image_id_size,
5958 end = reply_buf + ret;
5960 image_name = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
5961 if (IS_ERR(image_name))
5964 dout("%s: name is %s len is %zd\n", __func__, image_name, len);
5972 static u64 rbd_v1_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
5974 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
5975 const char *snap_name;
5978 /* Skip over names until we find the one we are looking for */
5980 snap_name = rbd_dev->header.snap_names;
5981 while (which < snapc->num_snaps) {
5982 if (!strcmp(name, snap_name))
5983 return snapc->snaps[which];
5984 snap_name += strlen(snap_name) + 1;
5990 static u64 rbd_v2_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
5992 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
5997 for (which = 0; !found && which < snapc->num_snaps; which++) {
5998 const char *snap_name;
6000 snap_id = snapc->snaps[which];
6001 snap_name = rbd_dev_v2_snap_name(rbd_dev, snap_id);
6002 if (IS_ERR(snap_name)) {
6003 /* ignore no-longer existing snapshots */
6004 if (PTR_ERR(snap_name) == -ENOENT)
6009 found = !strcmp(name, snap_name);
6012 return found ? snap_id : CEPH_NOSNAP;
6016 * Assumes name is never RBD_SNAP_HEAD_NAME; returns CEPH_NOSNAP if
6017 * no snapshot by that name is found, or if an error occurs.
6019 static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
6021 if (rbd_dev->image_format == 1)
6022 return rbd_v1_snap_id_by_name(rbd_dev, name);
6024 return rbd_v2_snap_id_by_name(rbd_dev, name);
6028 * An image being mapped will have everything but the snap id.
6030 static int rbd_spec_fill_snap_id(struct rbd_device *rbd_dev)
6032 struct rbd_spec *spec = rbd_dev->spec;
6034 rbd_assert(spec->pool_id != CEPH_NOPOOL && spec->pool_name);
6035 rbd_assert(spec->image_id && spec->image_name);
6036 rbd_assert(spec->snap_name);
6038 if (strcmp(spec->snap_name, RBD_SNAP_HEAD_NAME)) {
6041 snap_id = rbd_snap_id_by_name(rbd_dev, spec->snap_name);
6042 if (snap_id == CEPH_NOSNAP)
6045 spec->snap_id = snap_id;
6047 spec->snap_id = CEPH_NOSNAP;
6054 * A parent image will have all ids but none of the names.
6056 * All names in an rbd spec are dynamically allocated. It's OK if we
6057 * can't figure out the name for an image id.
6059 static int rbd_spec_fill_names(struct rbd_device *rbd_dev)
6061 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
6062 struct rbd_spec *spec = rbd_dev->spec;
6063 const char *pool_name;
6064 const char *image_name;
6065 const char *snap_name;
6068 rbd_assert(spec->pool_id != CEPH_NOPOOL);
6069 rbd_assert(spec->image_id);
6070 rbd_assert(spec->snap_id != CEPH_NOSNAP);
6072 /* Get the pool name; we have to make our own copy of this */
6074 pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, spec->pool_id);
6076 rbd_warn(rbd_dev, "no pool with id %llu", spec->pool_id);
6079 pool_name = kstrdup(pool_name, GFP_KERNEL);
6083 /* Fetch the image name; tolerate failure here */
6085 image_name = rbd_dev_image_name(rbd_dev);
6087 rbd_warn(rbd_dev, "unable to get image name");
6089 /* Fetch the snapshot name */
6091 snap_name = rbd_snap_name(rbd_dev, spec->snap_id);
6092 if (IS_ERR(snap_name)) {
6093 ret = PTR_ERR(snap_name);
6097 spec->pool_name = pool_name;
6098 spec->image_name = image_name;
6099 spec->snap_name = snap_name;
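/*
 * Read the image's snapshot context (the seq value plus an array of
 * snapshot ids) via the "get_snapcontext" class method and install it
 * in rbd_dev->header.snapc.
 */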
6109 static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
6118 struct ceph_snap_context *snapc;
6122 * We'll need room for the seq value (maximum snapshot id),
6123 * snapshot count, and array of that many snapshot ids.
6124 * For now we have a fixed upper limit on the number we're
6125 * prepared to receive.
6127 size = sizeof (__le64) + sizeof (__le32) +
6128 RBD_MAX_SNAP_COUNT * sizeof (__le64);
6129 reply_buf = kzalloc(size, GFP_KERNEL);
6133 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
6134 &rbd_dev->header_oloc, "get_snapcontext",
6135 NULL, 0, reply_buf, size);
6136 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
6141 end = reply_buf + ret;
6143 ceph_decode_64_safe(&p, end, seq, out);
6144 ceph_decode_32_safe(&p, end, snap_count, out);
6147 * Make sure the reported number of snapshot ids wouldn't go
6148 * beyond the end of our buffer. But before checking that,
6149 * make sure the computed size of the snapshot context we
6150 * allocate is representable in a size_t.
6152 if (snap_count > (SIZE_MAX - sizeof (struct ceph_snap_context))
6157 if (!ceph_has_room(&p, end, snap_count * sizeof (__le64)))
6161 snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
6167 for (i = 0; i < snap_count; i++)
6168 snapc->snaps[i] = ceph_decode_64(&p);
6170 ceph_put_snap_context(rbd_dev->header.snapc);
6171 rbd_dev->header.snapc = snapc;
6173 dout(" snap context seq = %llu, snap_count = %u\n",
6174 (unsigned long long)seq, (unsigned int)snap_count);
6181 static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
6192 size = sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN;
6193 reply_buf = kmalloc(size, GFP_KERNEL);
6195 return ERR_PTR(-ENOMEM);
6197 snapid = cpu_to_le64(snap_id);
6198 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
6199 &rbd_dev->header_oloc, "get_snapshot_name",
6200 &snapid, sizeof(snapid), reply_buf, size);
6201 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
6203 snap_name = ERR_PTR(ret);
6208 end = reply_buf + ret;
6209 snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
6210 if (IS_ERR(snap_name))
6213 dout(" snap_id 0x%016llx snap_name = %s\n",
6214 (unsigned long long)snap_id, snap_name);
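/*
 * Refresh the v2 header: the image size is re-read every time, while
 * the object prefix, features, striping and data pool parameters are
 * fetched only once (rbd_dev_v2_header_onetime()) since they never
 * change. The snapshot context is re-read last.
 */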
6221 static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev)
6223 bool first_time = rbd_dev->header.object_prefix == NULL;
6226 ret = rbd_dev_v2_image_size(rbd_dev);
6231 ret = rbd_dev_v2_header_onetime(rbd_dev);
6236 ret = rbd_dev_v2_snap_context(rbd_dev);
6237 if (ret && first_time) {
6238 kfree(rbd_dev->header.object_prefix);
6239 rbd_dev->header.object_prefix = NULL;
6245 static int rbd_dev_header_info(struct rbd_device *rbd_dev)
6247 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
6249 if (rbd_dev->image_format == 1)
6250 return rbd_dev_v1_header_info(rbd_dev);
6252 return rbd_dev_v2_header_info(rbd_dev);
6256 * Skips over white space at *buf, and updates *buf to point to the
6257 * first found non-space character (if any). Returns the length of
6258 * the token (string of non-white space characters) found. Note
6259 * that *buf must be terminated with '\0'.
6261 static inline size_t next_token(const char **buf)
6264 * These are the characters that produce nonzero for
6265 * isspace() in the "C" and "POSIX" locales.
6267 const char *spaces = " \f\n\r\t\v";
6269 *buf += strspn(*buf, spaces); /* Find start of token */
6271 return strcspn(*buf, spaces); /* Return token length */
6275 * Finds the next token in *buf, dynamically allocates a buffer big
6276 * enough to hold a copy of it, and copies the token into the new
6277 * buffer. The copy is guaranteed to be terminated with '\0'. Note
6278 * that a duplicate buffer is created even for a zero-length token.
6280 * Returns a pointer to the newly-allocated duplicate, or a null
6281 * pointer if memory for the duplicate was not available. If
6282 * the lenp argument is a non-null pointer, the length of the token
6283 * (not including the '\0') is returned in *lenp.
6285 * If successful, the *buf pointer will be updated to point beyond
6286 * the end of the found token.
6288 * Note: uses GFP_KERNEL for allocation.
6290 static inline char *dup_token(const char **buf, size_t *lenp)
6295 len = next_token(buf);
6296 dup = kmemdup(*buf, len + 1, GFP_KERNEL);
6299 *(dup + len) = '\0';
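/*
 * Parse a single "key" or "key=value" mapping option. Generic ceph
 * options are offered to libceph first (ceph_parse_param() returns
 * -ENOPARAM for keys it does not recognize); everything else is
 * matched against the rbd_parameters table.
 */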
6308 static int rbd_parse_param(struct fs_parameter *param,
6309 struct rbd_parse_opts_ctx *pctx)
6311 struct rbd_options *opt = pctx->opts;
6312 struct fs_parse_result result;
6313 struct p_log log = {.prefix = "rbd"};
6316 ret = ceph_parse_param(param, pctx->copts, NULL);
6317 if (ret != -ENOPARAM)
6320 token = __fs_parse(&log, rbd_parameters, param, &result);
6321 dout("%s fs_parse '%s' token %d\n", __func__, param->key, token);
6323 if (token == -ENOPARAM)
6324 return inval_plog(&log, "Unknown parameter '%s'",
6330 case Opt_queue_depth:
6331 if (result.uint_32 < 1)
6333 opt->queue_depth = result.uint_32;
6335 case Opt_alloc_size:
6336 if (result.uint_32 < SECTOR_SIZE)
6338 if (!is_power_of_2(result.uint_32))
6339 return inval_plog(&log, "alloc_size must be a power of 2");
6340 opt->alloc_size = result.uint_32;
6342 case Opt_lock_timeout:
6343 /* 0 is "wait forever" (i.e. infinite timeout) */
6344 if (result.uint_32 > INT_MAX / 1000)
6346 opt->lock_timeout = msecs_to_jiffies(result.uint_32 * 1000);
6349 kfree(pctx->spec->pool_ns);
6350 pctx->spec->pool_ns = param->string;
6351 param->string = NULL;
6353 case Opt_compression_hint:
6354 switch (result.uint_32) {
6355 case Opt_compression_hint_none:
6356 opt->alloc_hint_flags &=
6357 ~(CEPH_OSD_ALLOC_HINT_FLAG_COMPRESSIBLE |
6358 CEPH_OSD_ALLOC_HINT_FLAG_INCOMPRESSIBLE);
6360 case Opt_compression_hint_compressible:
6361 opt->alloc_hint_flags |=
6362 CEPH_OSD_ALLOC_HINT_FLAG_COMPRESSIBLE;
6363 opt->alloc_hint_flags &=
6364 ~CEPH_OSD_ALLOC_HINT_FLAG_INCOMPRESSIBLE;
6366 case Opt_compression_hint_incompressible:
6367 opt->alloc_hint_flags |=
6368 CEPH_OSD_ALLOC_HINT_FLAG_INCOMPRESSIBLE;
6369 opt->alloc_hint_flags &=
6370 ~CEPH_OSD_ALLOC_HINT_FLAG_COMPRESSIBLE;
6377 opt->read_only = true;
6379 case Opt_read_write:
6380 opt->read_only = false;
6382 case Opt_lock_on_read:
6383 opt->lock_on_read = true;
6386 opt->exclusive = true;
6398 return inval_plog(&log, "%s out of range", param->key);
6402 * This duplicates most of generic_parse_monolithic(), untying it from
6403 * fs_context and skipping standard superblock and security options.
6405 static int rbd_parse_options(char *options, struct rbd_parse_opts_ctx *pctx)
6410 dout("%s '%s'\n", __func__, options);
6411 while ((key = strsep(&options, ",")) != NULL) {
6413 struct fs_parameter param = {
6415 .type = fs_value_is_flag,
6417 char *value = strchr(key, '=');
6424 v_len = strlen(value);
6425 param.string = kmemdup_nul(value, v_len,
6429 param.type = fs_value_is_string;
6433 ret = rbd_parse_param(¶m, pctx);
6434 kfree(param.string);
6444 * Parse the options provided for an "rbd add" (i.e., rbd image
6445 * mapping) request. These arrive via a write to /sys/bus/rbd/add,
6446 * and the data written is passed here via a NUL-terminated buffer.
6447 * Returns 0 if successful or an error code otherwise.
6449 * The information extracted from these options is recorded in
6450 * the other parameters which return dynamically-allocated
6451 * structures:
6452 *  ceph_opts
6453 * The address of a pointer that will refer to a ceph options
6454 * structure. Caller must release the returned pointer using
6455 * ceph_destroy_options() when it is no longer needed.
6456 *  rbd_opts
6457 * Address of an rbd options pointer. Fully initialized by
6458 * this function; caller must release with kfree().
6459 *  rbd_spec
6460 * Address of an rbd image specification pointer. Fully
6461 * initialized by this function based on parsed options.
6462 * Caller must release with rbd_spec_put().
6464 * The options passed take this form:
6465 * <mon_addrs> <options> <pool_name> <image_name> [<snap_id>]
6466 * where:
6467 *  <mon_addrs>
6468 * A comma-separated list of one or more monitor addresses.
6469 * A monitor address is an ip address, optionally followed
6470 * by a port number (separated by a colon).
6471 * I.e.: ip1[:port1][,ip2[:port2]...]
6472 *  <options>
6473 * A comma-separated list of ceph and/or rbd options.
6474 *  <pool_name>
6475 * The name of the rados pool containing the rbd image.
6476 *  <image_name>
6477 * The name of the image in that pool to map.
6478 *  <snap_id>
6479 * An optional snapshot id. If provided, the mapping will
6480 * present data from the image at the time that snapshot was
6481 * created. The image head is used if no snapshot id is
6482 * provided. Snapshot mappings are always read-only.
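*
* For example, a hypothetical mapping (the monitor address, key and
* names below are placeholders):
*
*   $ echo "1.2.3.4:6789 name=admin,secret=<key> mypool myimage -" \
*         > /sys/bus/rbd/add
*/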
6484 static int rbd_add_parse_args(const char *buf,
6485 struct ceph_options **ceph_opts,
6486 struct rbd_options **opts,
6487 struct rbd_spec **rbd_spec)
6491 const char *mon_addrs;
6493 size_t mon_addrs_size;
6494 struct rbd_parse_opts_ctx pctx = { 0 };
6497 /* The first four tokens are required */
6499 len = next_token(&buf);
6501 rbd_warn(NULL, "no monitor address(es) provided");
6505 mon_addrs_size = len;
6509 options = dup_token(&buf, NULL);
6513 rbd_warn(NULL, "no options provided");
6517 pctx.spec = rbd_spec_alloc();
6521 pctx.spec->pool_name = dup_token(&buf, NULL);
6522 if (!pctx.spec->pool_name)
6524 if (!*pctx.spec->pool_name) {
6525 rbd_warn(NULL, "no pool name provided");
6529 pctx.spec->image_name = dup_token(&buf, NULL);
6530 if (!pctx.spec->image_name)
6532 if (!*pctx.spec->image_name) {
6533 rbd_warn(NULL, "no image name provided");
6538 * Snapshot name is optional; default is to use "-"
6539 * (indicating the head/no snapshot).
6541 len = next_token(&buf);
6543 buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */
6544 len = sizeof (RBD_SNAP_HEAD_NAME) - 1;
6545 } else if (len > RBD_MAX_SNAP_NAME_LEN) {
6546 ret = -ENAMETOOLONG;
6549 snap_name = kmemdup(buf, len + 1, GFP_KERNEL);
6552 *(snap_name + len) = '\0';
6553 pctx.spec->snap_name = snap_name;
6555 pctx.copts = ceph_alloc_options();
6559 /* Initialize all rbd options to the defaults */
6561 pctx.opts = kzalloc(sizeof(*pctx.opts), GFP_KERNEL);
6565 pctx.opts->read_only = RBD_READ_ONLY_DEFAULT;
6566 pctx.opts->queue_depth = RBD_QUEUE_DEPTH_DEFAULT;
6567 pctx.opts->alloc_size = RBD_ALLOC_SIZE_DEFAULT;
6568 pctx.opts->lock_timeout = RBD_LOCK_TIMEOUT_DEFAULT;
6569 pctx.opts->lock_on_read = RBD_LOCK_ON_READ_DEFAULT;
6570 pctx.opts->exclusive = RBD_EXCLUSIVE_DEFAULT;
6571 pctx.opts->trim = RBD_TRIM_DEFAULT;
6573 ret = ceph_parse_mon_ips(mon_addrs, mon_addrs_size, pctx.copts, NULL);
6577 ret = rbd_parse_options(options, &pctx);
6581 *ceph_opts = pctx.copts;
6583 *rbd_spec = pctx.spec;
6591 ceph_destroy_options(pctx.copts);
6592 rbd_spec_put(pctx.spec);
6597 static void rbd_dev_image_unlock(struct rbd_device *rbd_dev)
6599 down_write(&rbd_dev->lock_rwsem);
6600 if (__rbd_is_lock_owner(rbd_dev))
6601 __rbd_release_lock(rbd_dev);
6602 up_write(&rbd_dev->lock_rwsem);
6606 * If the wait is interrupted, an error is returned even if the lock
6607 * was successfully acquired. rbd_dev_image_unlock() will release it if needed.
6610 static int rbd_add_acquire_lock(struct rbd_device *rbd_dev)
6614 if (!(rbd_dev->header.features & RBD_FEATURE_EXCLUSIVE_LOCK)) {
6615 if (!rbd_dev->opts->exclusive && !rbd_dev->opts->lock_on_read)
6618 rbd_warn(rbd_dev, "exclusive-lock feature is not enabled");
6622 if (rbd_is_ro(rbd_dev))
6625 rbd_assert(!rbd_is_lock_owner(rbd_dev));
6626 queue_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0);
6627 ret = wait_for_completion_killable_timeout(&rbd_dev->acquire_wait,
6628 ceph_timeout_jiffies(rbd_dev->opts->lock_timeout));
6630 ret = rbd_dev->acquire_err;
6632 cancel_delayed_work_sync(&rbd_dev->lock_dwork);
6638 rbd_warn(rbd_dev, "failed to acquire exclusive lock: %ld", ret);
6643 * The lock may have been released by now, unless automatic lock
6644 * transitions are disabled.
6646 rbd_assert(!rbd_dev->opts->exclusive || rbd_is_lock_owner(rbd_dev));
6651 * An rbd format 2 image has a unique identifier, distinct from the
6652 * name given to it by the user. Internally, that identifier is
6653 * what's used to specify the names of objects related to the image.
6655 * A special "rbd id" object is used to map an rbd image name to its
6656 * id. If that object doesn't exist, then there is no v2 rbd image
6657 * with the supplied name.
6659 * This function will record the given rbd_dev's image_id field if
6660 * it can be determined, and in that case will return 0. If any
6661 * errors occur a negative errno will be returned and the rbd_dev's
6662 * image_id field will be unchanged (and should be NULL).
6664 static int rbd_dev_image_id(struct rbd_device *rbd_dev)
6668 CEPH_DEFINE_OID_ONSTACK(oid);
6673 * When probing a parent image, the image id is already
6674 * known (and the image name likely is not). There's no
6675 * need to fetch the image id again in this case. We
6676 * do still need to set the image format though.
6678 if (rbd_dev->spec->image_id) {
6679 rbd_dev->image_format = *rbd_dev->spec->image_id ? 2 : 1;
6685 * First, see if the format 2 image id file exists, and if
6686 * so, get the image's persistent id from it.
6688 ret = ceph_oid_aprintf(&oid, GFP_KERNEL, "%s%s", RBD_ID_PREFIX,
6689 rbd_dev->spec->image_name);
6693 dout("rbd id object name is %s\n", oid.name);
6695 /* Response will be an encoded string, which includes a length */
6696 size = sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX;
6697 response = kzalloc(size, GFP_NOIO);
6703 /* If it doesn't exist we'll assume it's a format 1 image */
6705 ret = rbd_obj_method_sync(rbd_dev, &oid, &rbd_dev->header_oloc,
6708 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
6709 if (ret == -ENOENT) {
6710 image_id = kstrdup("", GFP_KERNEL);
6711 ret = image_id ? 0 : -ENOMEM;
6713 rbd_dev->image_format = 1;
6714 } else if (ret >= 0) {
6717 image_id = ceph_extract_encoded_string(&p, p + ret,
6719 ret = PTR_ERR_OR_ZERO(image_id);
6721 rbd_dev->image_format = 2;
6725 rbd_dev->spec->image_id = image_id;
6726 dout("image_id is %s\n", image_id);
6730 ceph_oid_destroy(&oid);
6735 * Undo whatever state changes are made by v1 or v2 header info routines.
6738 static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
6740 struct rbd_image_header *header;
6742 rbd_dev_parent_put(rbd_dev);
6743 rbd_object_map_free(rbd_dev);
6744 rbd_dev_mapping_clear(rbd_dev);
6746 /* Free dynamic fields from the header, then zero it out */
6748 header = &rbd_dev->header;
6749 ceph_put_snap_context(header->snapc);
6750 kfree(header->snap_sizes);
6751 kfree(header->snap_names);
6752 kfree(header->object_prefix);
6753 memset(header, 0, sizeof (*header));
6756 static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev)
6760 ret = rbd_dev_v2_object_prefix(rbd_dev);
6765 * Get and check the features for the image. Currently the
6766 * features are assumed to never change.
6768 ret = rbd_dev_v2_features(rbd_dev);
6772 /* If the image supports fancy striping, get its parameters */
6774 if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) {
6775 ret = rbd_dev_v2_striping_info(rbd_dev);
6780 if (rbd_dev->header.features & RBD_FEATURE_DATA_POOL) {
6781 ret = rbd_dev_v2_data_pool(rbd_dev);
6786 rbd_init_layout(rbd_dev);
6790 rbd_dev->header.features = 0;
6791 kfree(rbd_dev->header.object_prefix);
6792 rbd_dev->header.object_prefix = NULL;
6797 * @depth is rbd_dev_image_probe() -> rbd_dev_probe_parent() ->
6798 * rbd_dev_image_probe() recursion depth, which means it's also the
6799 * length of the already discovered part of the parent chain.
6801 static int rbd_dev_probe_parent(struct rbd_device *rbd_dev, int depth)
6803 struct rbd_device *parent = NULL;
6806 if (!rbd_dev->parent_spec)
6809 if (++depth > RBD_MAX_PARENT_CHAIN_LEN) {
6810 pr_info("parent chain is too long (%d)\n", depth);
6815 parent = __rbd_dev_create(rbd_dev->rbd_client, rbd_dev->parent_spec);
6822 * Images related by parent/child relationships always share
6823 * rbd_client and spec/parent_spec, so bump their refcounts.
6825 __rbd_get_client(rbd_dev->rbd_client);
6826 rbd_spec_get(rbd_dev->parent_spec);
6828 __set_bit(RBD_DEV_FLAG_READONLY, &parent->flags);
6830 ret = rbd_dev_image_probe(parent, depth);
6834 rbd_dev->parent = parent;
6835 atomic_set(&rbd_dev->parent_ref, 1);
6839 rbd_dev_unparent(rbd_dev);
6840 rbd_dev_destroy(parent);
6844 static void rbd_dev_device_release(struct rbd_device *rbd_dev)
6846 clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
6847 rbd_free_disk(rbd_dev);
6849 unregister_blkdev(rbd_dev->major, rbd_dev->name);
6853 * rbd_dev->header_rwsem must be locked for write and will be unlocked upon return.
6856 static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
6860 /* Record our major and minor device numbers. */
6862 if (!single_major) {
6863 ret = register_blkdev(0, rbd_dev->name);
6865 goto err_out_unlock;
6867 rbd_dev->major = ret;
6870 rbd_dev->major = rbd_major;
6871 rbd_dev->minor = rbd_dev_id_to_minor(rbd_dev->dev_id);
6874 /* Set up the blkdev mapping. */
6876 ret = rbd_init_disk(rbd_dev);
6878 goto err_out_blkdev;
6880 set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
6881 set_disk_ro(rbd_dev->disk, rbd_is_ro(rbd_dev));
6883 ret = dev_set_name(&rbd_dev->dev, "%d", rbd_dev->dev_id);
6887 set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
6888 up_write(&rbd_dev->header_rwsem);
6892 rbd_free_disk(rbd_dev);
6895 unregister_blkdev(rbd_dev->major, rbd_dev->name);
6897 up_write(&rbd_dev->header_rwsem);
6901 static int rbd_dev_header_name(struct rbd_device *rbd_dev)
6903 struct rbd_spec *spec = rbd_dev->spec;
6906 /* Record the header object name for this rbd image. */
6908 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
6909 if (rbd_dev->image_format == 1)
6910 ret = ceph_oid_aprintf(&rbd_dev->header_oid, GFP_KERNEL, "%s%s",
6911 spec->image_name, RBD_SUFFIX);
6913 ret = ceph_oid_aprintf(&rbd_dev->header_oid, GFP_KERNEL, "%s%s",
6914 RBD_HEADER_PREFIX, spec->image_id);
6919 static void rbd_print_dne(struct rbd_device *rbd_dev, bool is_snap)
6922 pr_info("image %s/%s%s%s does not exist\n",
6923 rbd_dev->spec->pool_name,
6924 rbd_dev->spec->pool_ns ?: "",
6925 rbd_dev->spec->pool_ns ? "/" : "",
6926 rbd_dev->spec->image_name);
6928 pr_info("snap %s/%s%s%s@%s does not exist\n",
6929 rbd_dev->spec->pool_name,
6930 rbd_dev->spec->pool_ns ?: "",
6931 rbd_dev->spec->pool_ns ? "/" : "",
6932 rbd_dev->spec->image_name,
6933 rbd_dev->spec->snap_name);
6937 static void rbd_dev_image_release(struct rbd_device *rbd_dev)
6939 if (!rbd_is_ro(rbd_dev))
6940 rbd_unregister_watch(rbd_dev);
6942 rbd_dev_unprobe(rbd_dev);
6943 rbd_dev->image_format = 0;
6944 kfree(rbd_dev->spec->image_id);
6945 rbd_dev->spec->image_id = NULL;
6949 * Probe for the existence of the header object for the given rbd
6950 * device. If this image is the one being mapped (i.e., not a
6951 * parent), initiate a watch on its header object before using that
6952 * object to get detailed information about the rbd image.
6954 * On success, returns with header_rwsem held for write if called with @depth == 0.
6957 static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
6959 bool need_watch = !rbd_is_ro(rbd_dev);
6963 * Get the id from the image id object. Unless there's an
6964 * error, rbd_dev->spec->image_id will be filled in with
6965 * a dynamically-allocated string, and rbd_dev->image_format
6966 * will be set to either 1 or 2.
6968 ret = rbd_dev_image_id(rbd_dev);
6972 ret = rbd_dev_header_name(rbd_dev);
6974 goto err_out_format;
6977 ret = rbd_register_watch(rbd_dev);
6980 rbd_print_dne(rbd_dev, false);
6981 goto err_out_format;
6986 down_write(&rbd_dev->header_rwsem);
6988 ret = rbd_dev_header_info(rbd_dev);
6990 if (ret == -ENOENT && !need_watch)
6991 rbd_print_dne(rbd_dev, false);
6996 * If this image is the one being mapped, we have pool name and
6997 * id, image name and id, and snap name - need to fill snap id.
6998 * Otherwise this is a parent image, identified by pool, image
6999 * and snap ids - need to fill in names for those ids.
7002 ret = rbd_spec_fill_snap_id(rbd_dev);
7004 ret = rbd_spec_fill_names(rbd_dev);
7007 rbd_print_dne(rbd_dev, true);
7011 ret = rbd_dev_mapping_set(rbd_dev);
7015 if (rbd_is_snap(rbd_dev) &&
7016 (rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP)) {
7017 ret = rbd_object_map_load(rbd_dev);
7022 if (rbd_dev->header.features & RBD_FEATURE_LAYERING) {
7023 ret = rbd_dev_v2_parent_info(rbd_dev);
7028 ret = rbd_dev_probe_parent(rbd_dev, depth);
7032 dout("discovered format %u image, header name is %s\n",
7033 rbd_dev->image_format, rbd_dev->header_oid.name);
7038 up_write(&rbd_dev->header_rwsem);
7040 rbd_unregister_watch(rbd_dev);
7041 rbd_dev_unprobe(rbd_dev);
7043 rbd_dev->image_format = 0;
7044 kfree(rbd_dev->spec->image_id);
7045 rbd_dev->spec->image_id = NULL;
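/*
 * Handle a write to /sys/bus/rbd/add (or add_single_major): parse the
 * arguments, set up the ceph client and resolve the pool id, probe
 * the image and its parent chain, create the block device, acquire
 * the exclusive lock if requested, and announce the disk.
 */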
7049 static ssize_t do_rbd_add(struct bus_type *bus,
7053 struct rbd_device *rbd_dev = NULL;
7054 struct ceph_options *ceph_opts = NULL;
7055 struct rbd_options *rbd_opts = NULL;
7056 struct rbd_spec *spec = NULL;
7057 struct rbd_client *rbdc;
7060 if (!try_module_get(THIS_MODULE))
7063 /* parse add command */
7064 rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
7068 rbdc = rbd_get_client(ceph_opts);
7075 rc = ceph_pg_poolid_by_name(rbdc->client->osdc.osdmap, spec->pool_name);
7078 pr_info("pool %s does not exist\n", spec->pool_name);
7079 goto err_out_client;
7081 spec->pool_id = (u64)rc;
7083 rbd_dev = rbd_dev_create(rbdc, spec, rbd_opts);
7086 goto err_out_client;
7088 rbdc = NULL; /* rbd_dev now owns this */
7089 spec = NULL; /* rbd_dev now owns this */
7090 rbd_opts = NULL; /* rbd_dev now owns this */
7092 /* if we are mapping a snapshot it will be a read-only mapping */
7093 if (rbd_dev->opts->read_only ||
7094 strcmp(rbd_dev->spec->snap_name, RBD_SNAP_HEAD_NAME))
7095 __set_bit(RBD_DEV_FLAG_READONLY, &rbd_dev->flags);
7097 rbd_dev->config_info = kstrdup(buf, GFP_KERNEL);
7098 if (!rbd_dev->config_info) {
7100 goto err_out_rbd_dev;
7103 rc = rbd_dev_image_probe(rbd_dev, 0);
7105 goto err_out_rbd_dev;
7107 if (rbd_dev->opts->alloc_size > rbd_dev->layout.object_size) {
7108 rbd_warn(rbd_dev, "alloc_size adjusted to %u",
7109 rbd_dev->layout.object_size);
7110 rbd_dev->opts->alloc_size = rbd_dev->layout.object_size;
7113 rc = rbd_dev_device_setup(rbd_dev);
7115 goto err_out_image_probe;
7117 rc = rbd_add_acquire_lock(rbd_dev);
7119 goto err_out_image_lock;
7121 /* Everything's ready. Announce the disk to the world. */
7123 rc = device_add(&rbd_dev->dev);
7125 goto err_out_image_lock;
7127 device_add_disk(&rbd_dev->dev, rbd_dev->disk, NULL);
7128 /* see rbd_init_disk() */
7129 blk_put_queue(rbd_dev->disk->queue);
7131 spin_lock(&rbd_dev_list_lock);
7132 list_add_tail(&rbd_dev->node, &rbd_dev_list);
7133 spin_unlock(&rbd_dev_list_lock);
7135 pr_info("%s: capacity %llu features 0x%llx\n", rbd_dev->disk->disk_name,
7136 (unsigned long long)get_capacity(rbd_dev->disk) << SECTOR_SHIFT,
7137 rbd_dev->header.features);
7140 module_put(THIS_MODULE);
7144 rbd_dev_image_unlock(rbd_dev);
7145 rbd_dev_device_release(rbd_dev);
7146 err_out_image_probe:
7147 rbd_dev_image_release(rbd_dev);
7149 rbd_dev_destroy(rbd_dev);
7151 rbd_put_client(rbdc);
7158 static ssize_t add_store(struct bus_type *bus, const char *buf, size_t count)
7163 return do_rbd_add(bus, buf, count);
7166 static ssize_t add_single_major_store(struct bus_type *bus, const char *buf,
7169 return do_rbd_add(bus, buf, count);
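/*
 * Tear down the parent chain bottom-up: repeatedly walk to the
 * deepest ancestor (the parent with no grandparent of its own),
 * release and destroy it, and detach it from its child.
 */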
7172 static void rbd_dev_remove_parent(struct rbd_device *rbd_dev)
7174 while (rbd_dev->parent) {
7175 struct rbd_device *first = rbd_dev;
7176 struct rbd_device *second = first->parent;
7177 struct rbd_device *third;
7180 * Follow to the parent with no grandparent and remove it.
7183 while (second && (third = second->parent)) {
7188 rbd_dev_image_release(second);
7189 rbd_dev_destroy(second);
7190 first->parent = NULL;
7191 first->parent_overlap = 0;
7193 rbd_assert(first->parent_spec);
7194 rbd_spec_put(first->parent_spec);
7195 first->parent_spec = NULL;
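/*
 * Handle a write to /sys/bus/rbd/remove (or remove_single_major):
 * "<dev-id> [force]". Without "force", removal is refused while the
 * device is open; with it, in-flight and future I/O is failed first.
 * A hypothetical example:
 *
 *   $ echo "0" > /sys/bus/rbd/remove
 */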
7199 static ssize_t do_rbd_remove(struct bus_type *bus,
7203 struct rbd_device *rbd_dev = NULL;
7204 struct list_head *tmp;
7212 sscanf(buf, "%d %5s", &dev_id, opt_buf);
7214 pr_err("dev_id out of range\n");
7217 if (opt_buf[0] != '\0') {
7218 if (!strcmp(opt_buf, "force")) {
7221 pr_err("bad remove option at '%s'\n", opt_buf);
7227 spin_lock(&rbd_dev_list_lock);
7228 list_for_each(tmp, &rbd_dev_list) {
7229 rbd_dev = list_entry(tmp, struct rbd_device, node);
7230 if (rbd_dev->dev_id == dev_id) {
7236 spin_lock_irq(&rbd_dev->lock);
7237 if (rbd_dev->open_count && !force)
7239 else if (test_and_set_bit(RBD_DEV_FLAG_REMOVING,
7242 spin_unlock_irq(&rbd_dev->lock);
7244 spin_unlock(&rbd_dev_list_lock);
7250 * Prevent new IO from being queued and wait for existing
7251 * IO to complete/fail.
7253 blk_mq_freeze_queue(rbd_dev->disk->queue);
7254 blk_set_queue_dying(rbd_dev->disk->queue);
7257 del_gendisk(rbd_dev->disk);
7258 spin_lock(&rbd_dev_list_lock);
7259 list_del_init(&rbd_dev->node);
7260 spin_unlock(&rbd_dev_list_lock);
7261 device_del(&rbd_dev->dev);
7263 rbd_dev_image_unlock(rbd_dev);
7264 rbd_dev_device_release(rbd_dev);
7265 rbd_dev_image_release(rbd_dev);
7266 rbd_dev_destroy(rbd_dev);
7270 static ssize_t remove_store(struct bus_type *bus, const char *buf, size_t count)
7275 return do_rbd_remove(bus, buf, count);
7278 static ssize_t remove_single_major_store(struct bus_type *bus, const char *buf,
7281 return do_rbd_remove(bus, buf, count);
7285 * create control files in sysfs
7288 static int __init rbd_sysfs_init(void)
7292 ret = device_register(&rbd_root_dev);
7296 ret = bus_register(&rbd_bus_type);
7298 device_unregister(&rbd_root_dev);
7303 static void __exit rbd_sysfs_cleanup(void)
7305 bus_unregister(&rbd_bus_type);
7306 device_unregister(&rbd_root_dev);
7309 static int __init rbd_slab_init(void)
7311 rbd_assert(!rbd_img_request_cache);
7312 rbd_img_request_cache = KMEM_CACHE(rbd_img_request, 0);
7313 if (!rbd_img_request_cache)
7316 rbd_assert(!rbd_obj_request_cache);
7317 rbd_obj_request_cache = KMEM_CACHE(rbd_obj_request, 0);
7318 if (!rbd_obj_request_cache)
7324 kmem_cache_destroy(rbd_img_request_cache);
7325 rbd_img_request_cache = NULL;
7329 static void rbd_slab_exit(void)
7331 rbd_assert(rbd_obj_request_cache);
7332 kmem_cache_destroy(rbd_obj_request_cache);
7333 rbd_obj_request_cache = NULL;
7335 rbd_assert(rbd_img_request_cache);
7336 kmem_cache_destroy(rbd_img_request_cache);
7337 rbd_img_request_cache = NULL;
7340 static int __init rbd_init(void)
7344 if (!libceph_compatible(NULL)) {
7345 rbd_warn(NULL, "libceph incompatibility (quitting)");
7349 rc = rbd_slab_init();
7354 * The number of active work items is limited by the number of
7355 * rbd devices * queue depth, so leave @max_active at default.
7357 rbd_wq = alloc_workqueue(RBD_DRV_NAME, WQ_MEM_RECLAIM, 0);
7364 rbd_major = register_blkdev(0, RBD_DRV_NAME);
7365 if (rbd_major < 0) {
7371 rc = rbd_sysfs_init();
7373 goto err_out_blkdev;
7376 pr_info("loaded (major %d)\n", rbd_major);
7378 pr_info("loaded\n");
7384 unregister_blkdev(rbd_major, RBD_DRV_NAME);
7386 destroy_workqueue(rbd_wq);
7392 static void __exit rbd_exit(void)
7394 ida_destroy(&rbd_dev_id_ida);
7395 rbd_sysfs_cleanup();
7397 unregister_blkdev(rbd_major, RBD_DRV_NAME);
7398 destroy_workqueue(rbd_wq);
7402 module_init(rbd_init);
7403 module_exit(rbd_exit);
7405 MODULE_AUTHOR("Alex Elder <elder@inktank.com>");
7406 MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
7407 MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
7408 /* following authorship retained from original osdblk.c */
7409 MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>");
7411 MODULE_DESCRIPTION("RADOS Block Device (RBD) driver");
7412 MODULE_LICENSE("GPL");