/*
   rbd.c -- Export ceph rados objects as a Linux block device

   based on drivers/block/osdblk.c:

   Copyright 2009 Red Hat, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

   For usage instructions, please refer to:

		 Documentation/ABI/testing/sysfs-bus-rbd

 */
#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/mon_client.h>
#include <linux/ceph/cls_lock_client.h>
#include <linux/ceph/striper.h>
#include <linux/ceph/decode.h>
#include <linux/fs_parser.h>
#include <linux/bsearch.h>

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/blk-mq.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/workqueue.h>

#include "rbd_types.h"

#define RBD_DEBUG	/* Activate rbd_assert() calls */
/*
 * Increment the given counter and return its updated value.
 * If the counter is already 0 it will not be incremented.
 * If the counter is already at its maximum value returns
 * -EINVAL without updating it.
 */
static int atomic_inc_return_safe(atomic_t *v)
{
	unsigned int counter;

	counter = (unsigned int)atomic_fetch_add_unless(v, 1, 0);
	if (counter <= (unsigned int)INT_MAX)
		return (int)counter;

	atomic_dec(v);
	return -EINVAL;
}

/* Decrement the counter.  Return the resulting value, or -EINVAL */
static int atomic_dec_return_safe(atomic_t *v)
{
	int counter;

	counter = atomic_dec_return(v);
	if (counter >= 0)
		return counter;

	atomic_inc(v);
	return -EINVAL;
}
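/*
 * Together these give a saturating reference count: once the count has
 * dropped to 0 or been poisoned by an overflow, it stays pinned and all
 * further get/put attempts fail.  A minimal usage sketch (names here
 * are illustrative, not driver code):
 *
 *	static atomic_t ref = ATOMIC_INIT(1);
 *
 *	if (atomic_inc_return_safe(&ref) < 0)
 *		return false;		// pinned at 0 or overflowed
 *	...
 *	if (atomic_dec_return_safe(&ref) < 0)
 *		pr_warn("reference underflow\n");
 *
 * rbd_dev_parent_get()/rbd_dev_parent_put() below use exactly this
 * pattern for rbd_dev->parent_ref.
 */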
#define RBD_DRV_NAME "rbd"

#define RBD_MINORS_PER_MAJOR		256
#define RBD_SINGLE_MAJOR_PART_SHIFT	4

#define RBD_MAX_PARENT_CHAIN_LEN	16

#define RBD_SNAP_DEV_NAME_PREFIX	"snap_"
#define RBD_MAX_SNAP_NAME_LEN \
			(NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))

#define RBD_MAX_SNAP_COUNT	510	/* allows max snapc to fit in 4KB */
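/*
 * Worked out: with NAME_MAX = 255 and sizeof("snap_") - 1 = 5, a
 * snapshot device name may carry at most 250 bytes of snapshot name.
 * Likewise, 510 snapshot ids take 510 * 8 = 4080 bytes, which together
 * with the small seq/count header of the on-wire snap context still
 * fits in a single 4KB page.
 */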
#define RBD_SNAP_HEAD_NAME	"-"

#define	BAD_SNAP_INDEX	U32_MAX		/* invalid index into snap array */

/* This allows a single page to hold an image name sent by OSD */
#define RBD_IMAGE_NAME_LEN_MAX	(PAGE_SIZE - sizeof (__le32) - 1)
#define RBD_IMAGE_ID_LEN_MAX	64

#define RBD_OBJ_PREFIX_LEN_MAX	64

#define RBD_NOTIFY_TIMEOUT	5	/* seconds */
#define RBD_RETRY_DELAY		msecs_to_jiffies(1000)
#define RBD_FEATURE_LAYERING		(1ULL<<0)
#define RBD_FEATURE_STRIPINGV2		(1ULL<<1)
#define RBD_FEATURE_EXCLUSIVE_LOCK	(1ULL<<2)
#define RBD_FEATURE_OBJECT_MAP		(1ULL<<3)
#define RBD_FEATURE_FAST_DIFF		(1ULL<<4)
#define RBD_FEATURE_DEEP_FLATTEN	(1ULL<<5)
#define RBD_FEATURE_DATA_POOL		(1ULL<<7)
#define RBD_FEATURE_OPERATIONS		(1ULL<<8)

#define RBD_FEATURES_ALL	(RBD_FEATURE_LAYERING |		\
				 RBD_FEATURE_STRIPINGV2 |	\
				 RBD_FEATURE_EXCLUSIVE_LOCK |	\
				 RBD_FEATURE_OBJECT_MAP |	\
				 RBD_FEATURE_FAST_DIFF |	\
				 RBD_FEATURE_DEEP_FLATTEN |	\
				 RBD_FEATURE_DATA_POOL |	\
				 RBD_FEATURE_OPERATIONS)

/* Features supported by this (client software) implementation. */

#define RBD_FEATURES_SUPPORTED	(RBD_FEATURES_ALL)
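/*
 * At probe time the image's feature bits are checked against this
 * mask; conceptually, an image using any bit outside it is refused:
 *
 *	if (features & ~RBD_FEATURES_SUPPORTED)
 *		return -ENXIO;	// image uses unsupported features
 */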
/*
 * An RBD device name will be "rbd#", where the "rbd" comes from
 * RBD_DRV_NAME above, and # is a unique integer identifier.
 */
#define DEV_NAME_LEN		32

/*
 * block device image metadata (in-memory version)
 */
struct rbd_image_header {
	/* These six fields never change for a given rbd image */
	char *object_prefix;
	__u8 obj_order;
	u64 stripe_unit;
	u64 stripe_count;
	s64 data_pool_id;
	u64 features;		/* Might be changeable someday? */

	/* The remaining fields need to be updated occasionally */
	u64 image_size;
	struct ceph_snap_context *snapc;
	char *snap_names;	/* format 1 only */
	u64 *snap_sizes;	/* format 1 only */
};
/*
 * An rbd image specification.
 *
 * The tuple (pool_id, image_id, snap_id) is sufficient to uniquely
 * identify an image.  Each rbd_dev structure includes a pointer to
 * an rbd_spec structure that encapsulates this identity.
 *
 * Each of the id's in an rbd_spec has an associated name.  For a
 * user-mapped image, the names are supplied and the id's associated
 * with them are looked up.  For a layered image, a parent image is
 * defined by the tuple, and the names are looked up.
 *
 * An rbd_dev structure contains a parent_spec pointer which is
 * non-null if the image it represents is a child in a layered
 * image.  This pointer will refer to the rbd_spec structure used
 * by the parent rbd_dev for its own identity (i.e., the structure
 * is shared between the parent and child).
 *
 * Since these structures are populated once, during the discovery
 * phase of image construction, they are effectively immutable so
 * we make no effort to synchronize access to them.
 *
 * Note that code herein does not assume the image name is known (it
 * could be a null pointer).
 */
struct rbd_spec {
	u64		pool_id;
	const char	*pool_name;
	const char	*pool_ns;	/* NULL if default, never "" */

	const char	*image_id;
	const char	*image_name;

	u64		snap_id;
	const char	*snap_name;

	struct kref	kref;
};
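/*
 * For example, mapping "rbd/foo@s1" initially yields only the names
 * (pool_name "rbd", image_name "foo", snap_name "s1"); the probe code
 * then fills in pool_id, image_id and snap_id by querying the cluster.
 */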
/*
 * an instance of the client.  multiple devices may share an rbd client.
 */
struct rbd_client {
	struct ceph_client	*client;
	struct kref		kref;
	struct list_head	node;
};
struct pending_result {
	int			result;		/* first nonzero result */
	int			num_pending;
};

struct rbd_img_request;

enum obj_request_type {
	OBJ_REQUEST_NODATA = 1,
	OBJ_REQUEST_BIO,	/* pointer into provided bio (list) */
	OBJ_REQUEST_BVECS,	/* pointer into provided bio_vec array */
	OBJ_REQUEST_OWN_BVECS,	/* private bio_vec array, doesn't own pages */
};

enum obj_operation_type {
	OBJ_OP_READ = 1,
	OBJ_OP_WRITE,
	OBJ_OP_DISCARD,
	OBJ_OP_ZEROOUT,
};

#define RBD_OBJ_FLAG_DELETION			(1U << 0)
#define RBD_OBJ_FLAG_COPYUP_ENABLED		(1U << 1)
#define RBD_OBJ_FLAG_COPYUP_ZEROS		(1U << 2)
#define RBD_OBJ_FLAG_MAY_EXIST			(1U << 3)
#define RBD_OBJ_FLAG_NOOP_FOR_NONEXISTENT	(1U << 4)
enum rbd_obj_read_state {
	RBD_OBJ_READ_START = 1,
	RBD_OBJ_READ_OBJECT,
	RBD_OBJ_READ_PARENT,
};
/*
 * Writes go through the following state machine to deal with
 * layering:
 *
 *   RBD_OBJ_WRITE_GUARD
 *     -> RBD_OBJ_WRITE_READ_FROM_PARENT
 *     -> RBD_OBJ_WRITE_COPYUP_EMPTY_SNAPC  (skipped if deep-copyup
 *     -> RBD_OBJ_WRITE_COPYUP_OPS           is not needed)
 *     -> done
 *
 * with short-circuits straight to done when the image is flattened
 * or no copyup is needed at all.
 *
 * Writes start in RBD_OBJ_WRITE_GUARD or _FLAT, depending on whether
 * assert_exists guard is needed or not (in some cases it's not needed
 * even if there is a parent).
 */
enum rbd_obj_write_state {
	RBD_OBJ_WRITE_START = 1,
	RBD_OBJ_WRITE_PRE_OBJECT_MAP,
	RBD_OBJ_WRITE_OBJECT,
	__RBD_OBJ_WRITE_COPYUP,
	RBD_OBJ_WRITE_COPYUP,
	RBD_OBJ_WRITE_POST_OBJECT_MAP,
};

enum rbd_obj_copyup_state {
	RBD_OBJ_COPYUP_START = 1,
	RBD_OBJ_COPYUP_READ_PARENT,
	__RBD_OBJ_COPYUP_OBJECT_MAPS,
	RBD_OBJ_COPYUP_OBJECT_MAPS,
	__RBD_OBJ_COPYUP_WRITE_OBJECT,
	RBD_OBJ_COPYUP_WRITE_OBJECT,
};
struct rbd_obj_request {
	struct ceph_object_extent ex;
	unsigned int		flags;	/* RBD_OBJ_FLAG_* */
	union {
		enum rbd_obj_read_state	 read_state;	/* for reads */
		enum rbd_obj_write_state write_state;	/* for writes */
	};

	struct rbd_img_request	*img_request;
	struct ceph_file_extent	*img_extents;
	u32			num_img_extents;

	union {
		struct ceph_bio_iter	bio_pos;
		struct {
			struct ceph_bvec_iter	bvec_pos;
			u32			bvec_count;
			u32			bvec_idx;
		};
	};

	enum rbd_obj_copyup_state copyup_state;
	struct bio_vec		*copyup_bvecs;
	u32			copyup_bvec_count;

	struct list_head	osd_reqs;	/* w/ r_private_item */

	struct mutex		state_mutex;
	struct pending_result	pending;
	struct kref		kref;
};

enum img_req_flags {
	IMG_REQ_CHILD,		/* initiator: block = 0, child image = 1 */
	IMG_REQ_LAYERED,	/* ENOENT handling: normal = 0, layered = 1 */
};

enum rbd_img_state {
	RBD_IMG_START = 1,
	RBD_IMG_EXCLUSIVE_LOCK,
	__RBD_IMG_OBJECT_REQUESTS,
	RBD_IMG_OBJECT_REQUESTS,
};
struct rbd_img_request {
	struct rbd_device	*rbd_dev;
	enum obj_operation_type	op_type;
	enum obj_request_type	data_type;
	unsigned long		flags;
	enum rbd_img_state	state;
	union {
		u64			snap_id;	/* for reads */
		struct ceph_snap_context *snapc;	/* for writes */
	};
	struct rbd_obj_request	*obj_request;	/* obj req initiator */

	struct list_head	lock_item;
	struct list_head	object_extents;	/* obj_req.ex structs */

	struct mutex		state_mutex;
	struct pending_result	pending;
	struct work_struct	work;
	int			work_result;
};

#define for_each_obj_request(ireq, oreq) \
	list_for_each_entry(oreq, &(ireq)->object_extents, ex.oe_item)
#define for_each_obj_request_safe(ireq, oreq, n) \
	list_for_each_entry_safe(oreq, n, &(ireq)->object_extents, ex.oe_item)
enum rbd_watch_state {
	RBD_WATCH_STATE_UNREGISTERED,
	RBD_WATCH_STATE_REGISTERED,
	RBD_WATCH_STATE_ERROR,
};

enum rbd_lock_state {
	RBD_LOCK_STATE_UNLOCKED,
	RBD_LOCK_STATE_LOCKED,
	RBD_LOCK_STATE_RELEASING,
};

/* WatchNotify::ClientId */
struct rbd_client_id {
	u64 gid;
	u64 handle;
};

struct rbd_mapping {
	u64			size;
};

/*
 * a single device
 */
struct rbd_device {
	int			dev_id;		/* blkdev unique id */

	int			major;		/* blkdev assigned major */
	int			minor;
	struct gendisk		*disk;		/* blkdev's gendisk and rq */

	u32			image_format;	/* Either 1 or 2 */
	struct rbd_client	*rbd_client;

	char			name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */

	spinlock_t		lock;		/* queue, flags, open_count */

	struct rbd_image_header	header;
	unsigned long		flags;		/* possibly lock protected */
	struct rbd_spec		*spec;
	struct rbd_options	*opts;
	char			*config_info;	/* add{,_single_major} string */

	struct ceph_object_id	header_oid;
	struct ceph_object_locator header_oloc;

	struct ceph_file_layout	layout;		/* used for all rbd requests */

	struct mutex		watch_mutex;
	enum rbd_watch_state	watch_state;
	struct ceph_osd_linger_request *watch_handle;
	u64			watch_cookie;
	struct delayed_work	watch_dwork;

	struct rw_semaphore	lock_rwsem;
	enum rbd_lock_state	lock_state;
	char			lock_cookie[32];
	struct rbd_client_id	owner_cid;
	struct work_struct	acquired_lock_work;
	struct work_struct	released_lock_work;
	struct delayed_work	lock_dwork;
	struct work_struct	unlock_work;
	spinlock_t		lock_lists_lock;
	struct list_head	acquiring_list;
	struct list_head	running_list;
	struct completion	acquire_wait;
	int			acquire_err;
	struct completion	releasing_wait;

	spinlock_t		object_map_lock;
	u8			*object_map;
	u64			object_map_size;	/* in objects */
	u64			object_map_flags;

	struct workqueue_struct	*task_wq;

	struct rbd_spec		*parent_spec;
	u64			parent_overlap;
	atomic_t		parent_ref;
	struct rbd_device	*parent;

	/* Block layer tags. */
	struct blk_mq_tag_set	tag_set;

	/* protects updating the header */
	struct rw_semaphore	header_rwsem;

	struct rbd_mapping	mapping;

	struct list_head	node;

	/* sysfs related */
	struct device		dev;
	unsigned long		open_count;	/* protected by lock */
};
/*
 * Flag bits for rbd_dev->flags:
 * - REMOVING (which is coupled with rbd_dev->open_count) is protected
 *   by rbd_dev->lock
 */
enum rbd_dev_flags {
	RBD_DEV_FLAG_EXISTS,	/* rbd_dev_device_setup() ran */
	RBD_DEV_FLAG_REMOVING,	/* this mapping is being removed */
	RBD_DEV_FLAG_READONLY,	/* -o ro or snapshot */
};
static DEFINE_MUTEX(client_mutex);	/* Serialize client creation */

static LIST_HEAD(rbd_dev_list);    /* devices */
static DEFINE_SPINLOCK(rbd_dev_list_lock);

static LIST_HEAD(rbd_client_list);		/* clients */
static DEFINE_SPINLOCK(rbd_client_list_lock);

/* Slab caches for frequently-allocated structures */

static struct kmem_cache	*rbd_img_request_cache;
static struct kmem_cache	*rbd_obj_request_cache;

static int rbd_major;
static DEFINE_IDA(rbd_dev_id_ida);

static struct workqueue_struct *rbd_wq;

static struct ceph_snap_context rbd_empty_snapc = {
	.nref = REFCOUNT_INIT(1),
};
/*
 * single-major requires >= 0.75 version of userspace rbd utility.
 */
static bool single_major = true;
module_param(single_major, bool, 0444);
MODULE_PARM_DESC(single_major, "Use a single major number for all rbd devices (default: true)");

static ssize_t add_store(struct bus_type *bus, const char *buf, size_t count);
static ssize_t remove_store(struct bus_type *bus, const char *buf,
			    size_t count);
static ssize_t add_single_major_store(struct bus_type *bus, const char *buf,
				      size_t count);
static ssize_t remove_single_major_store(struct bus_type *bus, const char *buf,
					 size_t count);
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth);
static int rbd_dev_id_to_minor(int dev_id)
{
	return dev_id << RBD_SINGLE_MAJOR_PART_SHIFT;
}

static int minor_to_rbd_dev_id(int minor)
{
	return minor >> RBD_SINGLE_MAJOR_PART_SHIFT;
}
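/*
 * Worked example: with RBD_SINGLE_MAJOR_PART_SHIFT = 4, dev_id 3 maps
 * to minor 3 << 4 = 48, and minors 48..63 cover the whole device plus
 * up to 15 partitions; minor_to_rbd_dev_id() on any of 48..63 yields 3.
 */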
static bool rbd_is_ro(struct rbd_device *rbd_dev)
{
	return test_bit(RBD_DEV_FLAG_READONLY, &rbd_dev->flags);
}

static bool rbd_is_snap(struct rbd_device *rbd_dev)
{
	return rbd_dev->spec->snap_id != CEPH_NOSNAP;
}

static bool __rbd_is_lock_owner(struct rbd_device *rbd_dev)
{
	lockdep_assert_held(&rbd_dev->lock_rwsem);

	return rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED ||
	       rbd_dev->lock_state == RBD_LOCK_STATE_RELEASING;
}

static bool rbd_is_lock_owner(struct rbd_device *rbd_dev)
{
	bool is_lock_owner;

	down_read(&rbd_dev->lock_rwsem);
	is_lock_owner = __rbd_is_lock_owner(rbd_dev);
	up_read(&rbd_dev->lock_rwsem);
	return is_lock_owner;
}

static ssize_t supported_features_show(struct bus_type *bus, char *buf)
{
	return sprintf(buf, "0x%llx\n", RBD_FEATURES_SUPPORTED);
}

static BUS_ATTR_WO(add);
static BUS_ATTR_WO(remove);
static BUS_ATTR_WO(add_single_major);
static BUS_ATTR_WO(remove_single_major);
static BUS_ATTR_RO(supported_features);
static struct attribute *rbd_bus_attrs[] = {
	&bus_attr_add.attr,
	&bus_attr_remove.attr,
	&bus_attr_add_single_major.attr,
	&bus_attr_remove_single_major.attr,
	&bus_attr_supported_features.attr,
	NULL,
};

static umode_t rbd_bus_is_visible(struct kobject *kobj,
				  struct attribute *attr, int index)
{
	if (!single_major &&
	    (attr == &bus_attr_add_single_major.attr ||
	     attr == &bus_attr_remove_single_major.attr))
		return 0;

	return attr->mode;
}

static const struct attribute_group rbd_bus_group = {
	.attrs = rbd_bus_attrs,
	.is_visible = rbd_bus_is_visible,
};
__ATTRIBUTE_GROUPS(rbd_bus);

static struct bus_type rbd_bus_type = {
	.name		= "rbd",
	.bus_groups	= rbd_bus_groups,
};

static void rbd_root_dev_release(struct device *dev)
{
}

static struct device rbd_root_dev = {
	.init_name =    "rbd",
	.release =      rbd_root_dev_release,
};
static __printf(2, 3)
void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;

	if (!rbd_dev)
		printk(KERN_WARNING "%s: %pV\n", RBD_DRV_NAME, &vaf);
	else if (rbd_dev->disk)
		printk(KERN_WARNING "%s: %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->disk->disk_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_name)
		printk(KERN_WARNING "%s: image %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_id)
		printk(KERN_WARNING "%s: id %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_id, &vaf);
	else	/* punt */
		printk(KERN_WARNING "%s: rbd_dev %p: %pV\n",
			RBD_DRV_NAME, rbd_dev, &vaf);
	va_end(args);
}
#ifdef RBD_DEBUG
#define rbd_assert(expr)						\
		if (unlikely(!(expr))) {				\
			printk(KERN_ERR "\nAssertion failure in %s() "	\
						"at line %d:\n\n"	\
					"\trbd_assert(%s);\n\n",	\
					__func__, __LINE__, #expr);	\
			BUG();						\
		}
#else /* !RBD_DEBUG */
#  define rbd_assert(expr)	((void) 0)
#endif /* !RBD_DEBUG */
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);

static int rbd_dev_refresh(struct rbd_device *rbd_dev);
static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev);
static int rbd_dev_header_info(struct rbd_device *rbd_dev);
static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev);
static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id);
static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
				 u8 *order, u64 *snap_size);
static int rbd_dev_v2_get_flags(struct rbd_device *rbd_dev);

static void rbd_obj_handle_request(struct rbd_obj_request *obj_req, int result);
static void rbd_img_handle_request(struct rbd_img_request *img_req, int result);
/*
 * Return true if nothing else is pending.
 */
static bool pending_result_dec(struct pending_result *pending, int *result)
{
	rbd_assert(pending->num_pending > 0);

	if (*result && !pending->result)
		pending->result = *result;
	if (--pending->num_pending)
		return false;

	*result = pending->result;
	return true;
}
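/*
 * Typical use, as in the image/object state machines below: set
 * pending->num_pending to the number of sub-requests issued, have each
 * completion call pending_result_dec(), and only advance when it
 * returns true.  A minimal sketch (advance_state_machine() is an
 * illustrative name, not driver code):
 *
 *	pending->num_pending = nr_issued;
 *	...
 *	// in each completion callback:
 *	if (!pending_result_dec(pending, &result))
 *		return;				// others still in flight
 *	advance_state_machine(result);		// sees first nonzero result
 */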
static int rbd_open(struct block_device *bdev, fmode_t mode)
{
	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
	bool removing = false;

	spin_lock_irq(&rbd_dev->lock);
	if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
		removing = true;
	else
		rbd_dev->open_count++;
	spin_unlock_irq(&rbd_dev->lock);
	if (removing)
		return -ENOENT;

	(void) get_device(&rbd_dev->dev);

	return 0;
}

static void rbd_release(struct gendisk *disk, fmode_t mode)
{
	struct rbd_device *rbd_dev = disk->private_data;
	unsigned long open_count_before;

	spin_lock_irq(&rbd_dev->lock);
	open_count_before = rbd_dev->open_count--;
	spin_unlock_irq(&rbd_dev->lock);
	rbd_assert(open_count_before > 0);

	put_device(&rbd_dev->dev);
}

static const struct block_device_operations rbd_bd_ops = {
	.owner			= THIS_MODULE,
	.open			= rbd_open,
	.release		= rbd_release,
};
/*
 * Initialize an rbd client instance.  Success or not, this function
 * consumes ceph_opts.  Caller holds client_mutex.
 */
static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;
	int ret = -ENOMEM;

	dout("%s:\n", __func__);
	rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL);
	if (!rbdc)
		goto out_opt;

	kref_init(&rbdc->kref);
	INIT_LIST_HEAD(&rbdc->node);

	rbdc->client = ceph_create_client(ceph_opts, rbdc);
	if (IS_ERR(rbdc->client))
		goto out_rbdc;
	ceph_opts = NULL; /* Now rbdc->client is responsible for ceph_opts */

	ret = ceph_open_session(rbdc->client);
	if (ret < 0)
		goto out_client;

	spin_lock(&rbd_client_list_lock);
	list_add_tail(&rbdc->node, &rbd_client_list);
	spin_unlock(&rbd_client_list_lock);

	dout("%s: rbdc %p\n", __func__, rbdc);

	return rbdc;
out_client:
	ceph_destroy_client(rbdc->client);
out_rbdc:
	kfree(rbdc);
out_opt:
	if (ceph_opts)
		ceph_destroy_options(ceph_opts);
	dout("%s: error %d\n", __func__, ret);

	return ERR_PTR(ret);
}
static struct rbd_client *__rbd_get_client(struct rbd_client *rbdc)
{
	kref_get(&rbdc->kref);

	return rbdc;
}

/*
 * Find a ceph client with specific addr and configuration.  If
 * found, bump its reference count.
 */
static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
{
	struct rbd_client *client_node;
	bool found = false;

	if (ceph_opts->flags & CEPH_OPT_NOSHARE)
		return NULL;

	spin_lock(&rbd_client_list_lock);
	list_for_each_entry(client_node, &rbd_client_list, node) {
		if (!ceph_compare_options(ceph_opts, client_node->client)) {
			__rbd_get_client(client_node);

			found = true;
			break;
		}
	}
	spin_unlock(&rbd_client_list_lock);

	return found ? client_node : NULL;
}
/*
 * (Per device) rbd map options
 */
enum {
	Opt_queue_depth,
	Opt_alloc_size,
	Opt_lock_timeout,
	/* int args above */
	Opt_pool_ns,
	Opt_compression_hint,
	/* string args above */
	Opt_read_only,
	Opt_read_write,
	Opt_lock_on_read,
	Opt_exclusive,
	Opt_notrim,
};

enum {
	Opt_compression_hint_none,
	Opt_compression_hint_compressible,
	Opt_compression_hint_incompressible,
};

static const struct constant_table rbd_param_compression_hint[] = {
	{"none",		Opt_compression_hint_none},
	{"compressible",	Opt_compression_hint_compressible},
	{"incompressible",	Opt_compression_hint_incompressible},
	{}
};

static const struct fs_parameter_spec rbd_parameters[] = {
	fsparam_u32	("alloc_size",			Opt_alloc_size),
	fsparam_enum	("compression_hint",		Opt_compression_hint,
			 rbd_param_compression_hint),
	fsparam_flag	("exclusive",			Opt_exclusive),
	fsparam_flag	("lock_on_read",		Opt_lock_on_read),
	fsparam_u32	("lock_timeout",		Opt_lock_timeout),
	fsparam_flag	("notrim",			Opt_notrim),
	fsparam_string	("_pool_ns",			Opt_pool_ns),
	fsparam_u32	("queue_depth",			Opt_queue_depth),
	fsparam_flag	("read_only",			Opt_read_only),
	fsparam_flag	("read_write",			Opt_read_write),
	fsparam_flag	("ro",				Opt_read_only),
	fsparam_flag	("rw",				Opt_read_write),
	{}
};
struct rbd_options {
	int	queue_depth;		/* max parallel requests */
	int	alloc_size;		/* block allocation hint granularity */
	unsigned long	lock_timeout;
	bool	read_only;
	bool	lock_on_read;
	bool	exclusive;
	bool	trim;

	u32 alloc_hint_flags;  /* CEPH_OSD_OP_ALLOC_HINT_FLAG_* */
};

#define RBD_QUEUE_DEPTH_DEFAULT	BLKDEV_MAX_RQ
#define RBD_ALLOC_SIZE_DEFAULT	(64 * 1024)
#define RBD_LOCK_TIMEOUT_DEFAULT 0  /* no timeout */
#define RBD_READ_ONLY_DEFAULT	false
#define RBD_LOCK_ON_READ_DEFAULT false
#define RBD_EXCLUSIVE_DEFAULT	false
#define RBD_TRIM_DEFAULT	true

struct rbd_parse_opts_ctx {
	struct rbd_spec		*spec;
	struct ceph_options	*copts;
	struct rbd_options	*opts;
};
static char* obj_op_name(enum obj_operation_type op_type)
{
	switch (op_type) {
	case OBJ_OP_READ:
		return "read";
	case OBJ_OP_WRITE:
		return "write";
	case OBJ_OP_DISCARD:
		return "discard";
	case OBJ_OP_ZEROOUT:
		return "zeroout";
	default:
		return "???";
	}
}
/*
 * Destroy ceph client.  Takes rbd_client_list_lock to unlink the
 * client from the list.
 */
static void rbd_client_release(struct kref *kref)
{
	struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);

	dout("%s: rbdc %p\n", __func__, rbdc);
	spin_lock(&rbd_client_list_lock);
	list_del(&rbdc->node);
	spin_unlock(&rbd_client_list_lock);

	ceph_destroy_client(rbdc->client);
	kfree(rbdc);
}

/*
 * Drop reference to ceph client node. If it's not referenced anymore, release
 * it.
 */
static void rbd_put_client(struct rbd_client *rbdc)
{
	if (rbdc)
		kref_put(&rbdc->kref, rbd_client_release);
}
/*
 * Get a ceph client with specific addr and configuration, if one does
 * not exist create it.  Either way, ceph_opts is consumed by this
 * function.
 */
static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;
	int ret;

	mutex_lock(&client_mutex);
	rbdc = rbd_client_find(ceph_opts);
	if (rbdc) {
		ceph_destroy_options(ceph_opts);

		/*
		 * Using an existing client.  Make sure ->pg_pools is up to
		 * date before we look up the pool id in do_rbd_add().
		 */
		ret = ceph_wait_for_latest_osdmap(rbdc->client,
					rbdc->client->options->mount_timeout);
		if (ret) {
			rbd_warn(NULL, "failed to get latest osdmap: %d", ret);
			rbd_put_client(rbdc);
			rbdc = ERR_PTR(ret);
		}
	} else {
		rbdc = rbd_client_create(ceph_opts);
	}
	mutex_unlock(&client_mutex);

	return rbdc;
}
static bool rbd_image_format_valid(u32 image_format)
{
	return image_format == 1 || image_format == 2;
}

static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
{
	size_t size;
	u32 snap_count;

	/* The header has to start with the magic rbd header text */
	if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)))
		return false;

	/* The bio layer requires at least sector-sized I/O */

	if (ondisk->options.order < SECTOR_SHIFT)
		return false;

	/* If we use u64 in a few spots we may be able to loosen this */

	if (ondisk->options.order > 8 * sizeof (int) - 1)
		return false;

	/*
	 * The size of a snapshot header has to fit in a size_t, and
	 * that limits the number of snapshots.
	 */
	snap_count = le32_to_cpu(ondisk->snap_count);
	size = SIZE_MAX - sizeof (struct ceph_snap_context);
	if (snap_count > size / sizeof (__le64))
		return false;

	/*
	 * Not only that, but the size of the entire snapshot
	 * header must also be representable in a size_t.
	 */
	size -= snap_count * sizeof (__le64);
	if ((u64) size < le64_to_cpu(ondisk->snap_names_len))
		return false;

	return true;
}
/*
 * returns the size of an object in the image
 */
static u32 rbd_obj_bytes(struct rbd_image_header *header)
{
	return 1U << header->obj_order;
}
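/*
 * For example, the default object order of 22 gives 1U << 22 = 4MiB
 * objects; order 25 would give 32MiB objects.
 */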
static void rbd_init_layout(struct rbd_device *rbd_dev)
{
	if (rbd_dev->header.stripe_unit == 0 ||
	    rbd_dev->header.stripe_count == 0) {
		rbd_dev->header.stripe_unit = rbd_obj_bytes(&rbd_dev->header);
		rbd_dev->header.stripe_count = 1;
	}

	rbd_dev->layout.stripe_unit = rbd_dev->header.stripe_unit;
	rbd_dev->layout.stripe_count = rbd_dev->header.stripe_count;
	rbd_dev->layout.object_size = rbd_obj_bytes(&rbd_dev->header);
	rbd_dev->layout.pool_id = rbd_dev->header.data_pool_id == CEPH_NOPOOL ?
			  rbd_dev->spec->pool_id : rbd_dev->header.data_pool_id;
	RCU_INIT_POINTER(rbd_dev->layout.pool_ns, NULL);
}
/*
 * Fill an rbd image header with information from the given format 1
 * on-disk header.
 */
static int rbd_header_from_disk(struct rbd_device *rbd_dev,
				 struct rbd_image_header_ondisk *ondisk)
{
	struct rbd_image_header *header = &rbd_dev->header;
	bool first_time = header->object_prefix == NULL;
	struct ceph_snap_context *snapc;
	char *object_prefix = NULL;
	char *snap_names = NULL;
	u64 *snap_sizes = NULL;
	u32 snap_count;
	int ret = -ENOMEM;
	u32 i;

	/* Allocate this now to avoid having to handle failure below */

	if (first_time) {
		object_prefix = kstrndup(ondisk->object_prefix,
					 sizeof(ondisk->object_prefix),
					 GFP_KERNEL);
		if (!object_prefix)
			return -ENOMEM;
	}

	/* Allocate the snapshot context and fill it in */

	snap_count = le32_to_cpu(ondisk->snap_count);
	snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
	if (!snapc)
		goto out_err;
	snapc->seq = le64_to_cpu(ondisk->snap_seq);
	if (snap_count) {
		struct rbd_image_snap_ondisk *snaps;
		u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);

		/* We'll keep a copy of the snapshot names... */

		if (snap_names_len > (u64)SIZE_MAX)
			goto out_2big;
		snap_names = kmalloc(snap_names_len, GFP_KERNEL);
		if (!snap_names)
			goto out_err;

		/* ...as well as the array of their sizes. */
		snap_sizes = kmalloc_array(snap_count,
					   sizeof(*header->snap_sizes),
					   GFP_KERNEL);
		if (!snap_sizes)
			goto out_err;

		/*
		 * Copy the names, and fill in each snapshot's id
		 * and size.
		 *
		 * Note that rbd_dev_v1_header_info() guarantees the
		 * ondisk buffer we're working with has
		 * snap_names_len bytes beyond the end of the
		 * snapshot id array, this memcpy() is safe.
		 */
		memcpy(snap_names, &ondisk->snaps[snap_count], snap_names_len);
		snaps = ondisk->snaps;
		for (i = 0; i < snap_count; i++) {
			snapc->snaps[i] = le64_to_cpu(snaps[i].id);
			snap_sizes[i] = le64_to_cpu(snaps[i].image_size);
		}
	}

	/* We won't fail any more, fill in the header */

	if (first_time) {
		header->object_prefix = object_prefix;
		header->obj_order = ondisk->options.order;
		rbd_init_layout(rbd_dev);
	} else {
		ceph_put_snap_context(header->snapc);
		kfree(header->snap_names);
		kfree(header->snap_sizes);
	}

	/* The remaining fields always get updated (when we refresh) */

	header->image_size = le64_to_cpu(ondisk->image_size);
	header->snapc = snapc;
	header->snap_names = snap_names;
	header->snap_sizes = snap_sizes;

	return 0;
out_2big:
	ret = -EIO;
out_err:
	kfree(snap_sizes);
	kfree(snap_names);
	ceph_put_snap_context(snapc);
	kfree(object_prefix);

	return ret;
}
static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which)
{
	const char *snap_name;

	rbd_assert(which < rbd_dev->header.snapc->num_snaps);

	/* Skip over names until we find the one we are looking for */

	snap_name = rbd_dev->header.snap_names;
	while (which--)
		snap_name += strlen(snap_name) + 1;

	return kstrdup(snap_name, GFP_KERNEL);
}
/*
 * Snapshot id comparison function for use with qsort()/bsearch().
 * Note that result is for snapshots in *descending* order.
 */
static int snapid_compare_reverse(const void *s1, const void *s2)
{
	u64 snap_id1 = *(u64 *)s1;
	u64 snap_id2 = *(u64 *)s2;

	if (snap_id1 < snap_id2)
		return 1;
	return snap_id1 == snap_id2 ? 0 : -1;
}
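/*
 * Worked example: for the descending array {10, 5, 2}, a bsearch() for
 * key 5 probes the middle element, matches, and yields index 1; a key
 * such as 7 falls between elements and bsearch() returns NULL.
 */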
/*
 * Search a snapshot context to see if the given snapshot id is
 * present.
 *
 * Returns the position of the snapshot id in the array if it's found,
 * or BAD_SNAP_INDEX otherwise.
 *
 * Note: The snapshot array is kept sorted (by the osd) in
 * reverse order, highest snapshot id first.
 */
static u32 rbd_dev_snap_index(struct rbd_device *rbd_dev, u64 snap_id)
{
	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
	u64 *found;

	found = bsearch(&snap_id, &snapc->snaps, snapc->num_snaps,
				sizeof (snap_id), snapid_compare_reverse);

	return found ? (u32)(found - &snapc->snaps[0]) : BAD_SNAP_INDEX;
}

static const char *rbd_dev_v1_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id)
{
	u32 which;
	const char *snap_name;

	which = rbd_dev_snap_index(rbd_dev, snap_id);
	if (which == BAD_SNAP_INDEX)
		return ERR_PTR(-ENOENT);

	snap_name = _rbd_dev_v1_snap_name(rbd_dev, which);
	return snap_name ? snap_name : ERR_PTR(-ENOMEM);
}
static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
{
	if (snap_id == CEPH_NOSNAP)
		return RBD_SNAP_HEAD_NAME;

	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (rbd_dev->image_format == 1)
		return rbd_dev_v1_snap_name(rbd_dev, snap_id);

	return rbd_dev_v2_snap_name(rbd_dev, snap_id);
}

static int rbd_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
				u64 *snap_size)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (snap_id == CEPH_NOSNAP) {
		*snap_size = rbd_dev->header.image_size;
	} else if (rbd_dev->image_format == 1) {
		u32 which;

		which = rbd_dev_snap_index(rbd_dev, snap_id);
		if (which == BAD_SNAP_INDEX)
			return -ENOENT;

		*snap_size = rbd_dev->header.snap_sizes[which];
	} else {
		u64 size = 0;
		int ret;

		ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, NULL, &size);
		if (ret)
			return ret;

		*snap_size = size;
	}
	return 0;
}
static int rbd_dev_mapping_set(struct rbd_device *rbd_dev)
{
	u64 snap_id = rbd_dev->spec->snap_id;
	u64 size = 0;
	int ret;

	ret = rbd_snap_size(rbd_dev, snap_id, &size);
	if (ret)
		return ret;

	rbd_dev->mapping.size = size;
	return 0;
}

static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev)
{
	rbd_dev->mapping.size = 0;
}

static void zero_bvec(struct bio_vec *bv)
{
	void *buf;
	unsigned long flags;

	buf = bvec_kmap_irq(bv, &flags);
	memset(buf, 0, bv->bv_len);
	flush_dcache_page(bv->bv_page);
	bvec_kunmap_irq(buf, &flags);
}
static void zero_bios(struct ceph_bio_iter *bio_pos, u32 off, u32 bytes)
{
	struct ceph_bio_iter it = *bio_pos;

	ceph_bio_iter_advance(&it, off);
	ceph_bio_iter_advance_step(&it, bytes, ({
		zero_bvec(&bv);
	}));
}

static void zero_bvecs(struct ceph_bvec_iter *bvec_pos, u32 off, u32 bytes)
{
	struct ceph_bvec_iter it = *bvec_pos;

	ceph_bvec_iter_advance(&it, off);
	ceph_bvec_iter_advance_step(&it, bytes, ({
		zero_bvec(&bv);
	}));
}

/*
 * Zero a range in @obj_req data buffer defined by a bio (list) or
 * (private) bio_vec array.
 *
 * @off is relative to the start of the data buffer.
 */
static void rbd_obj_zero_range(struct rbd_obj_request *obj_req, u32 off,
			       u32 bytes)
{
	dout("%s %p data buf %u~%u\n", __func__, obj_req, off, bytes);

	switch (obj_req->img_request->data_type) {
	case OBJ_REQUEST_BIO:
		zero_bios(&obj_req->bio_pos, off, bytes);
		break;
	case OBJ_REQUEST_BVECS:
	case OBJ_REQUEST_OWN_BVECS:
		zero_bvecs(&obj_req->bvec_pos, off, bytes);
		break;
	default:
		BUG();
	}
}
static void rbd_obj_request_destroy(struct kref *kref);
static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request != NULL);
	dout("%s: obj %p (was %d)\n", __func__, obj_request,
		kref_read(&obj_request->kref));
	kref_put(&obj_request->kref, rbd_obj_request_destroy);
}

static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request->img_request == NULL);

	/* Image request now owns object's original reference */
	obj_request->img_request = img_request;
	dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
}

static inline void rbd_img_obj_request_del(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
	list_del(&obj_request->ex.oe_item);
	rbd_assert(obj_request->img_request == img_request);
	rbd_obj_request_put(obj_request);
}
static void rbd_osd_submit(struct ceph_osd_request *osd_req)
{
	struct rbd_obj_request *obj_req = osd_req->r_priv;

	dout("%s osd_req %p for obj_req %p objno %llu %llu~%llu\n",
	     __func__, osd_req, obj_req, obj_req->ex.oe_objno,
	     obj_req->ex.oe_off, obj_req->ex.oe_len);
	ceph_osdc_start_request(osd_req->r_osdc, osd_req, false);
}

/*
 * The default/initial value for all image request flags is 0.  Each
 * is conditionally set to 1 at image request initialization time
 * and currently never changes thereafter.
 */
static void img_request_layered_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_LAYERED, &img_request->flags);
}

static bool img_request_layered_test(struct rbd_img_request *img_request)
{
	return test_bit(IMG_REQ_LAYERED, &img_request->flags) != 0;
}
static bool rbd_obj_is_entire(struct rbd_obj_request *obj_req)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;

	return !obj_req->ex.oe_off &&
	       obj_req->ex.oe_len == rbd_dev->layout.object_size;
}

static bool rbd_obj_is_tail(struct rbd_obj_request *obj_req)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;

	return obj_req->ex.oe_off + obj_req->ex.oe_len ==
					rbd_dev->layout.object_size;
}

/*
 * Must be called after rbd_obj_calc_img_extents().
 */
static bool rbd_obj_copyup_enabled(struct rbd_obj_request *obj_req)
{
	if (!obj_req->num_img_extents ||
	    (rbd_obj_is_entire(obj_req) &&
	     !obj_req->img_request->snapc->num_snaps))
		return false;

	return true;
}

static u64 rbd_obj_img_extents_bytes(struct rbd_obj_request *obj_req)
{
	return ceph_file_extents_bytes(obj_req->img_extents,
				       obj_req->num_img_extents);
}
static bool rbd_img_is_write(struct rbd_img_request *img_req)
{
	switch (img_req->op_type) {
	case OBJ_OP_READ:
		return false;
	case OBJ_OP_WRITE:
	case OBJ_OP_DISCARD:
	case OBJ_OP_ZEROOUT:
		return true;
	default:
		BUG();
	}
}

static void rbd_osd_req_callback(struct ceph_osd_request *osd_req)
{
	struct rbd_obj_request *obj_req = osd_req->r_priv;
	int result;

	dout("%s osd_req %p result %d for obj_req %p\n", __func__, osd_req,
	     osd_req->r_result, obj_req);

	/*
	 * Writes aren't allowed to return a data payload.  In some
	 * guarded write cases (e.g. stat + zero on an empty object)
	 * a stat response makes it through, but we don't care.
	 */
	if (osd_req->r_result > 0 && rbd_img_is_write(obj_req->img_request))
		result = 0;
	else
		result = osd_req->r_result;

	rbd_obj_handle_request(obj_req, result);
}
static void rbd_osd_format_read(struct ceph_osd_request *osd_req)
{
	struct rbd_obj_request *obj_request = osd_req->r_priv;
	struct rbd_device *rbd_dev = obj_request->img_request->rbd_dev;
	struct ceph_options *opt = rbd_dev->rbd_client->client->options;

	osd_req->r_flags = CEPH_OSD_FLAG_READ | opt->read_from_replica;
	osd_req->r_snapid = obj_request->img_request->snap_id;
}

static void rbd_osd_format_write(struct ceph_osd_request *osd_req)
{
	struct rbd_obj_request *obj_request = osd_req->r_priv;

	osd_req->r_flags = CEPH_OSD_FLAG_WRITE;
	ktime_get_real_ts64(&osd_req->r_mtime);
	osd_req->r_data_offset = obj_request->ex.oe_off;
}
static struct ceph_osd_request *
__rbd_obj_add_osd_request(struct rbd_obj_request *obj_req,
			  struct ceph_snap_context *snapc, int num_ops)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct ceph_osd_request *req;
	const char *name_format = rbd_dev->image_format == 1 ?
				      RBD_V1_DATA_FORMAT : RBD_V2_DATA_FORMAT;
	int ret;

	req = ceph_osdc_alloc_request(osdc, snapc, num_ops, false, GFP_NOIO);
	if (!req)
		return ERR_PTR(-ENOMEM);

	list_add_tail(&req->r_private_item, &obj_req->osd_reqs);
	req->r_callback = rbd_osd_req_callback;
	req->r_priv = obj_req;

	/*
	 * Data objects may be stored in a separate pool, but always in
	 * the same namespace in that pool as the header in its pool.
	 */
	ceph_oloc_copy(&req->r_base_oloc, &rbd_dev->header_oloc);
	req->r_base_oloc.pool = rbd_dev->layout.pool_id;

	ret = ceph_oid_aprintf(&req->r_base_oid, GFP_NOIO, name_format,
			       rbd_dev->header.object_prefix,
			       obj_req->ex.oe_objno);
	if (ret)
		return ERR_PTR(ret);

	return req;
}

static struct ceph_osd_request *
rbd_obj_add_osd_request(struct rbd_obj_request *obj_req, int num_ops)
{
	return __rbd_obj_add_osd_request(obj_req, obj_req->img_request->snapc,
					 num_ops);
}
static struct rbd_obj_request *rbd_obj_request_create(void)
{
	struct rbd_obj_request *obj_request;

	obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_NOIO);
	if (!obj_request)
		return NULL;

	ceph_object_extent_init(&obj_request->ex);
	INIT_LIST_HEAD(&obj_request->osd_reqs);
	mutex_init(&obj_request->state_mutex);
	kref_init(&obj_request->kref);

	dout("%s %p\n", __func__, obj_request);
	return obj_request;
}

static void rbd_obj_request_destroy(struct kref *kref)
{
	struct rbd_obj_request *obj_request;
	struct ceph_osd_request *osd_req;
	u32 i;

	obj_request = container_of(kref, struct rbd_obj_request, kref);

	dout("%s: obj %p\n", __func__, obj_request);

	while (!list_empty(&obj_request->osd_reqs)) {
		osd_req = list_first_entry(&obj_request->osd_reqs,
				    struct ceph_osd_request, r_private_item);
		list_del_init(&osd_req->r_private_item);
		ceph_osdc_put_request(osd_req);
	}

	switch (obj_request->img_request->data_type) {
	case OBJ_REQUEST_NODATA:
	case OBJ_REQUEST_BIO:
	case OBJ_REQUEST_BVECS:
		break;		/* Nothing to do */
	case OBJ_REQUEST_OWN_BVECS:
		kfree(obj_request->bvec_pos.bvecs);
		break;
	default:
		BUG();
	}

	kfree(obj_request->img_extents);
	if (obj_request->copyup_bvecs) {
		for (i = 0; i < obj_request->copyup_bvec_count; i++) {
			if (obj_request->copyup_bvecs[i].bv_page)
				__free_page(obj_request->copyup_bvecs[i].bv_page);
		}
		kfree(obj_request->copyup_bvecs);
	}

	kmem_cache_free(rbd_obj_request_cache, obj_request);
}
/* It's OK to call this for a device with no parent */

static void rbd_spec_put(struct rbd_spec *spec);
static void rbd_dev_unparent(struct rbd_device *rbd_dev)
{
	rbd_dev_remove_parent(rbd_dev);
	rbd_spec_put(rbd_dev->parent_spec);
	rbd_dev->parent_spec = NULL;
	rbd_dev->parent_overlap = 0;
}

/*
 * Parent image reference counting is used to determine when an
 * image's parent fields can be safely torn down--after there are no
 * more in-flight requests to the parent image.  When the last
 * reference is dropped, cleaning them up is safe.
 */
static void rbd_dev_parent_put(struct rbd_device *rbd_dev)
{
	int counter;

	if (!rbd_dev->parent_spec)
		return;

	counter = atomic_dec_return_safe(&rbd_dev->parent_ref);
	if (counter > 0)
		return;

	/* Last reference; clean up parent data structures */

	if (!counter)
		rbd_dev_unparent(rbd_dev);
	else
		rbd_warn(rbd_dev, "parent reference underflow");
}

/*
 * If an image has a non-zero parent overlap, get a reference to its
 * parent.
 *
 * Returns true if the rbd device has a parent with a non-zero
 * overlap and a reference for it was successfully taken, or
 * false otherwise.
 */
static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
{
	int counter = 0;

	if (!rbd_dev->parent_spec)
		return false;

	if (rbd_dev->parent_overlap)
		counter = atomic_inc_return_safe(&rbd_dev->parent_ref);

	if (counter < 0)
		rbd_warn(rbd_dev, "parent reference overflow");

	return counter > 0;
}
static void rbd_img_request_init(struct rbd_img_request *img_request,
				 struct rbd_device *rbd_dev,
				 enum obj_operation_type op_type)
{
	memset(img_request, 0, sizeof(*img_request));

	img_request->rbd_dev = rbd_dev;
	img_request->op_type = op_type;

	INIT_LIST_HEAD(&img_request->lock_item);
	INIT_LIST_HEAD(&img_request->object_extents);
	mutex_init(&img_request->state_mutex);
}

static void rbd_img_capture_header(struct rbd_img_request *img_req)
{
	struct rbd_device *rbd_dev = img_req->rbd_dev;

	lockdep_assert_held(&rbd_dev->header_rwsem);

	if (rbd_img_is_write(img_req))
		img_req->snapc = ceph_get_snap_context(rbd_dev->header.snapc);
	else
		img_req->snap_id = rbd_dev->spec->snap_id;

	if (rbd_dev_parent_get(rbd_dev))
		img_request_layered_set(img_req);
}

static void rbd_img_request_destroy(struct rbd_img_request *img_request)
{
	struct rbd_obj_request *obj_request;
	struct rbd_obj_request *next_obj_request;

	dout("%s: img %p\n", __func__, img_request);

	WARN_ON(!list_empty(&img_request->lock_item));
	for_each_obj_request_safe(img_request, obj_request, next_obj_request)
		rbd_img_obj_request_del(img_request, obj_request);

	if (img_request_layered_test(img_request))
		rbd_dev_parent_put(img_request->rbd_dev);

	if (rbd_img_is_write(img_request))
		ceph_put_snap_context(img_request->snapc);

	if (test_bit(IMG_REQ_CHILD, &img_request->flags))
		kmem_cache_free(rbd_img_request_cache, img_request);
}
#define BITS_PER_OBJ	2
#define OBJS_PER_BYTE	(BITS_PER_BYTE / BITS_PER_OBJ)
#define OBJ_MASK	((1 << BITS_PER_OBJ) - 1)

static void __rbd_object_map_index(struct rbd_device *rbd_dev, u64 objno,
				   u64 *index, u8 *shift)
{
	u32 off;

	rbd_assert(objno < rbd_dev->object_map_size);
	*index = div_u64_rem(objno, OBJS_PER_BYTE, &off);
	*shift = (OBJS_PER_BYTE - off - 1) * BITS_PER_OBJ;
}
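/*
 * Worked example: each object's state takes BITS_PER_OBJ = 2 bits, so
 * one byte packs OBJS_PER_BYTE = 4 states, most significant pair
 * first.  For objno = 5: index = 5 / 4 = 1, off = 1, shift =
 * (4 - 1 - 1) * 2 = 4, i.e. the state is (object_map[1] >> 4) & 3.
 */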
static u8 __rbd_object_map_get(struct rbd_device *rbd_dev, u64 objno)
{
	u64 index;
	u8 shift;

	lockdep_assert_held(&rbd_dev->object_map_lock);
	__rbd_object_map_index(rbd_dev, objno, &index, &shift);
	return (rbd_dev->object_map[index] >> shift) & OBJ_MASK;
}

static void __rbd_object_map_set(struct rbd_device *rbd_dev, u64 objno, u8 val)
{
	u64 index;
	u8 shift;
	u8 *p;

	lockdep_assert_held(&rbd_dev->object_map_lock);
	rbd_assert(!(val & ~OBJ_MASK));

	__rbd_object_map_index(rbd_dev, objno, &index, &shift);
	p = &rbd_dev->object_map[index];
	*p = (*p & ~(OBJ_MASK << shift)) | (val << shift);
}

static u8 rbd_object_map_get(struct rbd_device *rbd_dev, u64 objno)
{
	u8 state;

	spin_lock(&rbd_dev->object_map_lock);
	state = __rbd_object_map_get(rbd_dev, objno);
	spin_unlock(&rbd_dev->object_map_lock);
	return state;
}

static bool use_object_map(struct rbd_device *rbd_dev)
{
	/*
	 * An image mapped read-only can't use the object map -- it isn't
	 * loaded because the header lock isn't acquired.  Someone else can
	 * write to the image and update the object map behind our back.
	 *
	 * A snapshot can't be written to, so using the object map is always
	 * safe.
	 */
	if (!rbd_is_snap(rbd_dev) && rbd_is_ro(rbd_dev))
		return false;

	return ((rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP) &&
		!(rbd_dev->object_map_flags & RBD_FLAG_OBJECT_MAP_INVALID));
}

static bool rbd_object_map_may_exist(struct rbd_device *rbd_dev, u64 objno)
{
	u8 state;

	/* fall back to default logic if object map is disabled or invalid */
	if (!use_object_map(rbd_dev))
		return true;

	state = rbd_object_map_get(rbd_dev, objno);
	return state != OBJECT_NONEXISTENT;
}
static void rbd_object_map_name(struct rbd_device *rbd_dev, u64 snap_id,
				struct ceph_object_id *oid)
{
	if (snap_id == CEPH_NOSNAP)
		ceph_oid_printf(oid, "%s%s", RBD_OBJECT_MAP_PREFIX,
				rbd_dev->spec->image_id);
	else
		ceph_oid_printf(oid, "%s%s.%016llx", RBD_OBJECT_MAP_PREFIX,
				rbd_dev->spec->image_id, snap_id);
}

static int rbd_object_map_lock(struct rbd_device *rbd_dev)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	CEPH_DEFINE_OID_ONSTACK(oid);
	u8 lock_type;
	char *lock_tag;
	struct ceph_locker *lockers;
	u32 num_lockers;
	bool broke_lock = false;
	int ret;

	rbd_object_map_name(rbd_dev, CEPH_NOSNAP, &oid);

again:
	ret = ceph_cls_lock(osdc, &oid, &rbd_dev->header_oloc, RBD_LOCK_NAME,
			    CEPH_CLS_LOCK_EXCLUSIVE, "", "", "", 0);
	if (ret != -EBUSY || broke_lock) {
		if (ret == -EEXIST)
			ret = 0; /* already locked by myself */
		if (ret)
			rbd_warn(rbd_dev, "failed to lock object map: %d", ret);
		return ret;
	}

	ret = ceph_cls_lock_info(osdc, &oid, &rbd_dev->header_oloc,
				 RBD_LOCK_NAME, &lock_type, &lock_tag,
				 &lockers, &num_lockers);
	if (ret) {
		if (ret == -ENOENT)
			goto again;

		rbd_warn(rbd_dev, "failed to get object map lockers: %d", ret);
		return ret;
	}

	kfree(lock_tag);
	if (num_lockers == 0)
		goto again;

	rbd_warn(rbd_dev, "breaking object map lock owned by %s%llu",
		 ENTITY_NAME(lockers[0].id.name));

	ret = ceph_cls_break_lock(osdc, &oid, &rbd_dev->header_oloc,
				  RBD_LOCK_NAME, lockers[0].id.cookie,
				  &lockers[0].id.name);
	ceph_free_lockers(lockers, num_lockers);
	if (ret) {
		if (ret == -ENOENT)
			goto again;

		rbd_warn(rbd_dev, "failed to break object map lock: %d", ret);
		return ret;
	}

	broke_lock = true;
	goto again;
}

static void rbd_object_map_unlock(struct rbd_device *rbd_dev)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	CEPH_DEFINE_OID_ONSTACK(oid);
	int ret;

	rbd_object_map_name(rbd_dev, CEPH_NOSNAP, &oid);

	ret = ceph_cls_unlock(osdc, &oid, &rbd_dev->header_oloc, RBD_LOCK_NAME,
			      "");
	if (ret && ret != -ENOENT)
		rbd_warn(rbd_dev, "failed to unlock object map: %d", ret);
}
static int decode_object_map_header(void **p, void *end, u64 *object_map_size)
{
	u8 struct_v;
	u32 struct_len;
	u32 header_len;
	void *header_end;
	int ret;

	ceph_decode_32_safe(p, end, header_len, e_inval);
	header_end = *p + header_len;

	ret = ceph_start_decoding(p, end, 1, "BitVector header", &struct_v,
				  &struct_len);
	if (ret)
		return ret;

	ceph_decode_64_safe(p, end, *object_map_size, e_inval);

	*p = header_end;
	return 0;

e_inval:
	return -EINVAL;
}
static int __rbd_object_map_load(struct rbd_device *rbd_dev)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	CEPH_DEFINE_OID_ONSTACK(oid);
	struct page **pages;
	void *p, *end;
	size_t reply_len;
	u64 num_objects;
	u64 object_map_bytes;
	u64 object_map_size;
	int num_pages;
	int ret;

	rbd_assert(!rbd_dev->object_map && !rbd_dev->object_map_size);

	num_objects = ceph_get_num_objects(&rbd_dev->layout,
					   rbd_dev->mapping.size);
	object_map_bytes = DIV_ROUND_UP_ULL(num_objects * BITS_PER_OBJ,
					    BITS_PER_BYTE);
	num_pages = calc_pages_for(0, object_map_bytes) + 1;
	pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	reply_len = num_pages * PAGE_SIZE;
	rbd_object_map_name(rbd_dev, rbd_dev->spec->snap_id, &oid);
	ret = ceph_osdc_call(osdc, &oid, &rbd_dev->header_oloc,
			     "rbd", "object_map_load", CEPH_OSD_FLAG_READ,
			     NULL, 0, pages, &reply_len);
	if (ret)
		goto out;

	p = page_address(pages[0]);
	end = p + min(reply_len, (size_t)PAGE_SIZE);
	ret = decode_object_map_header(&p, end, &object_map_size);
	if (ret)
		goto out;

	if (object_map_size != num_objects) {
		rbd_warn(rbd_dev, "object map size mismatch: %llu vs %llu",
			 object_map_size, num_objects);
		ret = -EINVAL;
		goto out;
	}

	if (offset_in_page(p) + object_map_bytes > reply_len) {
		ret = -EINVAL;
		goto out;
	}

	rbd_dev->object_map = kvmalloc(object_map_bytes, GFP_KERNEL);
	if (!rbd_dev->object_map) {
		ret = -ENOMEM;
		goto out;
	}

	rbd_dev->object_map_size = object_map_size;
	ceph_copy_from_page_vector(pages, rbd_dev->object_map,
				   offset_in_page(p), object_map_bytes);

out:
	ceph_release_page_vector(pages, num_pages);
	return ret;
}

static void rbd_object_map_free(struct rbd_device *rbd_dev)
{
	kvfree(rbd_dev->object_map);
	rbd_dev->object_map = NULL;
	rbd_dev->object_map_size = 0;
}
static int rbd_object_map_load(struct rbd_device *rbd_dev)
{
	int ret;

	ret = __rbd_object_map_load(rbd_dev);
	if (ret)
		return ret;

	ret = rbd_dev_v2_get_flags(rbd_dev);
	if (ret) {
		rbd_object_map_free(rbd_dev);
		return ret;
	}

	if (rbd_dev->object_map_flags & RBD_FLAG_OBJECT_MAP_INVALID)
		rbd_warn(rbd_dev, "object map is invalid");

	return 0;
}

static int rbd_object_map_open(struct rbd_device *rbd_dev)
{
	int ret;

	ret = rbd_object_map_lock(rbd_dev);
	if (ret)
		return ret;

	ret = rbd_object_map_load(rbd_dev);
	if (ret) {
		rbd_object_map_unlock(rbd_dev);
		return ret;
	}

	return 0;
}

static void rbd_object_map_close(struct rbd_device *rbd_dev)
{
	rbd_object_map_free(rbd_dev);
	rbd_object_map_unlock(rbd_dev);
}
/*
 * This function needs snap_id (or more precisely just something to
 * distinguish between HEAD and snapshot object maps), new_state and
 * current_state that were passed to rbd_object_map_update().
 *
 * To avoid allocating and stashing a context we piggyback on the OSD
 * request.  A HEAD update has two ops (assert_locked).  For new_state
 * and current_state we decode our own object_map_update op, encoded in
 * rbd_cls_object_map_update().
 */
static int rbd_object_map_update_finish(struct rbd_obj_request *obj_req,
					struct ceph_osd_request *osd_req)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
	struct ceph_osd_data *osd_data;
	u64 objno;
	u8 state, new_state, current_state;
	bool has_current_state;
	void *p;

	if (osd_req->r_result)
		return osd_req->r_result;

	/*
	 * Nothing to do for a snapshot object map.
	 */
	if (osd_req->r_num_ops == 1)
		return 0;

	/*
	 * Update in-memory HEAD object map.
	 */
	rbd_assert(osd_req->r_num_ops == 2);
	osd_data = osd_req_op_data(osd_req, 1, cls, request_data);
	rbd_assert(osd_data->type == CEPH_OSD_DATA_TYPE_PAGES);

	p = page_address(osd_data->pages[0]);
	objno = ceph_decode_64(&p);
	rbd_assert(objno == obj_req->ex.oe_objno);
	rbd_assert(ceph_decode_64(&p) == objno + 1);
	new_state = ceph_decode_8(&p);
	has_current_state = ceph_decode_8(&p);
	if (has_current_state)
		current_state = ceph_decode_8(&p);

	spin_lock(&rbd_dev->object_map_lock);
	state = __rbd_object_map_get(rbd_dev, objno);
	if (!has_current_state || current_state == state ||
	    (current_state == OBJECT_EXISTS && state == OBJECT_EXISTS_CLEAN))
		__rbd_object_map_set(rbd_dev, objno, new_state);
	spin_unlock(&rbd_dev->object_map_lock);

	return 0;
}
static void rbd_object_map_callback(struct ceph_osd_request *osd_req)
{
	struct rbd_obj_request *obj_req = osd_req->r_priv;
	int result;

	dout("%s osd_req %p result %d for obj_req %p\n", __func__, osd_req,
	     osd_req->r_result, obj_req);

	result = rbd_object_map_update_finish(obj_req, osd_req);
	rbd_obj_handle_request(obj_req, result);
}

static bool update_needed(struct rbd_device *rbd_dev, u64 objno, u8 new_state)
{
	u8 state = rbd_object_map_get(rbd_dev, objno);

	if (state == new_state ||
	    (new_state == OBJECT_PENDING && state == OBJECT_NONEXISTENT) ||
	    (new_state == OBJECT_NONEXISTENT && state != OBJECT_PENDING))
		return false;

	return true;
}
static int rbd_cls_object_map_update(struct ceph_osd_request *req,
				     int which, u64 objno, u8 new_state,
				     const u8 *current_state)
{
	struct page **pages;
	void *p, *start;
	int ret;

	ret = osd_req_op_cls_init(req, which, "rbd", "object_map_update");
	if (ret)
		return ret;

	pages = ceph_alloc_page_vector(1, GFP_NOIO);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	p = start = page_address(pages[0]);
	ceph_encode_64(&p, objno);
	ceph_encode_64(&p, objno + 1);
	ceph_encode_8(&p, new_state);
	if (current_state) {
		ceph_encode_8(&p, 1);
		ceph_encode_8(&p, *current_state);
	} else {
		ceph_encode_8(&p, 0);
	}

	osd_req_op_cls_request_data_pages(req, which, pages, p - start, 0,
					  false, true);
	return 0;
}
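/*
 * The payload encoded above is, in order: the object range
 * [objno, objno + 1), the desired new_state, and an optional
 * current_state guard ((u8)1 followed by the value, or (u8)0 if
 * absent) -- the same layout rbd_object_map_update_finish() decodes
 * back out of these pages on completion.
 */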
/*
 * Return:
 *   0 - object map update sent
 *   1 - object map update isn't needed
 *  <0 - error occurred
 */
static int rbd_object_map_update(struct rbd_obj_request *obj_req, u64 snap_id,
				 u8 new_state, const u8 *current_state)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct ceph_osd_request *req;
	int num_ops = 1;
	int which = 0;
	int ret;

	if (snap_id == CEPH_NOSNAP) {
		if (!update_needed(rbd_dev, obj_req->ex.oe_objno, new_state))
			return 1;

		num_ops++; /* assert_locked */
	}

	req = ceph_osdc_alloc_request(osdc, NULL, num_ops, false, GFP_NOIO);
	if (!req)
		return -ENOMEM;

	list_add_tail(&req->r_private_item, &obj_req->osd_reqs);
	req->r_callback = rbd_object_map_callback;
	req->r_priv = obj_req;

	rbd_object_map_name(rbd_dev, snap_id, &req->r_base_oid);
	ceph_oloc_copy(&req->r_base_oloc, &rbd_dev->header_oloc);
	req->r_flags = CEPH_OSD_FLAG_WRITE;
	ktime_get_real_ts64(&req->r_mtime);

	if (snap_id == CEPH_NOSNAP) {
		/*
		 * Protect against possible race conditions during lock
		 * ownership transitions.
		 */
		ret = ceph_cls_assert_locked(req, which++, RBD_LOCK_NAME,
					     CEPH_CLS_LOCK_EXCLUSIVE, "", "");
		if (ret)
			return ret;
	}

	ret = rbd_cls_object_map_update(req, which, obj_req->ex.oe_objno,
					new_state, current_state);
	if (ret)
		return ret;

	ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
	if (ret)
		return ret;

	ceph_osdc_start_request(osdc, req, false);
	return 0;
}
static void prune_extents(struct ceph_file_extent *img_extents,
			  u32 *num_img_extents, u64 overlap)
{
	u32 cnt = *num_img_extents;

	/* drop extents completely beyond the overlap */
	while (cnt && img_extents[cnt - 1].fe_off >= overlap)
		cnt--;

	if (cnt) {
		struct ceph_file_extent *ex = &img_extents[cnt - 1];

		/* trim final overlapping extent */
		if (ex->fe_off + ex->fe_len > overlap)
			ex->fe_len = overlap - ex->fe_off;
	}

	*num_img_extents = cnt;
}
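/*
 * Worked example: with extents {0~4M, 6M~2M} and a 7M parent overlap,
 * the second extent starts inside the overlap so it is kept but
 * trimmed to 6M~1M; with a 5M overlap it would be dropped entirely,
 * leaving just {0~4M}.
 */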
/*
 * Determine the byte range(s) covered by either just the object extent
 * or the entire object in the parent image.
 */
static int rbd_obj_calc_img_extents(struct rbd_obj_request *obj_req,
				    bool entire)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
	int ret;

	if (!rbd_dev->parent_overlap)
		return 0;

	ret = ceph_extent_to_file(&rbd_dev->layout, obj_req->ex.oe_objno,
				  entire ? 0 : obj_req->ex.oe_off,
				  entire ? rbd_dev->layout.object_size :
							obj_req->ex.oe_len,
				  &obj_req->img_extents,
				  &obj_req->num_img_extents);
	if (ret)
		return ret;

	prune_extents(obj_req->img_extents, &obj_req->num_img_extents,
		      rbd_dev->parent_overlap);
	return 0;
}
static void rbd_osd_setup_data(struct ceph_osd_request *osd_req, int which)
{
	struct rbd_obj_request *obj_req = osd_req->r_priv;

	switch (obj_req->img_request->data_type) {
	case OBJ_REQUEST_BIO:
		osd_req_op_extent_osd_data_bio(osd_req, which,
					       &obj_req->bio_pos,
					       obj_req->ex.oe_len);
		break;
	case OBJ_REQUEST_BVECS:
	case OBJ_REQUEST_OWN_BVECS:
		rbd_assert(obj_req->bvec_pos.iter.bi_size ==
							obj_req->ex.oe_len);
		rbd_assert(obj_req->bvec_idx == obj_req->bvec_count);
		osd_req_op_extent_osd_data_bvec_pos(osd_req, which,
						    &obj_req->bvec_pos);
		break;
	default:
		BUG();
	}
}
static int rbd_osd_setup_stat(struct ceph_osd_request *osd_req, int which)
{
	struct page **pages;

	/*
	 * The response data for a STAT call consists of:
	 *     le64 length;
	 *     struct {
	 *         le32 tv_sec;
	 *         le32 tv_nsec;
	 *     } mtime;
	 */
	pages = ceph_alloc_page_vector(1, GFP_NOIO);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	osd_req_op_init(osd_req, which, CEPH_OSD_OP_STAT, 0);
	osd_req_op_raw_data_in_pages(osd_req, which, pages,
				     8 + sizeof(struct ceph_timespec),
				     0, false, true);
	return 0;
}
static int rbd_osd_setup_copyup(struct ceph_osd_request *osd_req, int which,
				u32 bytes)
{
	struct rbd_obj_request *obj_req = osd_req->r_priv;
	int ret;

	ret = osd_req_op_cls_init(osd_req, which, "rbd", "copyup");
	if (ret)
		return ret;

	osd_req_op_cls_request_data_bvecs(osd_req, which, obj_req->copyup_bvecs,
					  obj_req->copyup_bvec_count, bytes);
	return 0;
}

static int rbd_obj_init_read(struct rbd_obj_request *obj_req)
{
	obj_req->read_state = RBD_OBJ_READ_START;
	return 0;
}
static void __rbd_osd_setup_write_ops(struct ceph_osd_request *osd_req,
				      int which)
{
	struct rbd_obj_request *obj_req = osd_req->r_priv;
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
	u16 opcode;

	if (!use_object_map(rbd_dev) ||
	    !(obj_req->flags & RBD_OBJ_FLAG_MAY_EXIST)) {
		osd_req_op_alloc_hint_init(osd_req, which++,
					   rbd_dev->layout.object_size,
					   rbd_dev->layout.object_size,
					   rbd_dev->opts->alloc_hint_flags);
	}

	if (rbd_obj_is_entire(obj_req))
		opcode = CEPH_OSD_OP_WRITEFULL;
	else
		opcode = CEPH_OSD_OP_WRITE;

	osd_req_op_extent_init(osd_req, which, opcode,
			       obj_req->ex.oe_off, obj_req->ex.oe_len, 0, 0);
	rbd_osd_setup_data(osd_req, which);
}
static int rbd_obj_init_write(struct rbd_obj_request *obj_req)
{
	int ret;

	/* reverse map the entire object onto the parent */
	ret = rbd_obj_calc_img_extents(obj_req, true);
	if (ret)
		return ret;

	if (rbd_obj_copyup_enabled(obj_req))
		obj_req->flags |= RBD_OBJ_FLAG_COPYUP_ENABLED;

	obj_req->write_state = RBD_OBJ_WRITE_START;
	return 0;
}
static u16 truncate_or_zero_opcode(struct rbd_obj_request *obj_req)
{
	return rbd_obj_is_tail(obj_req) ? CEPH_OSD_OP_TRUNCATE :
					  CEPH_OSD_OP_ZERO;
}

static void __rbd_osd_setup_discard_ops(struct ceph_osd_request *osd_req,
					int which)
{
	struct rbd_obj_request *obj_req = osd_req->r_priv;

	if (rbd_obj_is_entire(obj_req) && !obj_req->num_img_extents) {
		rbd_assert(obj_req->flags & RBD_OBJ_FLAG_DELETION);
		osd_req_op_init(osd_req, which, CEPH_OSD_OP_DELETE, 0);
	} else {
		osd_req_op_extent_init(osd_req, which,
				       truncate_or_zero_opcode(obj_req),
				       obj_req->ex.oe_off, obj_req->ex.oe_len,
				       0, 0);
	}
}
static int rbd_obj_init_discard(struct rbd_obj_request *obj_req)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
	u64 off, next_off;
	int ret;

	/*
	 * Align the range to alloc_size boundary and punt on discards
	 * that are too small to free up any space.
	 *
	 * alloc_size == object_size && is_tail() is a special case for
	 * filestore with filestore_punch_hole = false, needed to allow
	 * truncate (in addition to delete).
	 */
	if (rbd_dev->opts->alloc_size != rbd_dev->layout.object_size ||
	    !rbd_obj_is_tail(obj_req)) {
		off = round_up(obj_req->ex.oe_off, rbd_dev->opts->alloc_size);
		next_off = round_down(obj_req->ex.oe_off + obj_req->ex.oe_len,
				      rbd_dev->opts->alloc_size);
		if (off >= next_off)
			return 1;

		dout("%s %p %llu~%llu -> %llu~%llu\n", __func__,
		     obj_req, obj_req->ex.oe_off, obj_req->ex.oe_len,
		     off, next_off - off);
		obj_req->ex.oe_off = off;
		obj_req->ex.oe_len = next_off - off;
	}

	/* reverse map the entire object onto the parent */
	ret = rbd_obj_calc_img_extents(obj_req, true);
	if (ret)
		return ret;

	obj_req->flags |= RBD_OBJ_FLAG_NOOP_FOR_NONEXISTENT;
	if (rbd_obj_is_entire(obj_req) && !obj_req->num_img_extents)
		obj_req->flags |= RBD_OBJ_FLAG_DELETION;

	obj_req->write_state = RBD_OBJ_WRITE_START;
	return 0;
}
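/*
 * Worked example: with alloc_size = 64K, a discard of 100K~200K inside
 * one object rounds to off = 128K and next_off = 256K, so only
 * 128K~128K is actually discarded; a discard smaller than one
 * alloc_size chunk rounds to off >= next_off and is punted.
 */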
static void __rbd_osd_setup_zeroout_ops(struct ceph_osd_request *osd_req,
					int which)
{
	struct rbd_obj_request *obj_req = osd_req->r_priv;
	u16 opcode;

	if (rbd_obj_is_entire(obj_req)) {
		if (obj_req->num_img_extents) {
			if (!(obj_req->flags & RBD_OBJ_FLAG_COPYUP_ENABLED))
				osd_req_op_init(osd_req, which++,
						CEPH_OSD_OP_CREATE, 0);
			opcode = CEPH_OSD_OP_TRUNCATE;
		} else {
			rbd_assert(obj_req->flags & RBD_OBJ_FLAG_DELETION);
			osd_req_op_init(osd_req, which++,
					CEPH_OSD_OP_DELETE, 0);
			opcode = 0;
		}
	} else {
		opcode = truncate_or_zero_opcode(obj_req);
	}

	if (opcode)
		osd_req_op_extent_init(osd_req, which, opcode,
				       obj_req->ex.oe_off, obj_req->ex.oe_len,
				       0, 0);
}
static int rbd_obj_init_zeroout(struct rbd_obj_request *obj_req)
{
	int ret;

	/* reverse map the entire object onto the parent */
	ret = rbd_obj_calc_img_extents(obj_req, true);
	if (ret)
		return ret;

	if (rbd_obj_copyup_enabled(obj_req))
		obj_req->flags |= RBD_OBJ_FLAG_COPYUP_ENABLED;
	if (!obj_req->num_img_extents) {
		obj_req->flags |= RBD_OBJ_FLAG_NOOP_FOR_NONEXISTENT;
		if (rbd_obj_is_entire(obj_req))
			obj_req->flags |= RBD_OBJ_FLAG_DELETION;
	}

	obj_req->write_state = RBD_OBJ_WRITE_START;
	return 0;
}
2368 static int count_write_ops(struct rbd_obj_request *obj_req)
2370 struct rbd_img_request *img_req = obj_req->img_request;
2372 switch (img_req->op_type) {
2374 if (!use_object_map(img_req->rbd_dev) ||
2375 !(obj_req->flags & RBD_OBJ_FLAG_MAY_EXIST))
2376 return 2; /* setallochint + write/writefull */
2378 return 1; /* write/writefull */
2379 case OBJ_OP_DISCARD:
2380 return 1; /* delete/truncate/zero */
2381 case OBJ_OP_ZEROOUT:
2382 if (rbd_obj_is_entire(obj_req) && obj_req->num_img_extents &&
2383 !(obj_req->flags & RBD_OBJ_FLAG_COPYUP_ENABLED))
2384 return 2; /* create + truncate */
2386 return 1; /* delete/truncate/zero */
2387 default:
2388 BUG();
2389 }
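/*
 * [Editor's sketch] Op counts implied by count_write_ops() above:
 *
 *	write, object map says object may not exist: 2 (setallochint +
 *	    write/writefull)
 *	write, RBD_OBJ_FLAG_MAY_EXIST set:           1 (write/writefull)
 *	discard:                                     1 (delete/truncate/zero)
 *	zeroout of an entire object that has parent data and no copyup:
 *	                                             2 (create + truncate)
 *
 * rbd_obj_write_object() below adds one more op (stat) in front when
 * copyup is enabled.
 */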
2392 static void rbd_osd_setup_write_ops(struct ceph_osd_request *osd_req,
2393 int which)
2395 struct rbd_obj_request *obj_req = osd_req->r_priv;
2397 switch (obj_req->img_request->op_type) {
2398 case OBJ_OP_WRITE:
2399 __rbd_osd_setup_write_ops(osd_req, which);
2400 break;
2401 case OBJ_OP_DISCARD:
2402 __rbd_osd_setup_discard_ops(osd_req, which);
2403 break;
2404 case OBJ_OP_ZEROOUT:
2405 __rbd_osd_setup_zeroout_ops(osd_req, which);
2406 break;
2407 default:
2408 BUG();
2409 }
2413 * Prune the list of object requests (adjust offset and/or length, drop
2414 * redundant requests). Prepare object request state machines and image
2415 * request state machine for execution.
2417 static int __rbd_img_fill_request(struct rbd_img_request *img_req)
2419 struct rbd_obj_request *obj_req, *next_obj_req;
2422 for_each_obj_request_safe(img_req, obj_req, next_obj_req) {
2423 switch (img_req->op_type) {
2424 case OBJ_OP_READ:
2425 ret = rbd_obj_init_read(obj_req);
2426 break;
2427 case OBJ_OP_WRITE:
2428 ret = rbd_obj_init_write(obj_req);
2429 break;
2430 case OBJ_OP_DISCARD:
2431 ret = rbd_obj_init_discard(obj_req);
2432 break;
2433 case OBJ_OP_ZEROOUT:
2434 ret = rbd_obj_init_zeroout(obj_req);
2435 break;
2436 default:
2437 BUG();
2438 }
2439 if (ret < 0)
2440 return ret;
2441 if (ret > 0) {
2442 rbd_img_obj_request_del(img_req, obj_req);
2443 continue;
2444 }
2445 }
2447 img_req->state = RBD_IMG_START;
2448 return 0;
2451 union rbd_img_fill_iter {
2452 struct ceph_bio_iter bio_iter;
2453 struct ceph_bvec_iter bvec_iter;
2456 struct rbd_img_fill_ctx {
2457 enum obj_request_type pos_type;
2458 union rbd_img_fill_iter *pos;
2459 union rbd_img_fill_iter iter;
2460 ceph_object_extent_fn_t set_pos_fn;
2461 ceph_object_extent_fn_t count_fn;
2462 ceph_object_extent_fn_t copy_fn;
2465 static struct ceph_object_extent *alloc_object_extent(void *arg)
2467 struct rbd_img_request *img_req = arg;
2468 struct rbd_obj_request *obj_req;
2470 obj_req = rbd_obj_request_create();
2471 if (!obj_req)
2472 return NULL;
2474 rbd_img_obj_request_add(img_req, obj_req);
2475 return &obj_req->ex;
2479 * While su != os && sc == 1 is technically not fancy (it's the same
2480 * layout as su == os && sc == 1), we can't use the nocopy path for it
2481 * because ->set_pos_fn() should be called only once per object.
2482 * ceph_file_to_extents() invokes action_fn once per stripe unit, so
2483 * treat su != os && sc == 1 as fancy.
2485 static bool rbd_layout_is_fancy(struct ceph_file_layout *l)
2487 return l->stripe_unit != l->object_size;
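/*
 * [Editor's sketch] Examples: the default layout (object_size = 4M,
 * stripe_unit = 4M, stripe_count = 1) is not fancy and takes the
 * nocopy path below.  An image striped with stripe_unit = 64K is
 * fancy for any stripe_count, including the su != os && sc == 1 case
 * called out above.
 */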
2490 static int rbd_img_fill_request_nocopy(struct rbd_img_request *img_req,
2491 struct ceph_file_extent *img_extents,
2492 u32 num_img_extents,
2493 struct rbd_img_fill_ctx *fctx)
2495 u32 i;
2496 int ret;
2498 img_req->data_type = fctx->pos_type;
2501 * Create object requests and set each object request's starting
2502 * position in the provided bio (list) or bio_vec array.
2504 fctx->iter = *fctx->pos;
2505 for (i = 0; i < num_img_extents; i++) {
2506 ret = ceph_file_to_extents(&img_req->rbd_dev->layout,
2507 img_extents[i].fe_off,
2508 img_extents[i].fe_len,
2509 &img_req->object_extents,
2510 alloc_object_extent, img_req,
2511 fctx->set_pos_fn, &fctx->iter);
2512 if (ret)
2513 return ret;
2514 }
2516 return __rbd_img_fill_request(img_req);
2520 * Map a list of image extents to a list of object extents, create the
2521 * corresponding object requests (normally each to a different object,
2522 * but not always) and add them to @img_req. For each object request,
2523 * set up its data descriptor to point to the corresponding chunk(s) of
2524 * @fctx->pos data buffer.
2526 * Because ceph_file_to_extents() will merge adjacent object extents
2527 * together, each object request's data descriptor may point to multiple
2528 * different chunks of @fctx->pos data buffer.
2530 * @fctx->pos data buffer is assumed to be large enough.
2532 static int rbd_img_fill_request(struct rbd_img_request *img_req,
2533 struct ceph_file_extent *img_extents,
2534 u32 num_img_extents,
2535 struct rbd_img_fill_ctx *fctx)
2537 struct rbd_device *rbd_dev = img_req->rbd_dev;
2538 struct rbd_obj_request *obj_req;
2539 u32 i;
2540 int ret;
2542 if (fctx->pos_type == OBJ_REQUEST_NODATA ||
2543 !rbd_layout_is_fancy(&rbd_dev->layout))
2544 return rbd_img_fill_request_nocopy(img_req, img_extents,
2545 num_img_extents, fctx);
2547 img_req->data_type = OBJ_REQUEST_OWN_BVECS;
2550 * Create object requests and determine ->bvec_count for each object
2551 * request. Note that ->bvec_count sum over all object requests may
2552 * be greater than the number of bio_vecs in the provided bio (list)
2553 * or bio_vec array because when mapped, those bio_vecs can straddle
2554 * stripe unit boundaries.
2556 fctx->iter = *fctx->pos;
2557 for (i = 0; i < num_img_extents; i++) {
2558 ret = ceph_file_to_extents(&rbd_dev->layout,
2559 img_extents[i].fe_off,
2560 img_extents[i].fe_len,
2561 &img_req->object_extents,
2562 alloc_object_extent, img_req,
2563 fctx->count_fn, &fctx->iter);
2564 if (ret)
2565 return ret;
2566 }
2568 for_each_obj_request(img_req, obj_req) {
2569 obj_req->bvec_pos.bvecs = kmalloc_array(obj_req->bvec_count,
2570 sizeof(*obj_req->bvec_pos.bvecs),
2571 GFP_NOIO);
2572 if (!obj_req->bvec_pos.bvecs)
2573 return -ENOMEM;
2574 }
2577 * Fill in each object request's private bio_vec array, splitting and
2578 * rearranging the provided bio_vecs in stripe unit chunks as needed.
2580 fctx->iter = *fctx->pos;
2581 for (i = 0; i < num_img_extents; i++) {
2582 ret = ceph_iterate_extents(&rbd_dev->layout,
2583 img_extents[i].fe_off,
2584 img_extents[i].fe_len,
2585 &img_req->object_extents,
2586 fctx->copy_fn, &fctx->iter);
2587 if (ret)
2588 return ret;
2589 }
2591 return __rbd_img_fill_request(img_req);
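/*
 * [Editor's sketch] The copy path above in pseudo-steps:
 *
 *	pass 1: ceph_file_to_extents()  - create object requests and
 *	        count bio_vecs per object (->count_fn)
 *	pass 2: kmalloc_array()         - allocate each request's
 *	        private bio_vec array
 *	pass 3: ceph_iterate_extents()  - split and copy bio_vecs into
 *	        the private arrays (->copy_fn)
 *
 * Only then does __rbd_img_fill_request() prune the requests and arm
 * the state machines.
 */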
2594 static int rbd_img_fill_nodata(struct rbd_img_request *img_req,
2595 u64 off, u64 len)
2597 struct ceph_file_extent ex = { off, len };
2598 union rbd_img_fill_iter dummy = {};
2599 struct rbd_img_fill_ctx fctx = {
2600 .pos_type = OBJ_REQUEST_NODATA,
2601 .pos = &dummy,
2602 };
2604 return rbd_img_fill_request(img_req, &ex, 1, &fctx);
2607 static void set_bio_pos(struct ceph_object_extent *ex, u32 bytes, void *arg)
2609 struct rbd_obj_request *obj_req =
2610 container_of(ex, struct rbd_obj_request, ex);
2611 struct ceph_bio_iter *it = arg;
2613 dout("%s objno %llu bytes %u\n", __func__, ex->oe_objno, bytes);
2614 obj_req->bio_pos = *it;
2615 ceph_bio_iter_advance(it, bytes);
2618 static void count_bio_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg)
2620 struct rbd_obj_request *obj_req =
2621 container_of(ex, struct rbd_obj_request, ex);
2622 struct ceph_bio_iter *it = arg;
2624 dout("%s objno %llu bytes %u\n", __func__, ex->oe_objno, bytes);
2625 ceph_bio_iter_advance_step(it, bytes, ({
2626 obj_req->bvec_count++;
2627 }));
2631 static void copy_bio_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg)
2633 struct rbd_obj_request *obj_req =
2634 container_of(ex, struct rbd_obj_request, ex);
2635 struct ceph_bio_iter *it = arg;
2637 dout("%s objno %llu bytes %u\n", __func__, ex->oe_objno, bytes);
2638 ceph_bio_iter_advance_step(it, bytes, ({
2639 obj_req->bvec_pos.bvecs[obj_req->bvec_idx++] = bv;
2640 obj_req->bvec_pos.iter.bi_size += bv.bv_len;
2641 }));
2644 static int __rbd_img_fill_from_bio(struct rbd_img_request *img_req,
2645 struct ceph_file_extent *img_extents,
2646 u32 num_img_extents,
2647 struct ceph_bio_iter *bio_pos)
2649 struct rbd_img_fill_ctx fctx = {
2650 .pos_type = OBJ_REQUEST_BIO,
2651 .pos = (union rbd_img_fill_iter *)bio_pos,
2652 .set_pos_fn = set_bio_pos,
2653 .count_fn = count_bio_bvecs,
2654 .copy_fn = copy_bio_bvecs,
2655 };
2657 return rbd_img_fill_request(img_req, img_extents, num_img_extents,
2658 &fctx);
2661 static int rbd_img_fill_from_bio(struct rbd_img_request *img_req,
2662 u64 off, u64 len, struct bio *bio)
2664 struct ceph_file_extent ex = { off, len };
2665 struct ceph_bio_iter it = { .bio = bio, .iter = bio->bi_iter };
2667 return __rbd_img_fill_from_bio(img_req, &ex, 1, &it);
2670 static void set_bvec_pos(struct ceph_object_extent *ex, u32 bytes, void *arg)
2672 struct rbd_obj_request *obj_req =
2673 container_of(ex, struct rbd_obj_request, ex);
2674 struct ceph_bvec_iter *it = arg;
2676 obj_req->bvec_pos = *it;
2677 ceph_bvec_iter_shorten(&obj_req->bvec_pos, bytes);
2678 ceph_bvec_iter_advance(it, bytes);
2681 static void count_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg)
2683 struct rbd_obj_request *obj_req =
2684 container_of(ex, struct rbd_obj_request, ex);
2685 struct ceph_bvec_iter *it = arg;
2687 ceph_bvec_iter_advance_step(it, bytes, ({
2688 obj_req->bvec_count++;
2689 }));
2692 static void copy_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg)
2694 struct rbd_obj_request *obj_req =
2695 container_of(ex, struct rbd_obj_request, ex);
2696 struct ceph_bvec_iter *it = arg;
2698 ceph_bvec_iter_advance_step(it, bytes, ({
2699 obj_req->bvec_pos.bvecs[obj_req->bvec_idx++] = bv;
2700 obj_req->bvec_pos.iter.bi_size += bv.bv_len;
2701 }));
2704 static int __rbd_img_fill_from_bvecs(struct rbd_img_request *img_req,
2705 struct ceph_file_extent *img_extents,
2706 u32 num_img_extents,
2707 struct ceph_bvec_iter *bvec_pos)
2709 struct rbd_img_fill_ctx fctx = {
2710 .pos_type = OBJ_REQUEST_BVECS,
2711 .pos = (union rbd_img_fill_iter *)bvec_pos,
2712 .set_pos_fn = set_bvec_pos,
2713 .count_fn = count_bvecs,
2714 .copy_fn = copy_bvecs,
2715 };
2717 return rbd_img_fill_request(img_req, img_extents, num_img_extents,
2718 &fctx);
2721 static int rbd_img_fill_from_bvecs(struct rbd_img_request *img_req,
2722 struct ceph_file_extent *img_extents,
2723 u32 num_img_extents,
2724 struct bio_vec *bvecs)
2726 struct ceph_bvec_iter it = {
2727 .bvecs = bvecs,
2728 .iter = { .bi_size = ceph_file_extents_bytes(img_extents,
2729 num_img_extents) },
2730 };
2732 return __rbd_img_fill_from_bvecs(img_req, img_extents, num_img_extents,
2733 &it);
2736 static void rbd_img_handle_request_work(struct work_struct *work)
2738 struct rbd_img_request *img_req =
2739 container_of(work, struct rbd_img_request, work);
2741 rbd_img_handle_request(img_req, img_req->work_result);
2744 static void rbd_img_schedule(struct rbd_img_request *img_req, int result)
2746 INIT_WORK(&img_req->work, rbd_img_handle_request_work);
2747 img_req->work_result = result;
2748 queue_work(rbd_wq, &img_req->work);
2751 static bool rbd_obj_may_exist(struct rbd_obj_request *obj_req)
2753 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
2755 if (rbd_object_map_may_exist(rbd_dev, obj_req->ex.oe_objno)) {
2756 obj_req->flags |= RBD_OBJ_FLAG_MAY_EXIST;
2757 return true;
2758 }
2760 dout("%s %p objno %llu assuming dne\n", __func__, obj_req,
2761 obj_req->ex.oe_objno);
2762 return false;
2765 static int rbd_obj_read_object(struct rbd_obj_request *obj_req)
2767 struct ceph_osd_request *osd_req;
2770 osd_req = __rbd_obj_add_osd_request(obj_req, NULL, 1);
2771 if (IS_ERR(osd_req))
2772 return PTR_ERR(osd_req);
2774 osd_req_op_extent_init(osd_req, 0, CEPH_OSD_OP_READ,
2775 obj_req->ex.oe_off, obj_req->ex.oe_len, 0, 0);
2776 rbd_osd_setup_data(osd_req, 0);
2777 rbd_osd_format_read(osd_req);
2779 ret = ceph_osdc_alloc_messages(osd_req, GFP_NOIO);
2780 if (ret)
2781 return ret;
2783 rbd_osd_submit(osd_req);
2784 return 0;
2787 static int rbd_obj_read_from_parent(struct rbd_obj_request *obj_req)
2789 struct rbd_img_request *img_req = obj_req->img_request;
2790 struct rbd_device *parent = img_req->rbd_dev->parent;
2791 struct rbd_img_request *child_img_req;
2794 child_img_req = kmem_cache_alloc(rbd_img_request_cache, GFP_NOIO);
2795 if (!child_img_req)
2796 return -ENOMEM;
2798 rbd_img_request_init(child_img_req, parent, OBJ_OP_READ);
2799 __set_bit(IMG_REQ_CHILD, &child_img_req->flags);
2800 child_img_req->obj_request = obj_req;
2802 down_read(&parent->header_rwsem);
2803 rbd_img_capture_header(child_img_req);
2804 up_read(&parent->header_rwsem);
2806 dout("%s child_img_req %p for obj_req %p\n", __func__, child_img_req,
2809 if (!rbd_img_is_write(img_req)) {
2810 switch (img_req->data_type) {
2811 case OBJ_REQUEST_BIO:
2812 ret = __rbd_img_fill_from_bio(child_img_req,
2813 obj_req->img_extents,
2814 obj_req->num_img_extents,
2815 &obj_req->bio_pos);
2816 break;
2817 case OBJ_REQUEST_BVECS:
2818 case OBJ_REQUEST_OWN_BVECS:
2819 ret = __rbd_img_fill_from_bvecs(child_img_req,
2820 obj_req->img_extents,
2821 obj_req->num_img_extents,
2822 &obj_req->bvec_pos);
2823 break;
2824 default:
2825 BUG();
2826 }
2827 } else {
2828 ret = rbd_img_fill_from_bvecs(child_img_req,
2829 obj_req->img_extents,
2830 obj_req->num_img_extents,
2831 obj_req->copyup_bvecs);
2832 }
2833 if (ret) {
2834 rbd_img_request_destroy(child_img_req);
2835 return ret;
2836 }
2838 /* avoid parent chain recursion */
2839 rbd_img_schedule(child_img_req, 0);
2840 return 0;
2843 static bool rbd_obj_advance_read(struct rbd_obj_request *obj_req, int *result)
2845 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
2846 int ret;
2848 again:
2849 switch (obj_req->read_state) {
2850 case RBD_OBJ_READ_START:
2851 rbd_assert(!*result);
2853 if (!rbd_obj_may_exist(obj_req)) {
2854 *result = -ENOENT;
2855 obj_req->read_state = RBD_OBJ_READ_OBJECT;
2856 goto again;
2857 }
2859 ret = rbd_obj_read_object(obj_req);
2860 if (ret) {
2861 *result = ret;
2862 return true;
2863 }
2864 obj_req->read_state = RBD_OBJ_READ_OBJECT;
2865 return false;
2866 case RBD_OBJ_READ_OBJECT:
2867 if (*result == -ENOENT && rbd_dev->parent_overlap) {
2868 /* reverse map this object extent onto the parent */
2869 ret = rbd_obj_calc_img_extents(obj_req, false);
2870 if (ret) {
2871 *result = ret;
2872 return true;
2873 }
2874 if (obj_req->num_img_extents) {
2875 ret = rbd_obj_read_from_parent(obj_req);
2876 if (ret) {
2877 *result = ret;
2878 return true;
2879 }
2880 obj_req->read_state = RBD_OBJ_READ_PARENT;
2881 return false;
2882 }
2883 }
2886 * -ENOENT means a hole in the image -- zero-fill the entire
2887 * length of the request. A short read also implies zero-fill
2888 * to the end of the request.
2890 if (*result == -ENOENT) {
2891 rbd_obj_zero_range(obj_req, 0, obj_req->ex.oe_len);
2892 *result = 0;
2893 } else if (*result >= 0) {
2894 if (*result < obj_req->ex.oe_len)
2895 rbd_obj_zero_range(obj_req, *result,
2896 obj_req->ex.oe_len - *result);
2897 else
2898 rbd_assert(*result == obj_req->ex.oe_len);
2899 *result = 0;
2900 }
2901 return true;
2902 case RBD_OBJ_READ_PARENT:
2904 * The parent image is read only up to the overlap -- zero-fill
2905 * from the overlap to the end of the request.
2907 if (!*result) {
2908 u32 obj_overlap = rbd_obj_img_extents_bytes(obj_req);
2910 if (obj_overlap < obj_req->ex.oe_len)
2911 rbd_obj_zero_range(obj_req, obj_overlap,
2912 obj_req->ex.oe_len - obj_overlap);
2913 }
2914 return true;
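/*
 * [Editor's sketch, not driver code] The zero-fill rules above in
 * isolation, assuming a flat buffer instead of the driver's bio/bvec
 * chains:
 */
static void example_zero_fill_tail(char *buf, int result, size_t len)
{
	if (result == -ENOENT)
		memset(buf, 0, len);			/* hole in the image */
	else if (result >= 0 && (size_t)result < len)
		memset(buf + result, 0, len - result);	/* short read */
}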
2920 static bool rbd_obj_write_is_noop(struct rbd_obj_request *obj_req)
2922 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
2924 if (rbd_object_map_may_exist(rbd_dev, obj_req->ex.oe_objno))
2925 obj_req->flags |= RBD_OBJ_FLAG_MAY_EXIST;
2927 if (!(obj_req->flags & RBD_OBJ_FLAG_MAY_EXIST) &&
2928 (obj_req->flags & RBD_OBJ_FLAG_NOOP_FOR_NONEXISTENT)) {
2929 dout("%s %p noop for nonexistent\n", __func__, obj_req);
2930 return true;
2931 }
2933 return false;
2938 * 0 - object map update sent
2939 * 1 - object map update isn't needed
2942 static int rbd_obj_write_pre_object_map(struct rbd_obj_request *obj_req)
2944 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
2947 if (!(rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP))
2948 return 1;
2950 if (obj_req->flags & RBD_OBJ_FLAG_DELETION)
2951 new_state = OBJECT_PENDING;
2953 new_state = OBJECT_EXISTS;
2955 return rbd_object_map_update(obj_req, CEPH_NOSNAP, new_state, NULL);
2958 static int rbd_obj_write_object(struct rbd_obj_request *obj_req)
2960 struct ceph_osd_request *osd_req;
2961 int num_ops = count_write_ops(obj_req);
2962 int which = 0;
2963 int ret;
2965 if (obj_req->flags & RBD_OBJ_FLAG_COPYUP_ENABLED)
2966 num_ops++; /* stat */
2968 osd_req = rbd_obj_add_osd_request(obj_req, num_ops);
2969 if (IS_ERR(osd_req))
2970 return PTR_ERR(osd_req);
2972 if (obj_req->flags & RBD_OBJ_FLAG_COPYUP_ENABLED) {
2973 ret = rbd_osd_setup_stat(osd_req, which++);
2974 if (ret)
2975 return ret;
2976 }
2978 rbd_osd_setup_write_ops(osd_req, which);
2979 rbd_osd_format_write(osd_req);
2981 ret = ceph_osdc_alloc_messages(osd_req, GFP_NOIO);
2982 if (ret)
2983 return ret;
2985 rbd_osd_submit(osd_req);
2986 return 0;
2990 * copyup_bvecs pages are never highmem pages
2992 static bool is_zero_bvecs(struct bio_vec *bvecs, u32 bytes)
2994 struct ceph_bvec_iter it = {
2995 .bvecs = bvecs,
2996 .iter = { .bi_size = bytes },
2997 };
2999 ceph_bvec_iter_advance_step(&it, bytes, ({
3000 if (memchr_inv(page_address(bv.bv_page) + bv.bv_offset, 0,
3001 bv.bv_len))
3002 return false;
3003 }));
3005 return true;
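/*
 * [Editor's sketch] memchr_inv() above is the standard "is this buffer
 * all zeroes?" idiom: it returns the address of the first byte that
 * differs from the given value, or NULL if none does:
 */
static inline bool example_buf_is_zero(const void *buf, size_t len)
{
	return memchr_inv(buf, 0, len) == NULL;
}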
3007 #define MODS_ONLY U32_MAX
3009 static int rbd_obj_copyup_empty_snapc(struct rbd_obj_request *obj_req,
3010 u32 bytes)
3012 struct ceph_osd_request *osd_req;
3015 dout("%s obj_req %p bytes %u\n", __func__, obj_req, bytes);
3016 rbd_assert(bytes > 0 && bytes != MODS_ONLY);
3018 osd_req = __rbd_obj_add_osd_request(obj_req, &rbd_empty_snapc, 1);
3019 if (IS_ERR(osd_req))
3020 return PTR_ERR(osd_req);
3022 ret = rbd_osd_setup_copyup(osd_req, 0, bytes);
3023 if (ret)
3024 return ret;
3026 rbd_osd_format_write(osd_req);
3028 ret = ceph_osdc_alloc_messages(osd_req, GFP_NOIO);
3029 if (ret)
3030 return ret;
3032 rbd_osd_submit(osd_req);
3033 return 0;
3036 static int rbd_obj_copyup_current_snapc(struct rbd_obj_request *obj_req,
3037 u32 bytes)
3039 struct ceph_osd_request *osd_req;
3040 int num_ops = count_write_ops(obj_req);
3041 int which = 0;
3042 int ret;
3044 dout("%s obj_req %p bytes %u\n", __func__, obj_req, bytes);
3046 if (bytes != MODS_ONLY)
3047 num_ops++; /* copyup */
3049 osd_req = rbd_obj_add_osd_request(obj_req, num_ops);
3050 if (IS_ERR(osd_req))
3051 return PTR_ERR(osd_req);
3053 if (bytes != MODS_ONLY) {
3054 ret = rbd_osd_setup_copyup(osd_req, which++, bytes);
3055 if (ret)
3056 return ret;
3057 }
3059 rbd_osd_setup_write_ops(osd_req, which);
3060 rbd_osd_format_write(osd_req);
3062 ret = ceph_osdc_alloc_messages(osd_req, GFP_NOIO);
3063 if (ret)
3064 return ret;
3066 rbd_osd_submit(osd_req);
3067 return 0;
3070 static int setup_copyup_bvecs(struct rbd_obj_request *obj_req, u64 obj_overlap)
3074 rbd_assert(!obj_req->copyup_bvecs);
3075 obj_req->copyup_bvec_count = calc_pages_for(0, obj_overlap);
3076 obj_req->copyup_bvecs = kcalloc(obj_req->copyup_bvec_count,
3077 sizeof(*obj_req->copyup_bvecs),
3078 GFP_NOIO);
3079 if (!obj_req->copyup_bvecs)
3080 return -ENOMEM;
3082 for (i = 0; i < obj_req->copyup_bvec_count; i++) {
3083 unsigned int len = min(obj_overlap, (u64)PAGE_SIZE);
3085 obj_req->copyup_bvecs[i].bv_page = alloc_page(GFP_NOIO);
3086 if (!obj_req->copyup_bvecs[i].bv_page)
3087 return -ENOMEM;
3089 obj_req->copyup_bvecs[i].bv_offset = 0;
3090 obj_req->copyup_bvecs[i].bv_len = len;
3091 obj_overlap -= len;
3092 }
3094 rbd_assert(!obj_overlap);
3099 * The target object doesn't exist. Read the data for the entire
3100 * target object up to the overlap point (if any) from the parent,
3101 * so we can use it for a copyup.
3103 static int rbd_obj_copyup_read_parent(struct rbd_obj_request *obj_req)
3105 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
3108 rbd_assert(obj_req->num_img_extents);
3109 prune_extents(obj_req->img_extents, &obj_req->num_img_extents,
3110 rbd_dev->parent_overlap);
3111 if (!obj_req->num_img_extents) {
3113 * The overlap has become 0 (most likely because the
3114 * image has been flattened). Re-submit the original write
3115 * request -- pass MODS_ONLY since the copyup isn't needed
3118 return rbd_obj_copyup_current_snapc(obj_req, MODS_ONLY);
3121 ret = setup_copyup_bvecs(obj_req, rbd_obj_img_extents_bytes(obj_req));
3122 if (ret)
3123 return ret;
3125 return rbd_obj_read_from_parent(obj_req);
3128 static void rbd_obj_copyup_object_maps(struct rbd_obj_request *obj_req)
3130 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
3131 struct ceph_snap_context *snapc = obj_req->img_request->snapc;
3136 rbd_assert(!obj_req->pending.result && !obj_req->pending.num_pending);
3138 if (!(rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP))
3141 if (obj_req->flags & RBD_OBJ_FLAG_COPYUP_ZEROS)
3144 for (i = 0; i < snapc->num_snaps; i++) {
3145 if ((rbd_dev->header.features & RBD_FEATURE_FAST_DIFF) &&
3146 i + 1 < snapc->num_snaps)
3147 new_state = OBJECT_EXISTS_CLEAN;
3149 new_state = OBJECT_EXISTS;
3151 ret = rbd_object_map_update(obj_req, snapc->snaps[i],
3152 new_state, NULL);
3153 if (ret < 0) {
3154 obj_req->pending.result = ret;
3155 return;
3156 }
3158 rbd_assert(!ret);
3159 obj_req->pending.num_pending++;
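/*
 * [Editor's note, hedged] Worked example for the loop above: snaps[]
 * in a snap context is ordered newest first.  With FAST_DIFF and
 * snapshots {s3, s2, s1}, the copied-up object is marked OBJECT_EXISTS
 * in s1, where it first appears, and OBJECT_EXISTS_CLEAN in s2 and s3,
 * where it is unchanged, sparing fast-diff a later object scan.
 * Without FAST_DIFF every snapshot gets OBJECT_EXISTS.
 */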
3163 static void rbd_obj_copyup_write_object(struct rbd_obj_request *obj_req)
3165 u32 bytes = rbd_obj_img_extents_bytes(obj_req);
3168 rbd_assert(!obj_req->pending.result && !obj_req->pending.num_pending);
3171 * Only send non-zero copyup data to save some I/O and network
3172 * bandwidth -- zero copyup data is equivalent to the object not
3173 * existing.
3175 if (obj_req->flags & RBD_OBJ_FLAG_COPYUP_ZEROS)
3178 if (obj_req->img_request->snapc->num_snaps && bytes > 0) {
3180 * Send a copyup request with an empty snapshot context to
3181 * deep-copyup the object through all existing snapshots.
3182 * A second request with the current snapshot context will be
3183 * sent for the actual modification.
3185 ret = rbd_obj_copyup_empty_snapc(obj_req, bytes);
3186 if (ret) {
3187 obj_req->pending.result = ret;
3188 return;
3189 }
3191 obj_req->pending.num_pending++;
3192 bytes = MODS_ONLY;
3193 }
3195 ret = rbd_obj_copyup_current_snapc(obj_req, bytes);
3196 if (ret) {
3197 obj_req->pending.result = ret;
3198 return;
3199 }
3201 obj_req->pending.num_pending++;
3204 static bool rbd_obj_advance_copyup(struct rbd_obj_request *obj_req, int *result)
3206 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
3207 int ret;
3209 again:
3210 switch (obj_req->copyup_state) {
3211 case RBD_OBJ_COPYUP_START:
3212 rbd_assert(!*result);
3214 ret = rbd_obj_copyup_read_parent(obj_req);
3215 if (ret) {
3216 *result = ret;
3217 return true;
3218 }
3219 if (obj_req->num_img_extents)
3220 obj_req->copyup_state = RBD_OBJ_COPYUP_READ_PARENT;
3222 obj_req->copyup_state = RBD_OBJ_COPYUP_WRITE_OBJECT;
3223 return false;
3224 case RBD_OBJ_COPYUP_READ_PARENT:
3225 if (*result)
3226 return true;
3228 if (is_zero_bvecs(obj_req->copyup_bvecs,
3229 rbd_obj_img_extents_bytes(obj_req))) {
3230 dout("%s %p detected zeros\n", __func__, obj_req);
3231 obj_req->flags |= RBD_OBJ_FLAG_COPYUP_ZEROS;
3232 }
3234 rbd_obj_copyup_object_maps(obj_req);
3235 if (!obj_req->pending.num_pending) {
3236 *result = obj_req->pending.result;
3237 obj_req->copyup_state = RBD_OBJ_COPYUP_OBJECT_MAPS;
3238 goto again;
3239 }
3240 obj_req->copyup_state = __RBD_OBJ_COPYUP_OBJECT_MAPS;
3241 return false;
3242 case __RBD_OBJ_COPYUP_OBJECT_MAPS:
3243 if (!pending_result_dec(&obj_req->pending, result))
3244 return false;
3245 fallthrough;
3246 case RBD_OBJ_COPYUP_OBJECT_MAPS:
3247 if (*result) {
3248 rbd_warn(rbd_dev, "snap object map update failed: %d",
3249 *result);
3250 return true;
3251 }
3253 rbd_obj_copyup_write_object(obj_req);
3254 if (!obj_req->pending.num_pending) {
3255 *result = obj_req->pending.result;
3256 obj_req->copyup_state = RBD_OBJ_COPYUP_WRITE_OBJECT;
3257 goto again;
3258 }
3259 obj_req->copyup_state = __RBD_OBJ_COPYUP_WRITE_OBJECT;
3260 return false;
3261 case __RBD_OBJ_COPYUP_WRITE_OBJECT:
3262 if (!pending_result_dec(&obj_req->pending, result))
3263 return false;
3264 fallthrough;
3265 case RBD_OBJ_COPYUP_WRITE_OBJECT:
3266 return true;
3267 default:
3268 BUG();
3269 }
3274 * 0 - object map update sent
3275 * 1 - object map update isn't needed
3278 static int rbd_obj_write_post_object_map(struct rbd_obj_request *obj_req)
3280 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
3281 u8 current_state = OBJECT_PENDING;
3283 if (!(rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP))
3284 return 1;
3286 if (!(obj_req->flags & RBD_OBJ_FLAG_DELETION))
3287 return 1;
3289 return rbd_object_map_update(obj_req, CEPH_NOSNAP, OBJECT_NONEXISTENT,
3290 &current_state);
3293 static bool rbd_obj_advance_write(struct rbd_obj_request *obj_req, int *result)
3295 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
3296 int ret;
3298 again:
3299 switch (obj_req->write_state) {
3300 case RBD_OBJ_WRITE_START:
3301 rbd_assert(!*result);
3303 if (rbd_obj_write_is_noop(obj_req))
3304 return true;
3306 ret = rbd_obj_write_pre_object_map(obj_req);
3311 obj_req->write_state = RBD_OBJ_WRITE_PRE_OBJECT_MAP;
3315 case RBD_OBJ_WRITE_PRE_OBJECT_MAP:
3316 if (*result) {
3317 rbd_warn(rbd_dev, "pre object map update failed: %d",
3318 *result);
3319 return true;
3320 }
3321 ret = rbd_obj_write_object(obj_req);
3326 obj_req->write_state = RBD_OBJ_WRITE_OBJECT;
3328 case RBD_OBJ_WRITE_OBJECT:
3329 if (*result == -ENOENT) {
3330 if (obj_req->flags & RBD_OBJ_FLAG_COPYUP_ENABLED) {
3332 obj_req->copyup_state = RBD_OBJ_COPYUP_START;
3333 obj_req->write_state = __RBD_OBJ_WRITE_COPYUP;
3334 goto again;
3335 }
3337 * On a non-existent object:
3338 * delete - -ENOENT, truncate/zero - 0
3340 if (obj_req->flags & RBD_OBJ_FLAG_DELETION)
3341 *result = 0;
3342 }
3343 if (*result)
3344 return true;
3346 obj_req->write_state = RBD_OBJ_WRITE_COPYUP;
3348 case __RBD_OBJ_WRITE_COPYUP:
3349 if (!rbd_obj_advance_copyup(obj_req, result))
3350 return false;
3351 fallthrough;
3352 case RBD_OBJ_WRITE_COPYUP:
3353 if (*result) {
3354 rbd_warn(rbd_dev, "copyup failed: %d", *result);
3355 return true;
3356 }
3357 ret = rbd_obj_write_post_object_map(obj_req);
3362 obj_req->write_state = RBD_OBJ_WRITE_POST_OBJECT_MAP;
3366 case RBD_OBJ_WRITE_POST_OBJECT_MAP:
3367 if (*result)
3368 rbd_warn(rbd_dev, "post object map update failed: %d",
3369 *result);
3370 return true;
3377 * Return true if @obj_req is completed.
3379 static bool __rbd_obj_handle_request(struct rbd_obj_request *obj_req,
3380 int *result)
3382 struct rbd_img_request *img_req = obj_req->img_request;
3383 struct rbd_device *rbd_dev = img_req->rbd_dev;
3384 bool done;
3386 mutex_lock(&obj_req->state_mutex);
3387 if (!rbd_img_is_write(img_req))
3388 done = rbd_obj_advance_read(obj_req, result);
3389 else
3390 done = rbd_obj_advance_write(obj_req, result);
3391 mutex_unlock(&obj_req->state_mutex);
3393 if (done && *result) {
3394 rbd_assert(*result < 0);
3395 rbd_warn(rbd_dev, "%s at objno %llu %llu~%llu result %d",
3396 obj_op_name(img_req->op_type), obj_req->ex.oe_objno,
3397 obj_req->ex.oe_off, obj_req->ex.oe_len, *result);
3398 }
3399 return done;
3403 * This is open-coded in rbd_img_handle_request() to avoid parent chain
3404 * recursion.
3406 static void rbd_obj_handle_request(struct rbd_obj_request *obj_req, int result)
3408 if (__rbd_obj_handle_request(obj_req, &result))
3409 rbd_img_handle_request(obj_req->img_request, result);
3412 static bool need_exclusive_lock(struct rbd_img_request *img_req)
3414 struct rbd_device *rbd_dev = img_req->rbd_dev;
3416 if (!(rbd_dev->header.features & RBD_FEATURE_EXCLUSIVE_LOCK))
3417 return false;
3419 if (rbd_is_ro(rbd_dev))
3420 return false;
3422 rbd_assert(!test_bit(IMG_REQ_CHILD, &img_req->flags));
3423 if (rbd_dev->opts->lock_on_read ||
3424 (rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP))
3425 return true;
3427 return rbd_img_is_write(img_req);
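/*
 * [Editor's sketch] The conditions above, tabulated:
 *
 *	EXCLUSIVE_LOCK feature off     -> false (nothing to take)
 *	read-only mapping              -> false
 *	lock_on_read or OBJECT_MAP on  -> true for reads and writes
 *	otherwise                      -> writes only
 */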
3430 static bool rbd_lock_add_request(struct rbd_img_request *img_req)
3432 struct rbd_device *rbd_dev = img_req->rbd_dev;
3433 bool locked;
3435 lockdep_assert_held(&rbd_dev->lock_rwsem);
3436 locked = rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED;
3437 spin_lock(&rbd_dev->lock_lists_lock);
3438 rbd_assert(list_empty(&img_req->lock_item));
3439 if (!locked)
3440 list_add_tail(&img_req->lock_item, &rbd_dev->acquiring_list);
3441 else
3442 list_add_tail(&img_req->lock_item, &rbd_dev->running_list);
3443 spin_unlock(&rbd_dev->lock_lists_lock);
3445 return locked;
3447 static void rbd_lock_del_request(struct rbd_img_request *img_req)
3449 struct rbd_device *rbd_dev = img_req->rbd_dev;
3450 bool need_wakeup;
3452 lockdep_assert_held(&rbd_dev->lock_rwsem);
3453 spin_lock(&rbd_dev->lock_lists_lock);
3454 rbd_assert(!list_empty(&img_req->lock_item));
3455 list_del_init(&img_req->lock_item);
3456 need_wakeup = (rbd_dev->lock_state == RBD_LOCK_STATE_RELEASING &&
3457 list_empty(&rbd_dev->running_list));
3458 spin_unlock(&rbd_dev->lock_lists_lock);
3459 if (need_wakeup)
3460 complete(&rbd_dev->releasing_wait);
3463 static int rbd_img_exclusive_lock(struct rbd_img_request *img_req)
3465 struct rbd_device *rbd_dev = img_req->rbd_dev;
3467 if (!need_exclusive_lock(img_req))
3468 return 1;
3470 if (rbd_lock_add_request(img_req))
3471 return 1;
3473 if (rbd_dev->opts->exclusive) {
3474 WARN_ON(1); /* lock got released? */
3475 return -EROFS;
3476 }
3479 * Note the use of mod_delayed_work() in rbd_acquire_lock()
3480 * and cancel_delayed_work() in wake_lock_waiters().
3482 dout("%s rbd_dev %p queueing lock_dwork\n", __func__, rbd_dev);
3483 queue_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0);
3484 return 0;
3487 static void rbd_img_object_requests(struct rbd_img_request *img_req)
3489 struct rbd_obj_request *obj_req;
3491 rbd_assert(!img_req->pending.result && !img_req->pending.num_pending);
3493 for_each_obj_request(img_req, obj_req) {
3494 int result = 0;
3496 if (__rbd_obj_handle_request(obj_req, &result)) {
3497 if (result) {
3498 img_req->pending.result = result;
3499 return;
3500 }
3501 } else {
3502 img_req->pending.num_pending++;
3503 }
3507 static bool rbd_img_advance(struct rbd_img_request *img_req, int *result)
3509 struct rbd_device *rbd_dev = img_req->rbd_dev;
3510 int ret;
3512 again:
3513 switch (img_req->state) {
3514 case RBD_IMG_START:
3515 rbd_assert(!*result);
3517 ret = rbd_img_exclusive_lock(img_req);
3518 if (ret < 0) {
3519 *result = ret;
3520 return true;
3521 }
3522 img_req->state = RBD_IMG_EXCLUSIVE_LOCK;
3523 if (ret > 0)
3524 goto again;
3525 fallthrough;
3526 case RBD_IMG_EXCLUSIVE_LOCK:
3527 if (*result)
3528 return true;
3530 rbd_assert(!need_exclusive_lock(img_req) ||
3531 __rbd_is_lock_owner(rbd_dev));
3533 rbd_img_object_requests(img_req);
3534 if (!img_req->pending.num_pending) {
3535 *result = img_req->pending.result;
3536 img_req->state = RBD_IMG_OBJECT_REQUESTS;
3539 img_req->state = __RBD_IMG_OBJECT_REQUESTS;
3541 case __RBD_IMG_OBJECT_REQUESTS:
3542 if (!pending_result_dec(&img_req->pending, result))
3543 return false;
3544 fallthrough;
3545 case RBD_IMG_OBJECT_REQUESTS:
3546 return true;
3547 default:
3548 BUG();
3549 }
3553 * Return true if @img_req is completed.
3555 static bool __rbd_img_handle_request(struct rbd_img_request *img_req,
3556 int *result)
3558 struct rbd_device *rbd_dev = img_req->rbd_dev;
3559 bool done;
3561 if (need_exclusive_lock(img_req)) {
3562 down_read(&rbd_dev->lock_rwsem);
3563 mutex_lock(&img_req->state_mutex);
3564 done = rbd_img_advance(img_req, result);
3565 if (done)
3566 rbd_lock_del_request(img_req);
3567 mutex_unlock(&img_req->state_mutex);
3568 up_read(&rbd_dev->lock_rwsem);
3569 } else {
3570 mutex_lock(&img_req->state_mutex);
3571 done = rbd_img_advance(img_req, result);
3572 mutex_unlock(&img_req->state_mutex);
3573 }
3575 if (done && *result) {
3576 rbd_assert(*result < 0);
3577 rbd_warn(rbd_dev, "%s%s result %d",
3578 test_bit(IMG_REQ_CHILD, &img_req->flags) ? "child " : "",
3579 obj_op_name(img_req->op_type), *result);
3580 }
3582 return done;
3584 static void rbd_img_handle_request(struct rbd_img_request *img_req, int result)
3586 again:
3587 if (!__rbd_img_handle_request(img_req, &result))
3588 return;
3590 if (test_bit(IMG_REQ_CHILD, &img_req->flags)) {
3591 struct rbd_obj_request *obj_req = img_req->obj_request;
3593 rbd_img_request_destroy(img_req);
3594 if (__rbd_obj_handle_request(obj_req, &result)) {
3595 img_req = obj_req->img_request;
3596 goto again;
3597 }
3598 } else {
3599 struct request *rq = blk_mq_rq_from_pdu(img_req);
3601 rbd_img_request_destroy(img_req);
3602 blk_mq_end_request(rq, errno_to_blk_status(result));
3606 static const struct rbd_client_id rbd_empty_cid;
3608 static bool rbd_cid_equal(const struct rbd_client_id *lhs,
3609 const struct rbd_client_id *rhs)
3611 return lhs->gid == rhs->gid && lhs->handle == rhs->handle;
3614 static struct rbd_client_id rbd_get_cid(struct rbd_device *rbd_dev)
3616 struct rbd_client_id cid;
3618 mutex_lock(&rbd_dev->watch_mutex);
3619 cid.gid = ceph_client_gid(rbd_dev->rbd_client->client);
3620 cid.handle = rbd_dev->watch_cookie;
3621 mutex_unlock(&rbd_dev->watch_mutex);
3622 return cid;
3626 * lock_rwsem must be held for write
3628 static void rbd_set_owner_cid(struct rbd_device *rbd_dev,
3629 const struct rbd_client_id *cid)
3631 dout("%s rbd_dev %p %llu-%llu -> %llu-%llu\n", __func__, rbd_dev,
3632 rbd_dev->owner_cid.gid, rbd_dev->owner_cid.handle,
3633 cid->gid, cid->handle);
3634 rbd_dev->owner_cid = *cid; /* struct */
3637 static void format_lock_cookie(struct rbd_device *rbd_dev, char *buf)
3639 mutex_lock(&rbd_dev->watch_mutex);
3640 sprintf(buf, "%s %llu", RBD_LOCK_COOKIE_PREFIX, rbd_dev->watch_cookie);
3641 mutex_unlock(&rbd_dev->watch_mutex);
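/*
 * [Editor's note] Example: with RBD_LOCK_COOKIE_PREFIX "auto" and a
 * watch cookie of 94225004, the cookie written here is the string
 * "auto 94225004".  find_watcher() parses it back with the matching
 * sscanf() format, and rbd_reacquire_lock() regenerates it after the
 * watch is re-established.
 */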
3644 static void __rbd_lock(struct rbd_device *rbd_dev, const char *cookie)
3646 struct rbd_client_id cid = rbd_get_cid(rbd_dev);
3648 rbd_dev->lock_state = RBD_LOCK_STATE_LOCKED;
3649 strcpy(rbd_dev->lock_cookie, cookie);
3650 rbd_set_owner_cid(rbd_dev, &cid);
3651 queue_work(rbd_dev->task_wq, &rbd_dev->acquired_lock_work);
3655 * lock_rwsem must be held for write
3657 static int rbd_lock(struct rbd_device *rbd_dev)
3659 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3660 char cookie[32];
3661 int ret;
3663 WARN_ON(__rbd_is_lock_owner(rbd_dev) ||
3664 rbd_dev->lock_cookie[0] != '\0');
3666 format_lock_cookie(rbd_dev, cookie);
3667 ret = ceph_cls_lock(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
3668 RBD_LOCK_NAME, CEPH_CLS_LOCK_EXCLUSIVE, cookie,
3669 RBD_LOCK_TAG, "", 0);
3673 __rbd_lock(rbd_dev, cookie);
3674 return 0;
3678 * lock_rwsem must be held for write
3680 static void rbd_unlock(struct rbd_device *rbd_dev)
3682 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3683 int ret;
3685 WARN_ON(!__rbd_is_lock_owner(rbd_dev) ||
3686 rbd_dev->lock_cookie[0] == '\0');
3688 ret = ceph_cls_unlock(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
3689 RBD_LOCK_NAME, rbd_dev->lock_cookie);
3690 if (ret && ret != -ENOENT)
3691 rbd_warn(rbd_dev, "failed to unlock header: %d", ret);
3693 /* treat errors as the image is unlocked */
3694 rbd_dev->lock_state = RBD_LOCK_STATE_UNLOCKED;
3695 rbd_dev->lock_cookie[0] = '\0';
3696 rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
3697 queue_work(rbd_dev->task_wq, &rbd_dev->released_lock_work);
3700 static int __rbd_notify_op_lock(struct rbd_device *rbd_dev,
3701 enum rbd_notify_op notify_op,
3702 struct page ***preply_pages,
3703 size_t *preply_len)
3705 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3706 struct rbd_client_id cid = rbd_get_cid(rbd_dev);
3707 char buf[4 + 8 + 8 + CEPH_ENCODING_START_BLK_LEN];
3708 int buf_size = sizeof(buf);
3709 void *p = buf;
3711 dout("%s rbd_dev %p notify_op %d\n", __func__, rbd_dev, notify_op);
3713 /* encode *LockPayload NotifyMessage (op + ClientId) */
3714 ceph_start_encoding(&p, 2, 1, buf_size - CEPH_ENCODING_START_BLK_LEN);
3715 ceph_encode_32(&p, notify_op);
3716 ceph_encode_64(&p, cid.gid);
3717 ceph_encode_64(&p, cid.handle);
3719 return ceph_osdc_notify(osdc, &rbd_dev->header_oid,
3720 &rbd_dev->header_oloc, buf, buf_size,
3721 RBD_NOTIFY_TIMEOUT, preply_pages, preply_len);
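/*
 * [Editor's sketch] What the buffer above ends up holding:
 *
 *	struct_v (1) + struct_compat (1) + struct_len (4)
 *	    = CEPH_ENCODING_START_BLK_LEN bytes of envelope
 *	notify_op   4 bytes, le32
 *	cid.gid     8 bytes, le64
 *	cid.handle  8 bytes, le64
 *
 * hence the buf[4 + 8 + 8 + CEPH_ENCODING_START_BLK_LEN] sizing.
 */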
3724 static void rbd_notify_op_lock(struct rbd_device *rbd_dev,
3725 enum rbd_notify_op notify_op)
3727 __rbd_notify_op_lock(rbd_dev, notify_op, NULL, NULL);
3730 static void rbd_notify_acquired_lock(struct work_struct *work)
3732 struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
3733 acquired_lock_work);
3735 rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_ACQUIRED_LOCK);
3738 static void rbd_notify_released_lock(struct work_struct *work)
3740 struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
3741 released_lock_work);
3743 rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_RELEASED_LOCK);
3746 static int rbd_request_lock(struct rbd_device *rbd_dev)
3748 struct page **reply_pages;
3749 size_t reply_len;
3750 bool lock_owner_responded = false;
3753 dout("%s rbd_dev %p\n", __func__, rbd_dev);
3755 ret = __rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_REQUEST_LOCK,
3756 &reply_pages, &reply_len);
3757 if (ret && ret != -ETIMEDOUT) {
3758 rbd_warn(rbd_dev, "failed to request lock: %d", ret);
3762 if (reply_len > 0 && reply_len <= PAGE_SIZE) {
3763 void *p = page_address(reply_pages[0]);
3764 void *const end = p + reply_len;
3765 u32 n;
3767 ceph_decode_32_safe(&p, end, n, e_inval); /* num_acks */
3768 while (n--) {
3769 u8 struct_v;
3770 u32 len;
3772 ceph_decode_need(&p, end, 8 + 8, e_inval);
3773 p += 8 + 8; /* skip gid and cookie */
3775 ceph_decode_32_safe(&p, end, len, e_inval);
3776 if (!len)
3777 continue;
3779 if (lock_owner_responded) {
3780 rbd_warn(rbd_dev,
3781 "duplicate lock owners detected");
3782 ret = -EIO;
3783 goto out;
3784 }
3786 lock_owner_responded = true;
3787 ret = ceph_start_decoding(&p, end, 1, "ResponseMessage",
3791 "failed to decode ResponseMessage: %d",
3796 ret = ceph_decode_32(&p);
3797 }
3798 }
3800 if (!lock_owner_responded) {
3801 rbd_warn(rbd_dev, "no lock owners detected");
3806 ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len));
3815 * Either image request state machine(s) or rbd_add_acquire_lock()
3816 * (i.e. "rbd map").
3818 static void wake_lock_waiters(struct rbd_device *rbd_dev, int result)
3820 struct rbd_img_request *img_req;
3822 dout("%s rbd_dev %p result %d\n", __func__, rbd_dev, result);
3823 lockdep_assert_held_write(&rbd_dev->lock_rwsem);
3825 cancel_delayed_work(&rbd_dev->lock_dwork);
3826 if (!completion_done(&rbd_dev->acquire_wait)) {
3827 rbd_assert(list_empty(&rbd_dev->acquiring_list) &&
3828 list_empty(&rbd_dev->running_list));
3829 rbd_dev->acquire_err = result;
3830 complete_all(&rbd_dev->acquire_wait);
3831 return;
3832 }
3834 list_for_each_entry(img_req, &rbd_dev->acquiring_list, lock_item) {
3835 mutex_lock(&img_req->state_mutex);
3836 rbd_assert(img_req->state == RBD_IMG_EXCLUSIVE_LOCK);
3837 rbd_img_schedule(img_req, result);
3838 mutex_unlock(&img_req->state_mutex);
3841 list_splice_tail_init(&rbd_dev->acquiring_list, &rbd_dev->running_list);
3844 static int get_lock_owner_info(struct rbd_device *rbd_dev,
3845 struct ceph_locker **lockers, u32 *num_lockers)
3847 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3848 u8 lock_type;
3849 char *lock_tag;
3850 int ret;
3852 dout("%s rbd_dev %p\n", __func__, rbd_dev);
3854 ret = ceph_cls_lock_info(osdc, &rbd_dev->header_oid,
3855 &rbd_dev->header_oloc, RBD_LOCK_NAME,
3856 &lock_type, &lock_tag, lockers, num_lockers);
3860 if (*num_lockers == 0) {
3861 dout("%s rbd_dev %p no lockers detected\n", __func__, rbd_dev);
3865 if (strcmp(lock_tag, RBD_LOCK_TAG)) {
3866 rbd_warn(rbd_dev, "locked by external mechanism, tag %s",
3872 if (lock_type == CEPH_CLS_LOCK_SHARED) {
3873 rbd_warn(rbd_dev, "shared lock type detected");
3878 if (strncmp((*lockers)[0].id.cookie, RBD_LOCK_COOKIE_PREFIX,
3879 strlen(RBD_LOCK_COOKIE_PREFIX))) {
3880 rbd_warn(rbd_dev, "locked by external mechanism, cookie %s",
3881 (*lockers)[0].id.cookie);
3882 ret = -EBUSY;
3883 goto out;
3884 }
3886 out:
3887 kfree(lock_tag);
3888 return ret;
3891 static int find_watcher(struct rbd_device *rbd_dev,
3892 const struct ceph_locker *locker)
3894 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3895 struct ceph_watch_item *watchers;
3896 u32 num_watchers;
3897 u64 cookie;
3898 int i;
3899 int ret;
3901 ret = ceph_osdc_list_watchers(osdc, &rbd_dev->header_oid,
3902 &rbd_dev->header_oloc, &watchers,
3903 &num_watchers);
3904 if (ret)
3905 return ret;
3907 sscanf(locker->id.cookie, RBD_LOCK_COOKIE_PREFIX " %llu", &cookie);
3908 for (i = 0; i < num_watchers; i++) {
3910 * Ignore addr->type while comparing. This mimics
3911 * entity_addr_t::get_legacy_str() + strcmp().
3913 if (ceph_addr_equal_no_type(&watchers[i].addr,
3914 &locker->info.addr) &&
3915 watchers[i].cookie == cookie) {
3916 struct rbd_client_id cid = {
3917 .gid = le64_to_cpu(watchers[i].name.num),
3918 .handle = cookie,
3919 };
3921 dout("%s rbd_dev %p found cid %llu-%llu\n", __func__,
3922 rbd_dev, cid.gid, cid.handle);
3923 rbd_set_owner_cid(rbd_dev, &cid);
3924 ret = 1;
3925 goto out;
3926 }
3927 }
3929 dout("%s rbd_dev %p no watchers\n", __func__, rbd_dev);
3930 ret = 0;
3932 out:
3933 kfree(watchers);
3934 return ret;
3937 * lock_rwsem must be held for write
3939 static int rbd_try_lock(struct rbd_device *rbd_dev)
3941 struct ceph_client *client = rbd_dev->rbd_client->client;
3942 struct ceph_locker *lockers;
3943 u32 num_lockers;
3944 int ret;
3946 for (;;) {
3947 ret = rbd_lock(rbd_dev);
3948 if (ret != -EBUSY)
3949 return ret;
3951 /* determine if the current lock holder is still alive */
3952 ret = get_lock_owner_info(rbd_dev, &lockers, &num_lockers);
3953 if (ret)
3954 goto out;
3956 if (num_lockers == 0)
3957 goto again;
3959 ret = find_watcher(rbd_dev, lockers);
3960 if (ret)
3961 goto out; /* request lock or error */
3963 rbd_warn(rbd_dev, "breaking header lock owned by %s%llu",
3964 ENTITY_NAME(lockers[0].id.name));
3966 ret = ceph_monc_blocklist_add(&client->monc,
3967 &lockers[0].info.addr);
3969 rbd_warn(rbd_dev, "blocklist of %s%llu failed: %d",
3970 ENTITY_NAME(lockers[0].id.name), ret);
3974 ret = ceph_cls_break_lock(&client->osdc, &rbd_dev->header_oid,
3975 &rbd_dev->header_oloc, RBD_LOCK_NAME,
3976 lockers[0].id.cookie,
3977 &lockers[0].id.name);
3978 if (ret && ret != -ENOENT)
3979 goto out;
3981 again:
3982 ceph_free_lockers(lockers, num_lockers);
3983 }
3985 out:
3986 ceph_free_lockers(lockers, num_lockers);
3987 return ret;
3990 static int rbd_post_acquire_action(struct rbd_device *rbd_dev)
3992 int ret;
3994 if (rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP) {
3995 ret = rbd_object_map_open(rbd_dev);
3996 if (ret)
3997 return ret;
3998 }
4000 return 0;
4006 * 1 - caller should call rbd_request_lock()
4009 static int rbd_try_acquire_lock(struct rbd_device *rbd_dev)
4011 int ret;
4013 down_read(&rbd_dev->lock_rwsem);
4014 dout("%s rbd_dev %p read lock_state %d\n", __func__, rbd_dev,
4015 rbd_dev->lock_state);
4016 if (__rbd_is_lock_owner(rbd_dev)) {
4017 up_read(&rbd_dev->lock_rwsem);
4018 return 0;
4019 }
4021 up_read(&rbd_dev->lock_rwsem);
4022 down_write(&rbd_dev->lock_rwsem);
4023 dout("%s rbd_dev %p write lock_state %d\n", __func__, rbd_dev,
4024 rbd_dev->lock_state);
4025 if (__rbd_is_lock_owner(rbd_dev)) {
4026 up_write(&rbd_dev->lock_rwsem);
4027 return 0;
4028 }
4030 ret = rbd_try_lock(rbd_dev);
4032 rbd_warn(rbd_dev, "failed to lock header: %d", ret);
4033 if (ret == -EBLOCKLISTED)
4036 ret = 1; /* request lock anyway */
4039 up_write(&rbd_dev->lock_rwsem);
4043 rbd_assert(rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED);
4044 rbd_assert(list_empty(&rbd_dev->running_list));
4046 ret = rbd_post_acquire_action(rbd_dev);
4048 rbd_warn(rbd_dev, "post-acquire action failed: %d", ret);
4050 * Can't stay in RBD_LOCK_STATE_LOCKED because
4051 * rbd_lock_add_request() would let the request through,
4052 * assuming that e.g. object map is locked and loaded.
4054 rbd_unlock(rbd_dev);
4055 }
4057 out:
4058 wake_lock_waiters(rbd_dev, ret);
4059 up_write(&rbd_dev->lock_rwsem);
4060 return 0;
4063 static void rbd_acquire_lock(struct work_struct *work)
4065 struct rbd_device *rbd_dev = container_of(to_delayed_work(work),
4066 struct rbd_device, lock_dwork);
4067 int ret;
4069 dout("%s rbd_dev %p\n", __func__, rbd_dev);
4070 again:
4071 ret = rbd_try_acquire_lock(rbd_dev);
4072 if (ret <= 0) {
4073 dout("%s rbd_dev %p ret %d - done\n", __func__, rbd_dev, ret);
4074 return;
4075 }
4077 ret = rbd_request_lock(rbd_dev);
4078 if (ret == -ETIMEDOUT) {
4079 goto again; /* treat this as a dead client */
4080 } else if (ret == -EROFS) {
4081 rbd_warn(rbd_dev, "peer will not release lock");
4082 down_write(&rbd_dev->lock_rwsem);
4083 wake_lock_waiters(rbd_dev, ret);
4084 up_write(&rbd_dev->lock_rwsem);
4085 } else if (ret < 0) {
4086 rbd_warn(rbd_dev, "error requesting lock: %d", ret);
4087 mod_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork,
4088 RBD_RETRY_DELAY);
4089 } else {
4091 * lock owner acked, but resend if we don't see them
4092 * release the lock
4094 dout("%s rbd_dev %p requeuing lock_dwork\n", __func__,
4096 mod_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork,
4097 msecs_to_jiffies(2 * RBD_NOTIFY_TIMEOUT * MSEC_PER_SEC));
4101 static bool rbd_quiesce_lock(struct rbd_device *rbd_dev)
4103 bool need_wait;
4105 dout("%s rbd_dev %p\n", __func__, rbd_dev);
4106 lockdep_assert_held_write(&rbd_dev->lock_rwsem);
4108 if (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED)
4109 return false;
4112 * Ensure that all in-flight IO is flushed.
4114 rbd_dev->lock_state = RBD_LOCK_STATE_RELEASING;
4115 rbd_assert(!completion_done(&rbd_dev->releasing_wait));
4116 need_wait = !list_empty(&rbd_dev->running_list);
4117 downgrade_write(&rbd_dev->lock_rwsem);
4118 if (need_wait)
4119 wait_for_completion(&rbd_dev->releasing_wait);
4120 up_read(&rbd_dev->lock_rwsem);
4122 down_write(&rbd_dev->lock_rwsem);
4123 if (rbd_dev->lock_state != RBD_LOCK_STATE_RELEASING)
4124 return false;
4126 rbd_assert(list_empty(&rbd_dev->running_list));
4127 return true;
4130 static void rbd_pre_release_action(struct rbd_device *rbd_dev)
4132 if (rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP)
4133 rbd_object_map_close(rbd_dev);
4136 static void __rbd_release_lock(struct rbd_device *rbd_dev)
4138 rbd_assert(list_empty(&rbd_dev->running_list));
4140 rbd_pre_release_action(rbd_dev);
4141 rbd_unlock(rbd_dev);
4145 * lock_rwsem must be held for write
4147 static void rbd_release_lock(struct rbd_device *rbd_dev)
4149 if (!rbd_quiesce_lock(rbd_dev))
4150 return;
4152 __rbd_release_lock(rbd_dev);
4155 * Give others a chance to grab the lock - we would re-acquire
4156 * almost immediately if we got new IO while draining the running
4157 * list otherwise. We need to ack our own notifications, so this
4158 * lock_dwork will be requeued from rbd_handle_released_lock() by
4159 * way of maybe_kick_acquire().
4161 cancel_delayed_work(&rbd_dev->lock_dwork);
4164 static void rbd_release_lock_work(struct work_struct *work)
4166 struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
4169 down_write(&rbd_dev->lock_rwsem);
4170 rbd_release_lock(rbd_dev);
4171 up_write(&rbd_dev->lock_rwsem);
4174 static void maybe_kick_acquire(struct rbd_device *rbd_dev)
4178 dout("%s rbd_dev %p\n", __func__, rbd_dev);
4179 if (__rbd_is_lock_owner(rbd_dev))
4180 return;
4182 spin_lock(&rbd_dev->lock_lists_lock);
4183 have_requests = !list_empty(&rbd_dev->acquiring_list);
4184 spin_unlock(&rbd_dev->lock_lists_lock);
4185 if (have_requests || delayed_work_pending(&rbd_dev->lock_dwork)) {
4186 dout("%s rbd_dev %p kicking lock_dwork\n", __func__, rbd_dev);
4187 mod_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0);
4191 static void rbd_handle_acquired_lock(struct rbd_device *rbd_dev, u8 struct_v,
4192 void **p)
4194 struct rbd_client_id cid = { 0 };
4196 if (struct_v >= 2) {
4197 cid.gid = ceph_decode_64(p);
4198 cid.handle = ceph_decode_64(p);
4201 dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
4202 cid.handle);
4203 if (!rbd_cid_equal(&cid, &rbd_empty_cid)) {
4204 down_write(&rbd_dev->lock_rwsem);
4205 if (rbd_cid_equal(&cid, &rbd_dev->owner_cid)) {
4207 * we already know that the remote client is
4208 * the owner
4210 up_write(&rbd_dev->lock_rwsem);
4211 return;
4212 }
4214 rbd_set_owner_cid(rbd_dev, &cid);
4215 downgrade_write(&rbd_dev->lock_rwsem);
4216 } else {
4217 down_read(&rbd_dev->lock_rwsem);
4218 }
4220 maybe_kick_acquire(rbd_dev);
4221 up_read(&rbd_dev->lock_rwsem);
4224 static void rbd_handle_released_lock(struct rbd_device *rbd_dev, u8 struct_v,
4225 void **p)
4227 struct rbd_client_id cid = { 0 };
4229 if (struct_v >= 2) {
4230 cid.gid = ceph_decode_64(p);
4231 cid.handle = ceph_decode_64(p);
4234 dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
4235 cid.handle);
4236 if (!rbd_cid_equal(&cid, &rbd_empty_cid)) {
4237 down_write(&rbd_dev->lock_rwsem);
4238 if (!rbd_cid_equal(&cid, &rbd_dev->owner_cid)) {
4239 dout("%s rbd_dev %p unexpected owner, cid %llu-%llu != owner_cid %llu-%llu\n",
4240 __func__, rbd_dev, cid.gid, cid.handle,
4241 rbd_dev->owner_cid.gid, rbd_dev->owner_cid.handle);
4242 up_write(&rbd_dev->lock_rwsem);
4243 return;
4244 }
4246 rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
4247 downgrade_write(&rbd_dev->lock_rwsem);
4248 } else {
4249 down_read(&rbd_dev->lock_rwsem);
4250 }
4252 maybe_kick_acquire(rbd_dev);
4253 up_read(&rbd_dev->lock_rwsem);
4257 * Returns result for ResponseMessage to be encoded (<= 0), or 1 if no
4258 * ResponseMessage is needed.
4260 static int rbd_handle_request_lock(struct rbd_device *rbd_dev, u8 struct_v,
4261 void **p)
4263 struct rbd_client_id my_cid = rbd_get_cid(rbd_dev);
4264 struct rbd_client_id cid = { 0 };
4267 if (struct_v >= 2) {
4268 cid.gid = ceph_decode_64(p);
4269 cid.handle = ceph_decode_64(p);
4272 dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
4273 cid.handle);
4274 if (rbd_cid_equal(&cid, &my_cid))
4275 return result;
4277 down_read(&rbd_dev->lock_rwsem);
4278 if (__rbd_is_lock_owner(rbd_dev)) {
4279 if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED &&
4280 rbd_cid_equal(&rbd_dev->owner_cid, &rbd_empty_cid))
4281 goto out_unlock;
4284 * encode ResponseMessage(0) so the peer can detect
4285 * a missing owner
4287 result = 0;
4289 if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED) {
4290 if (!rbd_dev->opts->exclusive) {
4291 dout("%s rbd_dev %p queueing unlock_work\n",
4293 queue_work(rbd_dev->task_wq,
4294 &rbd_dev->unlock_work);
4295 } else {
4296 /* refuse to release the lock */
4297 result = -EROFS;
4298 }
4299 }
4300 }
4302 out_unlock:
4303 up_read(&rbd_dev->lock_rwsem);
4304 return result;
4307 static void __rbd_acknowledge_notify(struct rbd_device *rbd_dev,
4308 u64 notify_id, u64 cookie, s32 *result)
4310 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4311 char buf[4 + CEPH_ENCODING_START_BLK_LEN];
4312 int buf_size = sizeof(buf);
4313 int ret;
4315 if (result) {
4316 void *p = buf;
4318 /* encode ResponseMessage */
4319 ceph_start_encoding(&p, 1, 1,
4320 buf_size - CEPH_ENCODING_START_BLK_LEN);
4321 ceph_encode_32(&p, *result);
4322 } else {
4323 buf_size = 0;
4324 }
4326 ret = ceph_osdc_notify_ack(osdc, &rbd_dev->header_oid,
4327 &rbd_dev->header_oloc, notify_id, cookie,
4328 buf, buf_size);
4329 if (ret)
4330 rbd_warn(rbd_dev, "acknowledge_notify failed: %d", ret);
4333 static void rbd_acknowledge_notify(struct rbd_device *rbd_dev, u64 notify_id,
4336 dout("%s rbd_dev %p\n", __func__, rbd_dev);
4337 __rbd_acknowledge_notify(rbd_dev, notify_id, cookie, NULL);
4340 static void rbd_acknowledge_notify_result(struct rbd_device *rbd_dev,
4341 u64 notify_id, u64 cookie, s32 result)
4343 dout("%s rbd_dev %p result %d\n", __func__, rbd_dev, result);
4344 __rbd_acknowledge_notify(rbd_dev, notify_id, cookie, &result);
4347 static void rbd_watch_cb(void *arg, u64 notify_id, u64 cookie,
4348 u64 notifier_id, void *data, size_t data_len)
4350 struct rbd_device *rbd_dev = arg;
4351 void *p = data;
4352 void *const end = p + data_len;
4353 u8 struct_v = 0;
4354 u32 len;
4355 u32 notify_op;
4356 int ret;
4358 dout("%s rbd_dev %p cookie %llu notify_id %llu data_len %zu\n",
4359 __func__, rbd_dev, cookie, notify_id, data_len);
4361 ret = ceph_start_decoding(&p, end, 1, "NotifyMessage",
4364 rbd_warn(rbd_dev, "failed to decode NotifyMessage: %d",
4369 notify_op = ceph_decode_32(&p);
4371 /* legacy notification for header updates */
4372 notify_op = RBD_NOTIFY_OP_HEADER_UPDATE;
4373 len = 0;
4374 }
4376 dout("%s rbd_dev %p notify_op %u\n", __func__, rbd_dev, notify_op);
4377 switch (notify_op) {
4378 case RBD_NOTIFY_OP_ACQUIRED_LOCK:
4379 rbd_handle_acquired_lock(rbd_dev, struct_v, &p);
4380 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
4381 break;
4382 case RBD_NOTIFY_OP_RELEASED_LOCK:
4383 rbd_handle_released_lock(rbd_dev, struct_v, &p);
4384 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
4385 break;
4386 case RBD_NOTIFY_OP_REQUEST_LOCK:
4387 ret = rbd_handle_request_lock(rbd_dev, struct_v, &p);
4388 if (ret <= 0)
4389 rbd_acknowledge_notify_result(rbd_dev, notify_id,
4390 cookie, ret);
4391 else
4392 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
4393 break;
4394 case RBD_NOTIFY_OP_HEADER_UPDATE:
4395 ret = rbd_dev_refresh(rbd_dev);
4397 rbd_warn(rbd_dev, "refresh failed: %d", ret);
4399 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
4400 break;
4401 default:
4402 if (rbd_is_lock_owner(rbd_dev))
4403 rbd_acknowledge_notify_result(rbd_dev, notify_id,
4404 cookie, -EOPNOTSUPP);
4405 else
4406 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
4407 break;
4408 }
4411 static void __rbd_unregister_watch(struct rbd_device *rbd_dev);
4413 static void rbd_watch_errcb(void *arg, u64 cookie, int err)
4415 struct rbd_device *rbd_dev = arg;
4417 rbd_warn(rbd_dev, "encountered watch error: %d", err);
4419 down_write(&rbd_dev->lock_rwsem);
4420 rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
4421 up_write(&rbd_dev->lock_rwsem);
4423 mutex_lock(&rbd_dev->watch_mutex);
4424 if (rbd_dev->watch_state == RBD_WATCH_STATE_REGISTERED) {
4425 __rbd_unregister_watch(rbd_dev);
4426 rbd_dev->watch_state = RBD_WATCH_STATE_ERROR;
4428 queue_delayed_work(rbd_dev->task_wq, &rbd_dev->watch_dwork, 0);
4430 mutex_unlock(&rbd_dev->watch_mutex);
4434 * watch_mutex must be locked
4436 static int __rbd_register_watch(struct rbd_device *rbd_dev)
4438 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4439 struct ceph_osd_linger_request *handle;
4441 rbd_assert(!rbd_dev->watch_handle);
4442 dout("%s rbd_dev %p\n", __func__, rbd_dev);
4444 handle = ceph_osdc_watch(osdc, &rbd_dev->header_oid,
4445 &rbd_dev->header_oloc, rbd_watch_cb,
4446 rbd_watch_errcb, rbd_dev);
4447 if (IS_ERR(handle))
4448 return PTR_ERR(handle);
4450 rbd_dev->watch_handle = handle;
4451 return 0;
4455 * watch_mutex must be locked
4457 static void __rbd_unregister_watch(struct rbd_device *rbd_dev)
4459 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4462 rbd_assert(rbd_dev->watch_handle);
4463 dout("%s rbd_dev %p\n", __func__, rbd_dev);
4465 ret = ceph_osdc_unwatch(osdc, rbd_dev->watch_handle);
4467 rbd_warn(rbd_dev, "failed to unwatch: %d", ret);
4469 rbd_dev->watch_handle = NULL;
4472 static int rbd_register_watch(struct rbd_device *rbd_dev)
4474 int ret;
4476 mutex_lock(&rbd_dev->watch_mutex);
4477 rbd_assert(rbd_dev->watch_state == RBD_WATCH_STATE_UNREGISTERED);
4478 ret = __rbd_register_watch(rbd_dev);
4479 if (ret)
4480 goto out;
4482 rbd_dev->watch_state = RBD_WATCH_STATE_REGISTERED;
4483 rbd_dev->watch_cookie = rbd_dev->watch_handle->linger_id;
4485 out:
4486 mutex_unlock(&rbd_dev->watch_mutex);
4487 return ret;
4490 static void cancel_tasks_sync(struct rbd_device *rbd_dev)
4492 dout("%s rbd_dev %p\n", __func__, rbd_dev);
4494 cancel_work_sync(&rbd_dev->acquired_lock_work);
4495 cancel_work_sync(&rbd_dev->released_lock_work);
4496 cancel_delayed_work_sync(&rbd_dev->lock_dwork);
4497 cancel_work_sync(&rbd_dev->unlock_work);
4501 * header_rwsem must not be held to avoid a deadlock with
4502 * rbd_dev_refresh() when flushing notifies.
4504 static void rbd_unregister_watch(struct rbd_device *rbd_dev)
4506 cancel_tasks_sync(rbd_dev);
4508 mutex_lock(&rbd_dev->watch_mutex);
4509 if (rbd_dev->watch_state == RBD_WATCH_STATE_REGISTERED)
4510 __rbd_unregister_watch(rbd_dev);
4511 rbd_dev->watch_state = RBD_WATCH_STATE_UNREGISTERED;
4512 mutex_unlock(&rbd_dev->watch_mutex);
4514 cancel_delayed_work_sync(&rbd_dev->watch_dwork);
4515 ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);
4519 * lock_rwsem must be held for write
4521 static void rbd_reacquire_lock(struct rbd_device *rbd_dev)
4523 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4524 char cookie[32];
4525 int ret;
4527 if (!rbd_quiesce_lock(rbd_dev))
4528 return;
4530 format_lock_cookie(rbd_dev, cookie);
4531 ret = ceph_cls_set_cookie(osdc, &rbd_dev->header_oid,
4532 &rbd_dev->header_oloc, RBD_LOCK_NAME,
4533 CEPH_CLS_LOCK_EXCLUSIVE, rbd_dev->lock_cookie,
4534 RBD_LOCK_TAG, cookie);
4535 if (ret) {
4536 if (ret != -EOPNOTSUPP)
4537 rbd_warn(rbd_dev, "failed to update lock cookie: %d",
4538 ret);
4541 * Lock cookie cannot be updated on older OSDs, so do
4542 * a manual release and queue an acquire.
4544 __rbd_release_lock(rbd_dev);
4545 queue_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0);
4546 } else {
4547 __rbd_lock(rbd_dev, cookie);
4548 wake_lock_waiters(rbd_dev, 0);
4549 }
4552 static void rbd_reregister_watch(struct work_struct *work)
4554 struct rbd_device *rbd_dev = container_of(to_delayed_work(work),
4555 struct rbd_device, watch_dwork);
4556 int ret;
4558 dout("%s rbd_dev %p\n", __func__, rbd_dev);
4560 mutex_lock(&rbd_dev->watch_mutex);
4561 if (rbd_dev->watch_state != RBD_WATCH_STATE_ERROR) {
4562 mutex_unlock(&rbd_dev->watch_mutex);
4566 ret = __rbd_register_watch(rbd_dev);
4568 rbd_warn(rbd_dev, "failed to reregister watch: %d", ret);
4569 if (ret != -EBLOCKLISTED && ret != -ENOENT) {
4570 queue_delayed_work(rbd_dev->task_wq,
4571 &rbd_dev->watch_dwork,
4572 RBD_RETRY_DELAY);
4573 mutex_unlock(&rbd_dev->watch_mutex);
4574 return;
4575 }
4577 mutex_unlock(&rbd_dev->watch_mutex);
4578 down_write(&rbd_dev->lock_rwsem);
4579 wake_lock_waiters(rbd_dev, ret);
4580 up_write(&rbd_dev->lock_rwsem);
4581 return;
4582 }
4584 rbd_dev->watch_state = RBD_WATCH_STATE_REGISTERED;
4585 rbd_dev->watch_cookie = rbd_dev->watch_handle->linger_id;
4586 mutex_unlock(&rbd_dev->watch_mutex);
4588 down_write(&rbd_dev->lock_rwsem);
4589 if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED)
4590 rbd_reacquire_lock(rbd_dev);
4591 up_write(&rbd_dev->lock_rwsem);
4593 ret = rbd_dev_refresh(rbd_dev);
4595 rbd_warn(rbd_dev, "reregistration refresh failed: %d", ret);
4599 * Synchronous osd object method call. Returns the number of bytes
4600 * returned in the outbound buffer, or a negative error code.
4602 static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
4603 struct ceph_object_id *oid,
4604 struct ceph_object_locator *oloc,
4605 const char *method_name,
4606 const void *outbound,
4607 size_t outbound_size,
4608 void *inbound,
4609 size_t inbound_size)
4611 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4612 struct page *req_page = NULL;
4613 struct page *reply_page;
4614 int ret;
4617 * Method calls are ultimately read operations. The result
4618 * should be placed into the inbound buffer provided. They
4619 * also supply outbound data--parameters for the object
4620 * method. Currently if this is present it will be a
4621 * snapshot id.
4623 if (outbound) {
4624 if (outbound_size > PAGE_SIZE)
4625 return -E2BIG;
4627 req_page = alloc_page(GFP_KERNEL);
4628 if (!req_page)
4629 return -ENOMEM;
4631 memcpy(page_address(req_page), outbound, outbound_size);
4632 }
4634 reply_page = alloc_page(GFP_KERNEL);
4635 if (!reply_page) {
4636 if (req_page)
4637 __free_page(req_page);
4638 return -ENOMEM;
4639 }
4641 ret = ceph_osdc_call(osdc, oid, oloc, RBD_DRV_NAME, method_name,
4642 CEPH_OSD_FLAG_READ, req_page, outbound_size,
4643 &reply_page, &inbound_size);
4644 if (!ret) {
4645 memcpy(inbound, page_address(reply_page), inbound_size);
4646 ret = inbound_size;
4647 }
4649 if (req_page)
4650 __free_page(req_page);
4651 __free_page(reply_page);
4652 return ret;
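/*
 * [Editor's sketch, hedged] Typical call shape -- "get_size" is one of
 * the cls_rbd methods the v2 image code paths use:
 *
 *	__le64 snapid = cpu_to_le64(CEPH_NOSNAP);
 *	struct {
 *		u8 order;
 *		__le64 size;
 *	} __attribute__ ((packed)) size_buf = { 0 };
 *
 *	ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
 *				  &rbd_dev->header_oloc, "get_size",
 *				  &snapid, sizeof(snapid),
 *				  &size_buf, sizeof(size_buf));
 */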
4655 static void rbd_queue_workfn(struct work_struct *work)
4657 struct rbd_img_request *img_request =
4658 container_of(work, struct rbd_img_request, work);
4659 struct rbd_device *rbd_dev = img_request->rbd_dev;
4660 enum obj_operation_type op_type = img_request->op_type;
4661 struct request *rq = blk_mq_rq_from_pdu(img_request);
4662 u64 offset = (u64)blk_rq_pos(rq) << SECTOR_SHIFT;
4663 u64 length = blk_rq_bytes(rq);
4664 u64 mapping_size;
4665 int result;
4667 /* Ignore/skip any zero-length requests */
4668 if (!length) {
4669 dout("%s: zero-length request\n", __func__);
4670 result = 0;
4671 goto err_img_request;
4672 }
4674 blk_mq_start_request(rq);
4676 down_read(&rbd_dev->header_rwsem);
4677 mapping_size = rbd_dev->mapping.size;
4678 rbd_img_capture_header(img_request);
4679 up_read(&rbd_dev->header_rwsem);
4681 if (offset + length > mapping_size) {
4682 rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)", offset,
4683 length, mapping_size);
4684 result = -EIO;
4685 goto err_img_request;
4686 }
4688 dout("%s rbd_dev %p img_req %p %s %llu~%llu\n", __func__, rbd_dev,
4689 img_request, obj_op_name(op_type), offset, length);
4691 if (op_type == OBJ_OP_DISCARD || op_type == OBJ_OP_ZEROOUT)
4692 result = rbd_img_fill_nodata(img_request, offset, length);
4694 result = rbd_img_fill_from_bio(img_request, offset, length,
4695 rq->bio);
4696 if (result)
4697 goto err_img_request;
4699 rbd_img_handle_request(img_request, 0);
4700 return;
4702 err_img_request:
4703 rbd_img_request_destroy(img_request);
4704 if (result)
4705 rbd_warn(rbd_dev, "%s %llx at %llx result %d",
4706 obj_op_name(op_type), length, offset, result);
4707 blk_mq_end_request(rq, errno_to_blk_status(result));
4710 static blk_status_t rbd_queue_rq(struct blk_mq_hw_ctx *hctx,
4711 const struct blk_mq_queue_data *bd)
4713 struct rbd_device *rbd_dev = hctx->queue->queuedata;
4714 struct rbd_img_request *img_req = blk_mq_rq_to_pdu(bd->rq);
4715 enum obj_operation_type op_type;
4717 switch (req_op(bd->rq)) {
4718 case REQ_OP_DISCARD:
4719 op_type = OBJ_OP_DISCARD;
4720 break;
4721 case REQ_OP_WRITE_ZEROES:
4722 op_type = OBJ_OP_ZEROOUT;
4723 break;
4724 case REQ_OP_WRITE:
4725 op_type = OBJ_OP_WRITE;
4726 break;
4727 case REQ_OP_READ:
4728 op_type = OBJ_OP_READ;
4729 break;
4730 default:
4731 rbd_warn(rbd_dev, "unknown req_op %d", req_op(bd->rq));
4732 return BLK_STS_IOERR;
4733 }
4735 rbd_img_request_init(img_req, rbd_dev, op_type);
4737 if (rbd_img_is_write(img_req)) {
4738 if (rbd_is_ro(rbd_dev)) {
4739 rbd_warn(rbd_dev, "%s on read-only mapping",
4740 obj_op_name(img_req->op_type));
4741 return BLK_STS_IOERR;
4742 }
4743 rbd_assert(!rbd_is_snap(rbd_dev));
4746 INIT_WORK(&img_req->work, rbd_queue_workfn);
4747 queue_work(rbd_wq, &img_req->work);
4748 return BLK_STS_OK;
4751 static void rbd_free_disk(struct rbd_device *rbd_dev)
4753 blk_cleanup_disk(rbd_dev->disk);
4754 blk_mq_free_tag_set(&rbd_dev->tag_set);
4755 rbd_dev->disk = NULL;
4758 static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
4759 struct ceph_object_id *oid,
4760 struct ceph_object_locator *oloc,
4761 void *buf, int buf_len)
4764 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4765 struct ceph_osd_request *req;
4766 struct page **pages;
4767 int num_pages = calc_pages_for(0, buf_len);
4768 int ret;
4770 req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_KERNEL);
4771 if (!req)
4772 return -ENOMEM;
4774 ceph_oid_copy(&req->r_base_oid, oid);
4775 ceph_oloc_copy(&req->r_base_oloc, oloc);
4776 req->r_flags = CEPH_OSD_FLAG_READ;
4778 pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
4779 if (IS_ERR(pages)) {
4780 ret = PTR_ERR(pages);
4781 goto out_req;
4782 }
4784 osd_req_op_extent_init(req, 0, CEPH_OSD_OP_READ, 0, buf_len, 0, 0);
4785 osd_req_op_extent_osd_data_pages(req, 0, pages, buf_len, 0, false,
4788 ret = ceph_osdc_alloc_messages(req, GFP_KERNEL);
4789 if (ret)
4790 goto out_req;
4792 ceph_osdc_start_request(osdc, req, false);
4793 ret = ceph_osdc_wait_request(osdc, req);
4794 if (ret >= 0)
4795 ceph_copy_from_page_vector(pages, buf, 0, ret);
4798 ceph_osdc_put_request(req);
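/*
 * Illustrative use only (this mirrors rbd_dev_v1_header_info() below):
 *
 *	ret = rbd_obj_read_sync(rbd_dev, &rbd_dev->header_oid,
 *				&rbd_dev->header_oloc, ondisk, size);
 *
 * On success the return value is the number of bytes actually read,
 * which the caller compares against the size it expected.
 */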
4803 * Read the complete header for the given rbd device. On successful
4804 * return, the rbd_dev->header field will contain up-to-date
4805 * information about the image.
4807 static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev)
4809 struct rbd_image_header_ondisk *ondisk = NULL;
4816 * The complete header will include an array of its 64-bit
4817 * snapshot ids, followed by the names of those snapshots as
4818 * a contiguous block of NUL-terminated strings. Note that
4819 * the number of snapshots could change by the time we read
4820 * it in, in which case we re-read it.
4827 size = sizeof (*ondisk);
4828 size += snap_count * sizeof (struct rbd_image_snap_ondisk);
4830 ondisk = kmalloc(size, GFP_KERNEL);
4831 if (!ondisk)
4832 goto out;
4834 ret = rbd_obj_read_sync(rbd_dev, &rbd_dev->header_oid,
4835 &rbd_dev->header_oloc, ondisk, size);
4838 if ((size_t)ret < size) {
4839 ret = -ENXIO;
4840 rbd_warn(rbd_dev, "short header read (want %zd got %d)",
4841 size, ret);
4842 goto out;
4843 }
4844 if (!rbd_dev_ondisk_valid(ondisk)) {
4845 ret = -ENXIO;
4846 rbd_warn(rbd_dev, "invalid header");
4847 goto out;
4848 }
4850 names_size = le64_to_cpu(ondisk->snap_names_len);
4851 want_count = snap_count;
4852 snap_count = le32_to_cpu(ondisk->snap_count);
4853 } while (snap_count != want_count);
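/*
 * The do/while above exists because the buffer is sized from a
 * previously observed snapshot count: if snapshots were created or
 * deleted while we were reading, the count in the header we just read
 * differs from the count we allocated for, and we re-read with the
 * updated sizes.
 */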
4855 ret = rbd_header_from_disk(rbd_dev, ondisk);
4862 static void rbd_dev_update_size(struct rbd_device *rbd_dev)
4867 * If EXISTS is not set, rbd_dev->disk may be NULL, so don't
4868 * try to update its size. If REMOVING is set, updating size
4869 * is just useless work since the device can't be opened.
4871 if (test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags) &&
4872 !test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags)) {
4873 size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
4874 dout("setting size to %llu sectors", (unsigned long long)size);
4875 set_capacity_and_notify(rbd_dev->disk, size);
4879 static int rbd_dev_refresh(struct rbd_device *rbd_dev)
4884 down_write(&rbd_dev->header_rwsem);
4885 mapping_size = rbd_dev->mapping.size;
4887 ret = rbd_dev_header_info(rbd_dev);
4888 if (ret)
4889 goto out;
4892 * If there is a parent, see if it has disappeared due to the
4893 * mapped image getting flattened.
4895 if (rbd_dev->parent) {
4896 ret = rbd_dev_v2_parent_info(rbd_dev);
4897 if (ret)
4898 goto out;
4899 }
4901 rbd_assert(!rbd_is_snap(rbd_dev));
4902 rbd_dev->mapping.size = rbd_dev->header.image_size;
4904 out:
4905 up_write(&rbd_dev->header_rwsem);
4906 if (!ret && mapping_size != rbd_dev->mapping.size)
4907 rbd_dev_update_size(rbd_dev);
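/*
 * Refresh summary: mapping.size is sampled and the header re-read
 * under header_rwsem (including a parent-info re-check, since the
 * image may have been flattened in the meantime); the gendisk
 * capacity is updated only after the semaphore is dropped, and only
 * if the mapping size actually changed.
 */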
4912 static const struct blk_mq_ops rbd_mq_ops = {
4913 .queue_rq = rbd_queue_rq,
4916 static int rbd_init_disk(struct rbd_device *rbd_dev)
4918 struct gendisk *disk;
4919 struct request_queue *q;
4920 unsigned int objset_bytes =
4921 rbd_dev->layout.object_size * rbd_dev->layout.stripe_count;
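/*
 * Worked example, assuming the defaults (4 MiB objects, stripe_count
 * of 1): objset_bytes = 4 MiB, so the per-request caps below come out
 * to 4 MiB >> SECTOR_SHIFT = 8192 sectors.
 */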
4924 memset(&rbd_dev->tag_set, 0, sizeof(rbd_dev->tag_set));
4925 rbd_dev->tag_set.ops = &rbd_mq_ops;
4926 rbd_dev->tag_set.queue_depth = rbd_dev->opts->queue_depth;
4927 rbd_dev->tag_set.numa_node = NUMA_NO_NODE;
4928 rbd_dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
4929 rbd_dev->tag_set.nr_hw_queues = num_present_cpus();
4930 rbd_dev->tag_set.cmd_size = sizeof(struct rbd_img_request);
4932 err = blk_mq_alloc_tag_set(&rbd_dev->tag_set);
4936 disk = blk_mq_alloc_disk(&rbd_dev->tag_set, rbd_dev);
4937 if (IS_ERR(disk)) {
4938 err = PTR_ERR(disk);
4939 goto out_tag_set;
4940 }
4941 q = disk->queue;
4943 snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
4944 rbd_dev->dev_id);
4945 disk->major = rbd_dev->major;
4946 disk->first_minor = rbd_dev->minor;
4947 if (single_major) {
4948 disk->minors = (1 << RBD_SINGLE_MAJOR_PART_SHIFT);
4949 disk->flags |= GENHD_FL_EXT_DEVT;
4950 } else {
4951 disk->minors = RBD_MINORS_PER_MAJOR;
4952 }
4953 disk->fops = &rbd_bd_ops;
4955 blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
4956 /* QUEUE_FLAG_ADD_RANDOM is off by default for blk-mq */
4958 blk_queue_max_hw_sectors(q, objset_bytes >> SECTOR_SHIFT);
4959 q->limits.max_sectors = queue_max_hw_sectors(q);
4960 blk_queue_max_segments(q, USHRT_MAX);
4961 blk_queue_max_segment_size(q, UINT_MAX);
4962 blk_queue_io_min(q, rbd_dev->opts->alloc_size);
4963 blk_queue_io_opt(q, rbd_dev->opts->alloc_size);
4965 if (rbd_dev->opts->trim) {
4966 blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
4967 q->limits.discard_granularity = rbd_dev->opts->alloc_size;
4968 blk_queue_max_discard_sectors(q, objset_bytes >> SECTOR_SHIFT);
4969 blk_queue_max_write_zeroes_sectors(q, objset_bytes >> SECTOR_SHIFT);
4972 if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC))
4973 blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, q);
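/*
 * Rationale: unless NOCRC is set, the messenger checksums page
 * contents while a write is in flight, so those pages must not be
 * modified until the write commits -- hence stable writes.
 */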
4975 rbd_dev->disk = disk;
4979 blk_mq_free_tag_set(&rbd_dev->tag_set);
4987 static struct rbd_device *dev_to_rbd_dev(struct device *dev)
4989 return container_of(dev, struct rbd_device, dev);
4992 static ssize_t rbd_size_show(struct device *dev,
4993 struct device_attribute *attr, char *buf)
4995 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
4997 return sprintf(buf, "%llu\n",
4998 (unsigned long long)rbd_dev->mapping.size);
5001 static ssize_t rbd_features_show(struct device *dev,
5002 struct device_attribute *attr, char *buf)
5004 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5006 return sprintf(buf, "0x%016llx\n", rbd_dev->header.features);
5009 static ssize_t rbd_major_show(struct device *dev,
5010 struct device_attribute *attr, char *buf)
5012 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5014 if (rbd_dev->major)
5015 return sprintf(buf, "%d\n", rbd_dev->major);
5017 return sprintf(buf, "(none)\n");
5020 static ssize_t rbd_minor_show(struct device *dev,
5021 struct device_attribute *attr, char *buf)
5023 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5025 return sprintf(buf, "%d\n", rbd_dev->minor);
5028 static ssize_t rbd_client_addr_show(struct device *dev,
5029 struct device_attribute *attr, char *buf)
5031 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5032 struct ceph_entity_addr *client_addr =
5033 ceph_client_addr(rbd_dev->rbd_client->client);
5035 return sprintf(buf, "%pISpc/%u\n", &client_addr->in_addr,
5036 le32_to_cpu(client_addr->nonce));
5039 static ssize_t rbd_client_id_show(struct device *dev,
5040 struct device_attribute *attr, char *buf)
5042 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5044 return sprintf(buf, "client%lld\n",
5045 ceph_client_gid(rbd_dev->rbd_client->client));
5048 static ssize_t rbd_cluster_fsid_show(struct device *dev,
5049 struct device_attribute *attr, char *buf)
5051 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5053 return sprintf(buf, "%pU\n", &rbd_dev->rbd_client->client->fsid);
5056 static ssize_t rbd_config_info_show(struct device *dev,
5057 struct device_attribute *attr, char *buf)
5059 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5061 if (!capable(CAP_SYS_ADMIN))
5062 return -EPERM;
5064 return sprintf(buf, "%s\n", rbd_dev->config_info);
5067 static ssize_t rbd_pool_show(struct device *dev,
5068 struct device_attribute *attr, char *buf)
5070 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5072 return sprintf(buf, "%s\n", rbd_dev->spec->pool_name);
5075 static ssize_t rbd_pool_id_show(struct device *dev,
5076 struct device_attribute *attr, char *buf)
5078 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5080 return sprintf(buf, "%llu\n",
5081 (unsigned long long) rbd_dev->spec->pool_id);
5084 static ssize_t rbd_pool_ns_show(struct device *dev,
5085 struct device_attribute *attr, char *buf)
5087 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5089 return sprintf(buf, "%s\n", rbd_dev->spec->pool_ns ?: "");
5092 static ssize_t rbd_name_show(struct device *dev,
5093 struct device_attribute *attr, char *buf)
5095 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5097 if (rbd_dev->spec->image_name)
5098 return sprintf(buf, "%s\n", rbd_dev->spec->image_name);
5100 return sprintf(buf, "(unknown)\n");
5103 static ssize_t rbd_image_id_show(struct device *dev,
5104 struct device_attribute *attr, char *buf)
5106 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5108 return sprintf(buf, "%s\n", rbd_dev->spec->image_id);
5112 * Shows the name of the currently-mapped snapshot (or
5113 * RBD_SNAP_HEAD_NAME for the base image).
5115 static ssize_t rbd_snap_show(struct device *dev,
5116 struct device_attribute *attr,
5119 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5121 return sprintf(buf, "%s\n", rbd_dev->spec->snap_name);
5124 static ssize_t rbd_snap_id_show(struct device *dev,
5125 struct device_attribute *attr, char *buf)
5127 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5129 return sprintf(buf, "%llu\n", rbd_dev->spec->snap_id);
5133 * For a v2 image, shows the chain of parent images, separated by empty
5134 * lines. For v1 images or if there is no parent, shows "(no parent image)".
5137 static ssize_t rbd_parent_show(struct device *dev,
5138 struct device_attribute *attr,
5141 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5144 if (!rbd_dev->parent)
5145 return sprintf(buf, "(no parent image)\n");
5147 for ( ; rbd_dev->parent; rbd_dev = rbd_dev->parent) {
5148 struct rbd_spec *spec = rbd_dev->parent_spec;
5150 count += sprintf(&buf[count], "%s"
5151 "pool_id %llu\npool_name %s\n"
5152 "pool_ns %s\n"
5153 "image_id %s\nimage_name %s\n"
5154 "snap_id %llu\nsnap_name %s\n"
5155 "overlap %llu\n",
5156 !count ? "" : "\n", /* first? */
5157 spec->pool_id, spec->pool_name,
5158 spec->pool_ns ?: "",
5159 spec->image_id, spec->image_name ?: "(unknown)",
5160 spec->snap_id, spec->snap_name,
5161 rbd_dev->parent_overlap);
5167 static ssize_t rbd_image_refresh(struct device *dev,
5168 struct device_attribute *attr,
5172 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
6175 if (!capable(CAP_SYS_ADMIN))
6176 return -EPERM;
5178 ret = rbd_dev_refresh(rbd_dev);
5185 static DEVICE_ATTR(size, 0444, rbd_size_show, NULL);
5186 static DEVICE_ATTR(features, 0444, rbd_features_show, NULL);
5187 static DEVICE_ATTR(major, 0444, rbd_major_show, NULL);
5188 static DEVICE_ATTR(minor, 0444, rbd_minor_show, NULL);
5189 static DEVICE_ATTR(client_addr, 0444, rbd_client_addr_show, NULL);
5190 static DEVICE_ATTR(client_id, 0444, rbd_client_id_show, NULL);
5191 static DEVICE_ATTR(cluster_fsid, 0444, rbd_cluster_fsid_show, NULL);
5192 static DEVICE_ATTR(config_info, 0400, rbd_config_info_show, NULL);
5193 static DEVICE_ATTR(pool, 0444, rbd_pool_show, NULL);
5194 static DEVICE_ATTR(pool_id, 0444, rbd_pool_id_show, NULL);
5195 static DEVICE_ATTR(pool_ns, 0444, rbd_pool_ns_show, NULL);
5196 static DEVICE_ATTR(name, 0444, rbd_name_show, NULL);
5197 static DEVICE_ATTR(image_id, 0444, rbd_image_id_show, NULL);
5198 static DEVICE_ATTR(refresh, 0200, NULL, rbd_image_refresh);
5199 static DEVICE_ATTR(current_snap, 0444, rbd_snap_show, NULL);
5200 static DEVICE_ATTR(snap_id, 0444, rbd_snap_id_show, NULL);
5201 static DEVICE_ATTR(parent, 0444, rbd_parent_show, NULL);
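/*
 * Illustrative usage only (see Documentation/ABI/testing/sysfs-bus-rbd
 * for the authoritative list): for the mapping with dev_id 0,
 *
 *	$ cat /sys/bus/rbd/devices/0/size
 *	$ cat /sys/bus/rbd/devices/0/current_snap
 *	# echo 1 > /sys/bus/rbd/devices/0/refresh    (needs CAP_SYS_ADMIN)
 */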
5203 static struct attribute *rbd_attrs[] = {
5204 &dev_attr_size.attr,
5205 &dev_attr_features.attr,
5206 &dev_attr_major.attr,
5207 &dev_attr_minor.attr,
5208 &dev_attr_client_addr.attr,
5209 &dev_attr_client_id.attr,
5210 &dev_attr_cluster_fsid.attr,
5211 &dev_attr_config_info.attr,
5212 &dev_attr_pool.attr,
5213 &dev_attr_pool_id.attr,
5214 &dev_attr_pool_ns.attr,
5215 &dev_attr_name.attr,
5216 &dev_attr_image_id.attr,
5217 &dev_attr_current_snap.attr,
5218 &dev_attr_snap_id.attr,
5219 &dev_attr_parent.attr,
5220 &dev_attr_refresh.attr,
5224 static struct attribute_group rbd_attr_group = {
5228 static const struct attribute_group *rbd_attr_groups[] = {
5233 static void rbd_dev_release(struct device *dev);
5235 static const struct device_type rbd_device_type = {
5237 .groups = rbd_attr_groups,
5238 .release = rbd_dev_release,
5241 static struct rbd_spec *rbd_spec_get(struct rbd_spec *spec)
5243 kref_get(&spec->kref);
5248 static void rbd_spec_free(struct kref *kref);
5249 static void rbd_spec_put(struct rbd_spec *spec)
5252 kref_put(&spec->kref, rbd_spec_free);
5255 static struct rbd_spec *rbd_spec_alloc(void)
5257 struct rbd_spec *spec;
5259 spec = kzalloc(sizeof (*spec), GFP_KERNEL);
5263 spec->pool_id = CEPH_NOPOOL;
5264 spec->snap_id = CEPH_NOSNAP;
5265 kref_init(&spec->kref);
5270 static void rbd_spec_free(struct kref *kref)
5272 struct rbd_spec *spec = container_of(kref, struct rbd_spec, kref);
5274 kfree(spec->pool_name);
5275 kfree(spec->pool_ns);
5276 kfree(spec->image_id);
5277 kfree(spec->image_name);
5278 kfree(spec->snap_name);
5282 static void rbd_dev_free(struct rbd_device *rbd_dev)
5284 WARN_ON(rbd_dev->watch_state != RBD_WATCH_STATE_UNREGISTERED);
5285 WARN_ON(rbd_dev->lock_state != RBD_LOCK_STATE_UNLOCKED);
5287 ceph_oid_destroy(&rbd_dev->header_oid);
5288 ceph_oloc_destroy(&rbd_dev->header_oloc);
5289 kfree(rbd_dev->config_info);
5291 rbd_put_client(rbd_dev->rbd_client);
5292 rbd_spec_put(rbd_dev->spec);
5293 kfree(rbd_dev->opts);
5297 static void rbd_dev_release(struct device *dev)
5299 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5300 bool need_put = !!rbd_dev->opts;
5303 destroy_workqueue(rbd_dev->task_wq);
5304 ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
5307 rbd_dev_free(rbd_dev);
5310 * This is racy, but way better than dropping the module reference
5311 * outside of the release callback. The race window is pretty small, so
5312 * doing something similar to dm (dm-builtin.c) is overkill.
5315 module_put(THIS_MODULE);
5318 static struct rbd_device *__rbd_dev_create(struct rbd_client *rbdc,
5319 struct rbd_spec *spec)
5321 struct rbd_device *rbd_dev;
5323 rbd_dev = kzalloc(sizeof(*rbd_dev), GFP_KERNEL);
5327 spin_lock_init(&rbd_dev->lock);
5328 INIT_LIST_HEAD(&rbd_dev->node);
5329 init_rwsem(&rbd_dev->header_rwsem);
5331 rbd_dev->header.data_pool_id = CEPH_NOPOOL;
5332 ceph_oid_init(&rbd_dev->header_oid);
5333 rbd_dev->header_oloc.pool = spec->pool_id;
5334 if (spec->pool_ns) {
5335 WARN_ON(!*spec->pool_ns);
5336 rbd_dev->header_oloc.pool_ns =
5337 ceph_find_or_create_string(spec->pool_ns,
5338 strlen(spec->pool_ns));
5341 mutex_init(&rbd_dev->watch_mutex);
5342 rbd_dev->watch_state = RBD_WATCH_STATE_UNREGISTERED;
5343 INIT_DELAYED_WORK(&rbd_dev->watch_dwork, rbd_reregister_watch);
5345 init_rwsem(&rbd_dev->lock_rwsem);
5346 rbd_dev->lock_state = RBD_LOCK_STATE_UNLOCKED;
5347 INIT_WORK(&rbd_dev->acquired_lock_work, rbd_notify_acquired_lock);
5348 INIT_WORK(&rbd_dev->released_lock_work, rbd_notify_released_lock);
5349 INIT_DELAYED_WORK(&rbd_dev->lock_dwork, rbd_acquire_lock);
5350 INIT_WORK(&rbd_dev->unlock_work, rbd_release_lock_work);
5351 spin_lock_init(&rbd_dev->lock_lists_lock);
5352 INIT_LIST_HEAD(&rbd_dev->acquiring_list);
5353 INIT_LIST_HEAD(&rbd_dev->running_list);
5354 init_completion(&rbd_dev->acquire_wait);
5355 init_completion(&rbd_dev->releasing_wait);
5357 spin_lock_init(&rbd_dev->object_map_lock);
5359 rbd_dev->dev.bus = &rbd_bus_type;
5360 rbd_dev->dev.type = &rbd_device_type;
5361 rbd_dev->dev.parent = &rbd_root_dev;
5362 device_initialize(&rbd_dev->dev);
5364 rbd_dev->rbd_client = rbdc;
5365 rbd_dev->spec = spec;
5371 * Create a mapping rbd_dev.
5373 static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
5374 struct rbd_spec *spec,
5375 struct rbd_options *opts)
5377 struct rbd_device *rbd_dev;
5379 rbd_dev = __rbd_dev_create(rbdc, spec);
5383 rbd_dev->opts = opts;
5385 /* get an id and fill in device name */
5386 rbd_dev->dev_id = ida_simple_get(&rbd_dev_id_ida, 0,
5387 minor_to_rbd_dev_id(1 << MINORBITS),
5389 if (rbd_dev->dev_id < 0)
5390 goto fail_rbd_dev;
5392 sprintf(rbd_dev->name, RBD_DRV_NAME "%d", rbd_dev->dev_id);
5393 rbd_dev->task_wq = alloc_ordered_workqueue("%s-tasks", WQ_MEM_RECLAIM,
5395 if (!rbd_dev->task_wq)
5396 goto fail_dev_id;
5398 /* we have a ref from do_rbd_add() */
5399 __module_get(THIS_MODULE);
5401 dout("%s rbd_dev %p dev_id %d\n", __func__, rbd_dev, rbd_dev->dev_id);
5405 ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
5407 rbd_dev_free(rbd_dev);
5411 static void rbd_dev_destroy(struct rbd_device *rbd_dev)
5414 put_device(&rbd_dev->dev);
5418 * Get the size and object order for an image snapshot, or if
5419 * snap_id is CEPH_NOSNAP, gets this information for the base image.
5422 static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
5423 u8 *order, u64 *snap_size)
5425 __le64 snapid = cpu_to_le64(snap_id);
5430 } __attribute__ ((packed)) size_buf = { 0 };
5432 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
5433 &rbd_dev->header_oloc, "get_size",
5434 &snapid, sizeof(snapid),
5435 &size_buf, sizeof(size_buf));
5436 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
5439 if (ret < sizeof (size_buf))
5440 return -ERANGE;
5442 if (order) {
5443 *order = size_buf.order;
5444 dout(" order %u", (unsigned int)*order);
5445 }
5446 *snap_size = le64_to_cpu(size_buf.size);
5448 dout(" snap_id 0x%016llx snap_size = %llu\n",
5449 (unsigned long long)snap_id,
5450 (unsigned long long)*snap_size);
5455 static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev)
5457 return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
5458 &rbd_dev->header.obj_order,
5459 &rbd_dev->header.image_size);
5462 static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
5469 /* Response will be an encoded string, which includes a length */
5470 size = sizeof(__le32) + RBD_OBJ_PREFIX_LEN_MAX;
5471 reply_buf = kzalloc(size, GFP_KERNEL);
5472 if (!reply_buf)
5473 return -ENOMEM;
5475 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
5476 &rbd_dev->header_oloc, "get_object_prefix",
5477 NULL, 0, reply_buf, size);
5478 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
5483 rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
5484 p + ret, NULL, GFP_NOIO);
5487 if (IS_ERR(rbd_dev->header.object_prefix)) {
5488 ret = PTR_ERR(rbd_dev->header.object_prefix);
5489 rbd_dev->header.object_prefix = NULL;
5491 dout(" object_prefix = %s\n", rbd_dev->header.object_prefix);
5499 static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
5500 bool read_only, u64 *snap_features)
5509 } __attribute__ ((packed)) features_buf = { 0 };
5513 features_in.snap_id = cpu_to_le64(snap_id);
5514 features_in.read_only = read_only;
5516 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
5517 &rbd_dev->header_oloc, "get_features",
5518 &features_in, sizeof(features_in),
5519 &features_buf, sizeof(features_buf));
5520 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
5523 if (ret < sizeof (features_buf))
5524 return -ERANGE;
5526 unsup = le64_to_cpu(features_buf.incompat) & ~RBD_FEATURES_SUPPORTED;
5527 if (unsup) {
5528 rbd_warn(rbd_dev, "image uses unsupported features: 0x%llx",
5529 unsup);
5530 return -ENXIO;
5531 }
5533 *snap_features = le64_to_cpu(features_buf.features);
5535 dout(" snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n",
5536 (unsigned long long)snap_id,
5537 (unsigned long long)*snap_features,
5538 (unsigned long long)le64_to_cpu(features_buf.incompat));
5543 static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
5545 return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
5547 &rbd_dev->header.features);
5551 * These are generic image flags, but since they are used only for
5552 * object map, store them in rbd_dev->object_map_flags.
5554 * For the same reason, this function is called only on object map
5555 * (re)load and not on header refresh.
5557 static int rbd_dev_v2_get_flags(struct rbd_device *rbd_dev)
5559 __le64 snapid = cpu_to_le64(rbd_dev->spec->snap_id);
5563 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
5564 &rbd_dev->header_oloc, "get_flags",
5565 &snapid, sizeof(snapid),
5566 &flags, sizeof(flags));
5569 if (ret < sizeof(flags))
5570 return -EBADMSG;
5572 rbd_dev->object_map_flags = le64_to_cpu(flags);
5576 struct parent_image_info {
5578 const char *pool_ns;
5579 const char *image_id;
5587 * The caller is responsible for @pii.
5589 static int decode_parent_image_spec(void **p, void *end,
5590 struct parent_image_info *pii)
5596 ret = ceph_start_decoding(p, end, 1, "ParentImageSpec",
5597 &struct_v, &struct_len);
5601 ceph_decode_64_safe(p, end, pii->pool_id, e_inval);
5602 pii->pool_ns = ceph_extract_encoded_string(p, end, NULL, GFP_KERNEL);
5603 if (IS_ERR(pii->pool_ns)) {
5604 ret = PTR_ERR(pii->pool_ns);
5605 pii->pool_ns = NULL;
5608 pii->image_id = ceph_extract_encoded_string(p, end, NULL, GFP_KERNEL);
5609 if (IS_ERR(pii->image_id)) {
5610 ret = PTR_ERR(pii->image_id);
5611 pii->image_id = NULL;
5614 ceph_decode_64_safe(p, end, pii->snap_id, e_inval);
5621 static int __get_parent_info(struct rbd_device *rbd_dev,
5622 struct page *req_page,
5623 struct page *reply_page,
5624 struct parent_image_info *pii)
5626 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
5627 size_t reply_len = PAGE_SIZE;
5631 ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
5632 "rbd", "parent_get", CEPH_OSD_FLAG_READ,
5633 req_page, sizeof(u64), &reply_page, &reply_len);
5635 return ret == -EOPNOTSUPP ? 1 : ret;
5637 p = page_address(reply_page);
5638 end = p + reply_len;
5639 ret = decode_parent_image_spec(&p, end, pii);
5643 ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
5644 "rbd", "parent_overlap_get", CEPH_OSD_FLAG_READ,
5645 req_page, sizeof(u64), &reply_page, &reply_len);
5649 p = page_address(reply_page);
5650 end = p + reply_len;
5651 ceph_decode_8_safe(&p, end, pii->has_overlap, e_inval);
5652 if (pii->has_overlap)
5653 ceph_decode_64_safe(&p, end, pii->overlap, e_inval);
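/*
 * Protocol note: __get_parent_info() uses the newer "parent_get" and
 * "parent_overlap_get" class methods.  If the OSDs don't implement
 * "parent_get" (-EOPNOTSUPP, translated into a return value of 1
 * above), get_parent_info() falls back to the legacy "get_parent"
 * method handled below.
 */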
5662 * The caller is responsible for @pii.
5664 static int __get_parent_info_legacy(struct rbd_device *rbd_dev,
5665 struct page *req_page,
5666 struct page *reply_page,
5667 struct parent_image_info *pii)
5669 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
5670 size_t reply_len = PAGE_SIZE;
5674 ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
5675 "rbd", "get_parent", CEPH_OSD_FLAG_READ,
5676 req_page, sizeof(u64), &reply_page, &reply_len);
5680 p = page_address(reply_page);
5681 end = p + reply_len;
5682 ceph_decode_64_safe(&p, end, pii->pool_id, e_inval);
5683 pii->image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
5684 if (IS_ERR(pii->image_id)) {
5685 ret = PTR_ERR(pii->image_id);
5686 pii->image_id = NULL;
5689 ceph_decode_64_safe(&p, end, pii->snap_id, e_inval);
5690 pii->has_overlap = true;
5691 ceph_decode_64_safe(&p, end, pii->overlap, e_inval);
5699 static int get_parent_info(struct rbd_device *rbd_dev,
5700 struct parent_image_info *pii)
5702 struct page *req_page, *reply_page;
5706 req_page = alloc_page(GFP_KERNEL);
5710 reply_page = alloc_page(GFP_KERNEL);
5712 __free_page(req_page);
5716 p = page_address(req_page);
5717 ceph_encode_64(&p, rbd_dev->spec->snap_id);
5718 ret = __get_parent_info(rbd_dev, req_page, reply_page, pii);
5720 ret = __get_parent_info_legacy(rbd_dev, req_page, reply_page,
5723 __free_page(req_page);
5724 __free_page(reply_page);
5728 static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
5730 struct rbd_spec *parent_spec;
5731 struct parent_image_info pii = { 0 };
5734 parent_spec = rbd_spec_alloc();
5738 ret = get_parent_info(rbd_dev, &pii);
5742 dout("%s pool_id %llu pool_ns %s image_id %s snap_id %llu has_overlap %d overlap %llu\n",
5743 __func__, pii.pool_id, pii.pool_ns, pii.image_id, pii.snap_id,
5744 pii.has_overlap, pii.overlap);
5746 if (pii.pool_id == CEPH_NOPOOL || !pii.has_overlap) {
5748 * Either the parent never existed, or we have
5749 * record of it but the image got flattened so it no
5750 * longer has a parent. When the parent of a
5751 * layered image disappears we immediately set the
5752 * overlap to 0. The effect of this is that all new
5753 * requests will be treated as if the image had no parent.
5756 * If !pii.has_overlap, the parent image spec is not
5757 * applicable. It's there to avoid duplication in each snapshot record.
5760 if (rbd_dev->parent_overlap) {
5761 rbd_dev->parent_overlap = 0;
5762 rbd_dev_parent_put(rbd_dev);
5763 pr_info("%s: clone image has been flattened\n",
5764 rbd_dev->disk->disk_name);
5767 goto out; /* No parent? No problem. */
5770 /* The ceph file layout needs to fit pool id in 32 bits */
5773 if (pii.pool_id > (u64)U32_MAX) {
5774 rbd_warn(NULL, "parent pool id too large (%llu > %u)",
5775 (unsigned long long)pii.pool_id, U32_MAX);
5780 * The parent won't change (except when the clone is
5781 * flattened, already handled that). So we only need to
5782 * record the parent spec if we have not already done so.
5784 if (!rbd_dev->parent_spec) {
5785 parent_spec->pool_id = pii.pool_id;
5786 if (pii.pool_ns && *pii.pool_ns) {
5787 parent_spec->pool_ns = pii.pool_ns;
5788 pii.pool_ns = NULL;
5789 }
5790 parent_spec->image_id = pii.image_id;
5791 pii.image_id = NULL;
5792 parent_spec->snap_id = pii.snap_id;
5794 rbd_dev->parent_spec = parent_spec;
5795 parent_spec = NULL; /* rbd_dev now owns this */
5799 * We always update the parent overlap. If it's zero we issue
5800 * a warning, as we will proceed as if there was no parent.
5804 /* refresh, careful to warn just once */
5805 if (rbd_dev->parent_overlap)
5806 rbd_warn(rbd_dev,
5807 "clone now standalone (overlap became 0)");
5808 } else {
5809 /* initial probe */
5810 rbd_warn(rbd_dev, "clone is standalone (overlap 0)");
5811 }
5813 rbd_dev->parent_overlap = pii.overlap;
5819 kfree(pii.image_id);
5820 rbd_spec_put(parent_spec);
5824 static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
5828 __le64 stripe_count;
5829 } __attribute__ ((packed)) striping_info_buf = { 0 };
5830 size_t size = sizeof (striping_info_buf);
5834 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
5835 &rbd_dev->header_oloc, "get_stripe_unit_count",
5836 NULL, 0, &striping_info_buf, size);
5837 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
5843 p = &striping_info_buf;
5844 rbd_dev->header.stripe_unit = ceph_decode_64(&p);
5845 rbd_dev->header.stripe_count = ceph_decode_64(&p);
5849 static int rbd_dev_v2_data_pool(struct rbd_device *rbd_dev)
5851 __le64 data_pool_id;
5854 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
5855 &rbd_dev->header_oloc, "get_data_pool",
5856 NULL, 0, &data_pool_id, sizeof(data_pool_id));
5859 if (ret < sizeof(data_pool_id))
5860 return -EBADMSG;
5862 rbd_dev->header.data_pool_id = le64_to_cpu(data_pool_id);
5863 WARN_ON(rbd_dev->header.data_pool_id == CEPH_NOPOOL);
5867 static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
5869 CEPH_DEFINE_OID_ONSTACK(oid);
5870 size_t image_id_size;
5875 void *reply_buf = NULL;
5877 char *image_name = NULL;
5880 rbd_assert(!rbd_dev->spec->image_name);
5882 len = strlen(rbd_dev->spec->image_id);
5883 image_id_size = sizeof (__le32) + len;
5884 image_id = kmalloc(image_id_size, GFP_KERNEL);
5885 if (!image_id)
5886 return NULL;
5888 p = image_id;
5889 end = image_id + image_id_size;
5890 ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32)len);
5892 size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX;
5893 reply_buf = kmalloc(size, GFP_KERNEL);
5894 if (!reply_buf)
5895 goto out;
5897 ceph_oid_printf(&oid, "%s", RBD_DIRECTORY);
5898 ret = rbd_obj_method_sync(rbd_dev, &oid, &rbd_dev->header_oloc,
5899 "dir_get_name", image_id, image_id_size,
5904 end = reply_buf + ret;
5906 image_name = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
5907 if (IS_ERR(image_name))
5910 dout("%s: name is %s len is %zd\n", __func__, image_name, len);
5918 static u64 rbd_v1_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
5920 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
5921 const char *snap_name;
5924 /* Skip over names until we find the one we are looking for */
5926 snap_name = rbd_dev->header.snap_names;
5927 while (which < snapc->num_snaps) {
5928 if (!strcmp(name, snap_name))
5929 return snapc->snaps[which];
5930 snap_name += strlen(snap_name) + 1;
5931 which++;
5932 }
5933 return CEPH_NOSNAP;
5934 }
5936 static u64 rbd_v2_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
5938 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
5943 for (which = 0; !found && which < snapc->num_snaps; which++) {
5944 const char *snap_name;
5946 snap_id = snapc->snaps[which];
5947 snap_name = rbd_dev_v2_snap_name(rbd_dev, snap_id);
5948 if (IS_ERR(snap_name)) {
5949 /* ignore no-longer existing snapshots */
5950 if (PTR_ERR(snap_name) == -ENOENT)
5951 continue;
5952 else
5953 break;
5954 }
5955 found = !strcmp(name, snap_name);
5958 return found ? snap_id : CEPH_NOSNAP;
5962 * Assumes name is never RBD_SNAP_HEAD_NAME; returns CEPH_NOSNAP if
5963 * no snapshot by that name is found, or if an error occurs.
5965 static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
5967 if (rbd_dev->image_format == 1)
5968 return rbd_v1_snap_id_by_name(rbd_dev, name);
5970 return rbd_v2_snap_id_by_name(rbd_dev, name);
5974 * An image being mapped will have everything but the snap id.
5976 static int rbd_spec_fill_snap_id(struct rbd_device *rbd_dev)
5978 struct rbd_spec *spec = rbd_dev->spec;
5980 rbd_assert(spec->pool_id != CEPH_NOPOOL && spec->pool_name);
5981 rbd_assert(spec->image_id && spec->image_name);
5982 rbd_assert(spec->snap_name);
5984 if (strcmp(spec->snap_name, RBD_SNAP_HEAD_NAME)) {
5987 snap_id = rbd_snap_id_by_name(rbd_dev, spec->snap_name);
5988 if (snap_id == CEPH_NOSNAP)
5989 return -ENOENT;
5991 spec->snap_id = snap_id;
5992 } else {
5993 spec->snap_id = CEPH_NOSNAP;
5994 }
6000 * A parent image will have all ids but none of the names.
6002 * All names in an rbd spec are dynamically allocated. It's OK if we
6003 * can't figure out the name for an image id.
6005 static int rbd_spec_fill_names(struct rbd_device *rbd_dev)
6007 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
6008 struct rbd_spec *spec = rbd_dev->spec;
6009 const char *pool_name;
6010 const char *image_name;
6011 const char *snap_name;
6014 rbd_assert(spec->pool_id != CEPH_NOPOOL);
6015 rbd_assert(spec->image_id);
6016 rbd_assert(spec->snap_id != CEPH_NOSNAP);
6018 /* Get the pool name; we have to make our own copy of this */
6020 pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, spec->pool_id);
6022 rbd_warn(rbd_dev, "no pool with id %llu", spec->pool_id);
6025 pool_name = kstrdup(pool_name, GFP_KERNEL);
6029 /* Fetch the image name; tolerate failure here */
6031 image_name = rbd_dev_image_name(rbd_dev);
6033 rbd_warn(rbd_dev, "unable to get image name");
6035 /* Fetch the snapshot name */
6037 snap_name = rbd_snap_name(rbd_dev, spec->snap_id);
6038 if (IS_ERR(snap_name)) {
6039 ret = PTR_ERR(snap_name);
6043 spec->pool_name = pool_name;
6044 spec->image_name = image_name;
6045 spec->snap_name = snap_name;
6055 static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
6064 struct ceph_snap_context *snapc;
6068 * We'll need room for the seq value (maximum snapshot id),
6069 * snapshot count, and array of that many snapshot ids.
6070 * For now we have a fixed upper limit on the number we're
6071 * prepared to receive.
6073 size = sizeof (__le64) + sizeof (__le32) +
6074 RBD_MAX_SNAP_COUNT * sizeof (__le64);
6075 reply_buf = kzalloc(size, GFP_KERNEL);
6076 if (!reply_buf)
6077 return -ENOMEM;
6079 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
6080 &rbd_dev->header_oloc, "get_snapcontext",
6081 NULL, 0, reply_buf, size);
6082 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
6087 end = reply_buf + ret;
6089 ceph_decode_64_safe(&p, end, seq, out);
6090 ceph_decode_32_safe(&p, end, snap_count, out);
6093 * Make sure the reported number of snapshot ids wouldn't go
6094 * beyond the end of our buffer. But before checking that,
6095 * make sure the computed size of the snapshot context we
6096 * allocate is representable in a size_t.
6098 if (snap_count > (SIZE_MAX - sizeof (struct ceph_snap_context))
6099 / sizeof (u64))
6100 goto out;
6103 if (!ceph_has_room(&p, end, snap_count * sizeof (__le64)))
6104 goto out;
6107 snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
6113 for (i = 0; i < snap_count; i++)
6114 snapc->snaps[i] = ceph_decode_64(&p);
6116 ceph_put_snap_context(rbd_dev->header.snapc);
6117 rbd_dev->header.snapc = snapc;
6119 dout(" snap context seq = %llu, snap_count = %u\n",
6120 (unsigned long long)seq, (unsigned int)snap_count);
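/*
 * Layout reminder: a ceph_snap_context is the seq value (the highest
 * snapshot id) followed by snap_count snapshot ids, which are expected
 * in descending order -- the same order the "get_snapcontext" reply
 * carries them in.
 */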
6127 static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
6138 size = sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN;
6139 reply_buf = kmalloc(size, GFP_KERNEL);
6140 if (!reply_buf)
6141 return ERR_PTR(-ENOMEM);
6143 snapid = cpu_to_le64(snap_id);
6144 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
6145 &rbd_dev->header_oloc, "get_snapshot_name",
6146 &snapid, sizeof(snapid), reply_buf, size);
6147 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
6149 snap_name = ERR_PTR(ret);
6154 end = reply_buf + ret;
6155 snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
6156 if (IS_ERR(snap_name))
6159 dout(" snap_id 0x%016llx snap_name = %s\n",
6160 (unsigned long long)snap_id, snap_name);
6167 static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev)
6169 bool first_time = rbd_dev->header.object_prefix == NULL;
6172 ret = rbd_dev_v2_image_size(rbd_dev);
6177 ret = rbd_dev_v2_header_onetime(rbd_dev);
6182 ret = rbd_dev_v2_snap_context(rbd_dev);
6183 if (ret && first_time) {
6184 kfree(rbd_dev->header.object_prefix);
6185 rbd_dev->header.object_prefix = NULL;
6191 static int rbd_dev_header_info(struct rbd_device *rbd_dev)
6193 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
6195 if (rbd_dev->image_format == 1)
6196 return rbd_dev_v1_header_info(rbd_dev);
6198 return rbd_dev_v2_header_info(rbd_dev);
6202 * Skips over white space at *buf, and updates *buf to point to the
6203 * first found non-space character (if any). Returns the length of
6204 * the token (string of non-white space characters) found. Note
6205 * that *buf must be terminated with '\0'.
6207 static inline size_t next_token(const char **buf)
6210 * These are the characters that produce nonzero for
6211 * isspace() in the "C" and "POSIX" locales.
6213 const char *spaces = " \f\n\r\t\v";
6215 *buf += strspn(*buf, spaces); /* Find start of token */
6217 return strcspn(*buf, spaces); /* Return token length */
6221 * Finds the next token in *buf, dynamically allocates a buffer big
6222 * enough to hold a copy of it, and copies the token into the new
6223 * buffer. The copy is guaranteed to be terminated with '\0'. Note
6224 * that a duplicate buffer is created even for a zero-length token.
6226 * Returns a pointer to the newly-allocated duplicate, or a null
6227 * pointer if memory for the duplicate was not available. If
6228 * the lenp argument is a non-null pointer, the length of the token
6229 * (not including the '\0') is returned in *lenp.
6231 * If successful, the *buf pointer will be updated to point beyond
6232 * the end of the found token.
6234 * Note: uses GFP_KERNEL for allocation.
6236 static inline char *dup_token(const char **buf, size_t *lenp)
6241 len = next_token(buf);
6242 dup = kmemdup(*buf, len + 1, GFP_KERNEL);
6245 *(dup + len) = '\0';
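/*
 * Illustrative example only: given
 *
 *	buf = "1.2.3.4:6789 name=admin rbd myimage -"
 *
 * successive next_token()/dup_token() calls in rbd_add_parse_args()
 * below peel off the monitor list, the option string, the pool name,
 * the image name, and finally "-" (map the image head, no snapshot).
 */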
6254 static int rbd_parse_param(struct fs_parameter *param,
6255 struct rbd_parse_opts_ctx *pctx)
6257 struct rbd_options *opt = pctx->opts;
6258 struct fs_parse_result result;
6259 struct p_log log = {.prefix = "rbd"};
6262 ret = ceph_parse_param(param, pctx->copts, NULL);
6263 if (ret != -ENOPARAM)
6266 token = __fs_parse(&log, rbd_parameters, param, &result);
6267 dout("%s fs_parse '%s' token %d\n", __func__, param->key, token);
6268 if (token < 0) {
6269 if (token == -ENOPARAM)
6270 return inval_plog(&log, "Unknown parameter '%s'",
6271 param->key);
6272 return token;
6273 }
6275 switch (token) {
6276 case Opt_queue_depth:
6277 if (result.uint_32 < 1)
6278 goto out_of_range;
6279 opt->queue_depth = result.uint_32;
6281 case Opt_alloc_size:
6282 if (result.uint_32 < SECTOR_SIZE)
6283 goto out_of_range;
6284 if (!is_power_of_2(result.uint_32))
6285 return inval_plog(&log, "alloc_size must be a power of 2");
6286 opt->alloc_size = result.uint_32;
6288 case Opt_lock_timeout:
6289 /* 0 is "wait forever" (i.e. infinite timeout) */
6290 if (result.uint_32 > INT_MAX / 1000)
6291 goto out_of_range;
6292 opt->lock_timeout = msecs_to_jiffies(result.uint_32 * 1000);
6294 case Opt_pool_ns:
6295 kfree(pctx->spec->pool_ns);
6296 pctx->spec->pool_ns = param->string;
6297 param->string = NULL;
6298 break;
6299 case Opt_compression_hint:
6300 switch (result.uint_32) {
6301 case Opt_compression_hint_none:
6302 opt->alloc_hint_flags &=
6303 ~(CEPH_OSD_ALLOC_HINT_FLAG_COMPRESSIBLE |
6304 CEPH_OSD_ALLOC_HINT_FLAG_INCOMPRESSIBLE);
6305 break;
6306 case Opt_compression_hint_compressible:
6307 opt->alloc_hint_flags |=
6308 CEPH_OSD_ALLOC_HINT_FLAG_COMPRESSIBLE;
6309 opt->alloc_hint_flags &=
6310 ~CEPH_OSD_ALLOC_HINT_FLAG_INCOMPRESSIBLE;
6311 break;
6312 case Opt_compression_hint_incompressible:
6313 opt->alloc_hint_flags |=
6314 CEPH_OSD_ALLOC_HINT_FLAG_INCOMPRESSIBLE;
6315 opt->alloc_hint_flags &=
6316 ~CEPH_OSD_ALLOC_HINT_FLAG_COMPRESSIBLE;
6317 break;
6318 default:
6319 BUG();
6320 }
6321 break;
6322 case Opt_read_only:
6323 opt->read_only = true;
6324 break;
6325 case Opt_read_write:
6326 opt->read_only = false;
6327 break;
6328 case Opt_lock_on_read:
6329 opt->lock_on_read = true;
6330 break;
6331 case Opt_exclusive:
6332 opt->exclusive = true;
6333 break;
6343 out_of_range:
6344 return inval_plog(&log, "%s out of range", param->key);
6348 * This duplicates most of generic_parse_monolithic(), untying it from
6349 * fs_context and skipping standard superblock and security options.
6351 static int rbd_parse_options(char *options, struct rbd_parse_opts_ctx *pctx)
6356 dout("%s '%s'\n", __func__, options);
6357 while ((key = strsep(&options, ",")) != NULL) {
6359 struct fs_parameter param = {
6361 .type = fs_value_is_flag,
6363 char *value = strchr(key, '=');
6370 v_len = strlen(value);
6371 param.string = kmemdup_nul(value, v_len,
6375 param.type = fs_value_is_string;
6379 ret = rbd_parse_param(&param, pctx);
6380 kfree(param.string);
6390 * Parse the options provided for an "rbd add" (i.e., rbd image
6391 * mapping) request. These arrive via a write to /sys/bus/rbd/add,
6392 * and the data written is passed here via a NUL-terminated buffer.
6393 * Returns 0 if successful or an error code otherwise.
6395 * The information extracted from these options is recorded in
6396 * the other parameters which return dynamically-allocated
6397 * structures:
6398 *  ceph_opts
6399 * The address of a pointer that will refer to a ceph options
6400 * structure. Caller must release the returned pointer using
6401 * ceph_destroy_options() when it is no longer needed.
6402 *  rbd_opts
6403 * Address of an rbd options pointer. Fully initialized by
6404 * this function; caller must release with kfree().
6405 *  rbd_spec
6406 * Address of an rbd image specification pointer. Fully
6407 * initialized by this function based on parsed options.
6408 * Caller must release with rbd_spec_put().
6410 * The options passed take this form:
6411 * <mon_addrs> <options> <pool_name> <image_name> [<snap_id>]
6412 * where:
6413 *  <mon_addrs>
6414 * A comma-separated list of one or more monitor addresses.
6415 * A monitor address is an ip address, optionally followed
6416 * by a port number (separated by a colon).
6417 * I.e.: ip1[:port1][,ip2[:port2]...]
6418 *  <options>
6419 * A comma-separated list of ceph and/or rbd options.
6420 *  <pool_name>
6421 * The name of the rados pool containing the rbd image.
6422 *  <image_name>
6423 * The name of the image in that pool to map.
6424 *  <snap_id>
6425 * An optional snapshot id. If provided, the mapping will
6426 * present data from the image at the time that snapshot was
6427 * created. The image head is used if no snapshot id is
6428 * provided. Snapshot mappings are always read-only.
6430 static int rbd_add_parse_args(const char *buf,
6431 struct ceph_options **ceph_opts,
6432 struct rbd_options **opts,
6433 struct rbd_spec **rbd_spec)
6437 const char *mon_addrs;
6439 size_t mon_addrs_size;
6440 struct rbd_parse_opts_ctx pctx = { 0 };
6443 /* The first four tokens are required */
6445 len = next_token(&buf);
6446 if (!len) {
6447 rbd_warn(NULL, "no monitor address(es) provided");
6448 return -EINVAL;
6449 }
6450 mon_addrs = buf;
6451 mon_addrs_size = len;
6452 buf += len;
6455 options = dup_token(&buf, NULL);
6456 if (!options)
6457 return -ENOMEM;
6458 if (!*options) {
6459 rbd_warn(NULL, "no options provided");
6463 pctx.spec = rbd_spec_alloc();
6464 if (!pctx.spec)
6465 goto out_mem;
6467 pctx.spec->pool_name = dup_token(&buf, NULL);
6468 if (!pctx.spec->pool_name)
6469 goto out_mem;
6470 if (!*pctx.spec->pool_name) {
6471 rbd_warn(NULL, "no pool name provided");
6475 pctx.spec->image_name = dup_token(&buf, NULL);
6476 if (!pctx.spec->image_name)
6477 goto out_mem;
6478 if (!*pctx.spec->image_name) {
6479 rbd_warn(NULL, "no image name provided");
6484 * Snapshot name is optional; default is to use "-"
6485 * (indicating the head/no snapshot).
6487 len = next_token(&buf);
6488 if (!len) {
6489 buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */
6490 len = sizeof (RBD_SNAP_HEAD_NAME) - 1;
6491 } else if (len > RBD_MAX_SNAP_NAME_LEN) {
6492 ret = -ENAMETOOLONG;
6495 snap_name = kmemdup(buf, len + 1, GFP_KERNEL);
6496 if (!snap_name)
6497 goto out_mem;
6498 *(snap_name + len) = '\0';
6499 pctx.spec->snap_name = snap_name;
6501 pctx.copts = ceph_alloc_options();
6502 if (!pctx.copts)
6503 goto out_mem;
6505 /* Initialize all rbd options to the defaults */
6507 pctx.opts = kzalloc(sizeof(*pctx.opts), GFP_KERNEL);
6508 if (!pctx.opts)
6509 goto out_mem;
6511 pctx.opts->read_only = RBD_READ_ONLY_DEFAULT;
6512 pctx.opts->queue_depth = RBD_QUEUE_DEPTH_DEFAULT;
6513 pctx.opts->alloc_size = RBD_ALLOC_SIZE_DEFAULT;
6514 pctx.opts->lock_timeout = RBD_LOCK_TIMEOUT_DEFAULT;
6515 pctx.opts->lock_on_read = RBD_LOCK_ON_READ_DEFAULT;
6516 pctx.opts->exclusive = RBD_EXCLUSIVE_DEFAULT;
6517 pctx.opts->trim = RBD_TRIM_DEFAULT;
6519 ret = ceph_parse_mon_ips(mon_addrs, mon_addrs_size, pctx.copts, NULL);
6523 ret = rbd_parse_options(options, &pctx);
6527 *ceph_opts = pctx.copts;
6528 *opts = pctx.opts;
6529 *rbd_spec = pctx.spec;
6537 ceph_destroy_options(pctx.copts);
6538 rbd_spec_put(pctx.spec);
6543 static void rbd_dev_image_unlock(struct rbd_device *rbd_dev)
6545 down_write(&rbd_dev->lock_rwsem);
6546 if (__rbd_is_lock_owner(rbd_dev))
6547 __rbd_release_lock(rbd_dev);
6548 up_write(&rbd_dev->lock_rwsem);
6552 * If the wait is interrupted, an error is returned even if the lock
6553 * was successfully acquired. rbd_dev_image_unlock() will release it
6556 static int rbd_add_acquire_lock(struct rbd_device *rbd_dev)
6560 if (!(rbd_dev->header.features & RBD_FEATURE_EXCLUSIVE_LOCK)) {
6561 if (!rbd_dev->opts->exclusive && !rbd_dev->opts->lock_on_read)
6564 rbd_warn(rbd_dev, "exclusive-lock feature is not enabled");
6568 if (rbd_is_ro(rbd_dev))
6571 rbd_assert(!rbd_is_lock_owner(rbd_dev));
6572 queue_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0);
6573 ret = wait_for_completion_killable_timeout(&rbd_dev->acquire_wait,
6574 ceph_timeout_jiffies(rbd_dev->opts->lock_timeout));
6576 ret = rbd_dev->acquire_err;
6578 cancel_delayed_work_sync(&rbd_dev->lock_dwork);
6584 rbd_warn(rbd_dev, "failed to acquire exclusive lock: %ld", ret);
6589 * The lock may have been released by now, unless automatic lock
6590 * transitions are disabled.
6592 rbd_assert(!rbd_dev->opts->exclusive || rbd_is_lock_owner(rbd_dev));
6597 * An rbd format 2 image has a unique identifier, distinct from the
6598 * name given to it by the user. Internally, that identifier is
6599 * what's used to specify the names of objects related to the image.
6601 * A special "rbd id" object is used to map an rbd image name to its
6602 * id. If that object doesn't exist, then there is no v2 rbd image
6603 * with the supplied name.
6605 * This function will record the given rbd_dev's image_id field if
6606 * it can be determined, and in that case will return 0. If any
6607 * errors occur a negative errno will be returned and the rbd_dev's
6608 * image_id field will be unchanged (and should be NULL).
6610 static int rbd_dev_image_id(struct rbd_device *rbd_dev)
6614 CEPH_DEFINE_OID_ONSTACK(oid);
6619 * When probing a parent image, the image id is already
6620 * known (and the image name likely is not). There's no
6621 * need to fetch the image id again in this case. We
6622 * do still need to set the image format though.
6624 if (rbd_dev->spec->image_id) {
6625 rbd_dev->image_format = *rbd_dev->spec->image_id ? 2 : 1;
6631 * First, see if the format 2 image id file exists, and if
6632 * so, get the image's persistent id from it.
6634 ret = ceph_oid_aprintf(&oid, GFP_KERNEL, "%s%s", RBD_ID_PREFIX,
6635 rbd_dev->spec->image_name);
6639 dout("rbd id object name is %s\n", oid.name);
6641 /* Response will be an encoded string, which includes a length */
6642 size = sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX;
6643 response = kzalloc(size, GFP_NOIO);
6644 if (!response) {
6645 ret = -ENOMEM;
6646 goto out;
6647 }
6649 /* If it doesn't exist we'll assume it's a format 1 image */
6651 ret = rbd_obj_method_sync(rbd_dev, &oid, &rbd_dev->header_oloc,
6654 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
6655 if (ret == -ENOENT) {
6656 image_id = kstrdup("", GFP_KERNEL);
6657 ret = image_id ? 0 : -ENOMEM;
6658 if (!ret)
6659 rbd_dev->image_format = 1;
6660 } else if (ret >= 0) {
6661 void *p = response;
6663 image_id = ceph_extract_encoded_string(&p, p + ret,
6664 NULL, GFP_NOIO);
6665 ret = PTR_ERR_OR_ZERO(image_id);
6666 if (!ret)
6667 rbd_dev->image_format = 2;
6668 }
6671 rbd_dev->spec->image_id = image_id;
6672 dout("image_id is %s\n", image_id);
6676 ceph_oid_destroy(&oid);
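/*
 * Illustrative example only: for an image named "foo", the id object
 * probed above is "rbd_id.foo" (RBD_ID_PREFIX).  A -ENOENT reply
 * means a format 1 image (recorded as an empty image_id); otherwise
 * the reply carries the persistent image id and the format is 2.
 */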
6681 * Undo whatever state changes are made by v1 or v2 header info
6684 static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
6686 struct rbd_image_header *header;
6688 rbd_dev_parent_put(rbd_dev);
6689 rbd_object_map_free(rbd_dev);
6690 rbd_dev_mapping_clear(rbd_dev);
6692 /* Free dynamic fields from the header, then zero it out */
6694 header = &rbd_dev->header;
6695 ceph_put_snap_context(header->snapc);
6696 kfree(header->snap_sizes);
6697 kfree(header->snap_names);
6698 kfree(header->object_prefix);
6699 memset(header, 0, sizeof (*header));
6702 static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev)
6706 ret = rbd_dev_v2_object_prefix(rbd_dev);
6711 * Get and check the features for the image. Currently the
6712 * features are assumed to never change.
6714 ret = rbd_dev_v2_features(rbd_dev);
6718 /* If the image supports fancy striping, get its parameters */
6720 if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) {
6721 ret = rbd_dev_v2_striping_info(rbd_dev);
6726 if (rbd_dev->header.features & RBD_FEATURE_DATA_POOL) {
6727 ret = rbd_dev_v2_data_pool(rbd_dev);
6732 rbd_init_layout(rbd_dev);
6736 rbd_dev->header.features = 0;
6737 kfree(rbd_dev->header.object_prefix);
6738 rbd_dev->header.object_prefix = NULL;
6743 * @depth is rbd_dev_image_probe() -> rbd_dev_probe_parent() ->
6744 * rbd_dev_image_probe() recursion depth, which means it's also the
6745 * length of the already discovered part of the parent chain.
6747 static int rbd_dev_probe_parent(struct rbd_device *rbd_dev, int depth)
6749 struct rbd_device *parent = NULL;
6752 if (!rbd_dev->parent_spec)
6755 if (++depth > RBD_MAX_PARENT_CHAIN_LEN) {
6756 pr_info("parent chain is too long (%d)\n", depth);
6761 parent = __rbd_dev_create(rbd_dev->rbd_client, rbd_dev->parent_spec);
6768 * Images related by parent/child relationships always share
6769 * rbd_client and spec/parent_spec, so bump their refcounts.
6771 __rbd_get_client(rbd_dev->rbd_client);
6772 rbd_spec_get(rbd_dev->parent_spec);
6774 __set_bit(RBD_DEV_FLAG_READONLY, &parent->flags);
6776 ret = rbd_dev_image_probe(parent, depth);
6780 rbd_dev->parent = parent;
6781 atomic_set(&rbd_dev->parent_ref, 1);
6785 rbd_dev_unparent(rbd_dev);
6786 rbd_dev_destroy(parent);
6790 static void rbd_dev_device_release(struct rbd_device *rbd_dev)
6792 clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
6793 rbd_free_disk(rbd_dev);
6795 unregister_blkdev(rbd_dev->major, rbd_dev->name);
6799 * rbd_dev->header_rwsem must be locked for write and will be unlocked
6802 static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
6806 /* Record our major and minor device numbers. */
6808 if (!single_major) {
6809 ret = register_blkdev(0, rbd_dev->name);
6811 goto err_out_unlock;
6813 rbd_dev->major = ret;
6816 rbd_dev->major = rbd_major;
6817 rbd_dev->minor = rbd_dev_id_to_minor(rbd_dev->dev_id);
6820 /* Set up the blkdev mapping. */
6822 ret = rbd_init_disk(rbd_dev);
6824 goto err_out_blkdev;
6826 set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
6827 set_disk_ro(rbd_dev->disk, rbd_is_ro(rbd_dev));
6829 ret = dev_set_name(&rbd_dev->dev, "%d", rbd_dev->dev_id);
6833 set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
6834 up_write(&rbd_dev->header_rwsem);
6838 rbd_free_disk(rbd_dev);
6841 unregister_blkdev(rbd_dev->major, rbd_dev->name);
6843 up_write(&rbd_dev->header_rwsem);
6847 static int rbd_dev_header_name(struct rbd_device *rbd_dev)
6849 struct rbd_spec *spec = rbd_dev->spec;
6852 /* Record the header object name for this rbd image. */
6854 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
6855 if (rbd_dev->image_format == 1)
6856 ret = ceph_oid_aprintf(&rbd_dev->header_oid, GFP_KERNEL, "%s%s",
6857 spec->image_name, RBD_SUFFIX);
6859 ret = ceph_oid_aprintf(&rbd_dev->header_oid, GFP_KERNEL, "%s%s",
6860 RBD_HEADER_PREFIX, spec->image_id);
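/*
 * Illustrative example only: an image "foo" with image id "abc123"
 * gets header object "foo.rbd" (format 1, RBD_SUFFIX) or
 * "rbd_header.abc123" (format 2, RBD_HEADER_PREFIX).
 */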
6865 static void rbd_print_dne(struct rbd_device *rbd_dev, bool is_snap)
6867 if (!is_snap) {
6868 pr_info("image %s/%s%s%s does not exist\n",
6869 rbd_dev->spec->pool_name,
6870 rbd_dev->spec->pool_ns ?: "",
6871 rbd_dev->spec->pool_ns ? "/" : "",
6872 rbd_dev->spec->image_name);
6873 } else {
6874 pr_info("snap %s/%s%s%s@%s does not exist\n",
6875 rbd_dev->spec->pool_name,
6876 rbd_dev->spec->pool_ns ?: "",
6877 rbd_dev->spec->pool_ns ? "/" : "",
6878 rbd_dev->spec->image_name,
6879 rbd_dev->spec->snap_name);
6883 static void rbd_dev_image_release(struct rbd_device *rbd_dev)
6885 if (!rbd_is_ro(rbd_dev))
6886 rbd_unregister_watch(rbd_dev);
6888 rbd_dev_unprobe(rbd_dev);
6889 rbd_dev->image_format = 0;
6890 kfree(rbd_dev->spec->image_id);
6891 rbd_dev->spec->image_id = NULL;
6895 * Probe for the existence of the header object for the given rbd
6896 * device. If this image is the one being mapped (i.e., not a
6897 * parent), initiate a watch on its header object before using that
6898 * object to get detailed information about the rbd image.
6900 * On success, returns with header_rwsem held for write if called
6903 static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
6905 bool need_watch = !rbd_is_ro(rbd_dev);
6909 * Get the id from the image id object. Unless there's an
6910 * error, rbd_dev->spec->image_id will be filled in with
6911 * a dynamically-allocated string, and rbd_dev->image_format
6912 * will be set to either 1 or 2.
6914 ret = rbd_dev_image_id(rbd_dev);
6918 ret = rbd_dev_header_name(rbd_dev);
6920 goto err_out_format;
6923 ret = rbd_register_watch(rbd_dev);
6926 rbd_print_dne(rbd_dev, false);
6927 goto err_out_format;
6932 down_write(&rbd_dev->header_rwsem);
6934 ret = rbd_dev_header_info(rbd_dev);
6936 if (ret == -ENOENT && !need_watch)
6937 rbd_print_dne(rbd_dev, false);
6942 * If this image is the one being mapped, we have pool name and
6943 * id, image name and id, and snap name - need to fill snap id.
6944 * Otherwise this is a parent image, identified by pool, image
6945 * and snap ids - need to fill in names for those ids.
6948 ret = rbd_spec_fill_snap_id(rbd_dev);
6950 ret = rbd_spec_fill_names(rbd_dev);
6953 rbd_print_dne(rbd_dev, true);
6957 ret = rbd_dev_mapping_set(rbd_dev);
6961 if (rbd_is_snap(rbd_dev) &&
6962 (rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP)) {
6963 ret = rbd_object_map_load(rbd_dev);
6968 if (rbd_dev->header.features & RBD_FEATURE_LAYERING) {
6969 ret = rbd_dev_v2_parent_info(rbd_dev);
6974 ret = rbd_dev_probe_parent(rbd_dev, depth);
6978 dout("discovered format %u image, header name is %s\n",
6979 rbd_dev->image_format, rbd_dev->header_oid.name);
6984 up_write(&rbd_dev->header_rwsem);
6986 rbd_unregister_watch(rbd_dev);
6987 rbd_dev_unprobe(rbd_dev);
6989 rbd_dev->image_format = 0;
6990 kfree(rbd_dev->spec->image_id);
6991 rbd_dev->spec->image_id = NULL;
6995 static ssize_t do_rbd_add(struct bus_type *bus,
6999 struct rbd_device *rbd_dev = NULL;
7000 struct ceph_options *ceph_opts = NULL;
7001 struct rbd_options *rbd_opts = NULL;
7002 struct rbd_spec *spec = NULL;
7003 struct rbd_client *rbdc;
7006 if (!capable(CAP_SYS_ADMIN))
7007 return -EPERM;
7009 if (!try_module_get(THIS_MODULE))
7010 return -ENODEV;
7012 /* parse add command */
7013 rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
7017 rbdc = rbd_get_client(ceph_opts);
7024 rc = ceph_pg_poolid_by_name(rbdc->client->osdc.osdmap, spec->pool_name);
7027 pr_info("pool %s does not exist\n", spec->pool_name);
7028 goto err_out_client;
7030 spec->pool_id = (u64)rc;
7032 rbd_dev = rbd_dev_create(rbdc, spec, rbd_opts);
7035 goto err_out_client;
7037 rbdc = NULL; /* rbd_dev now owns this */
7038 spec = NULL; /* rbd_dev now owns this */
7039 rbd_opts = NULL; /* rbd_dev now owns this */
7041 /* if we are mapping a snapshot it will be a read-only mapping */
7042 if (rbd_dev->opts->read_only ||
7043 strcmp(rbd_dev->spec->snap_name, RBD_SNAP_HEAD_NAME))
7044 __set_bit(RBD_DEV_FLAG_READONLY, &rbd_dev->flags);
7046 rbd_dev->config_info = kstrdup(buf, GFP_KERNEL);
7047 if (!rbd_dev->config_info) {
7049 goto err_out_rbd_dev;
7052 rc = rbd_dev_image_probe(rbd_dev, 0);
7054 goto err_out_rbd_dev;
7056 if (rbd_dev->opts->alloc_size > rbd_dev->layout.object_size) {
7057 rbd_warn(rbd_dev, "alloc_size adjusted to %u",
7058 rbd_dev->layout.object_size);
7059 rbd_dev->opts->alloc_size = rbd_dev->layout.object_size;
7062 rc = rbd_dev_device_setup(rbd_dev);
7064 goto err_out_image_probe;
7066 rc = rbd_add_acquire_lock(rbd_dev);
7068 goto err_out_image_lock;
7070 /* Everything's ready. Announce the disk to the world. */
7072 rc = device_add(&rbd_dev->dev);
7074 goto err_out_image_lock;
7076 device_add_disk(&rbd_dev->dev, rbd_dev->disk, NULL);
7078 spin_lock(&rbd_dev_list_lock);
7079 list_add_tail(&rbd_dev->node, &rbd_dev_list);
7080 spin_unlock(&rbd_dev_list_lock);
7082 pr_info("%s: capacity %llu features 0x%llx\n", rbd_dev->disk->disk_name,
7083 (unsigned long long)get_capacity(rbd_dev->disk) << SECTOR_SHIFT,
7084 rbd_dev->header.features);
7087 module_put(THIS_MODULE);
7091 rbd_dev_image_unlock(rbd_dev);
7092 rbd_dev_device_release(rbd_dev);
7093 err_out_image_probe:
7094 rbd_dev_image_release(rbd_dev);
7096 rbd_dev_destroy(rbd_dev);
7098 rbd_put_client(rbdc);
7105 static ssize_t add_store(struct bus_type *bus, const char *buf, size_t count)
7107 if (single_major)
7108 return -EINVAL;
7110 return do_rbd_add(bus, buf, count);
7113 static ssize_t add_single_major_store(struct bus_type *bus, const char *buf,
7116 return do_rbd_add(bus, buf, count);
7119 static void rbd_dev_remove_parent(struct rbd_device *rbd_dev)
7121 while (rbd_dev->parent) {
7122 struct rbd_device *first = rbd_dev;
7123 struct rbd_device *second = first->parent;
7124 struct rbd_device *third;
7127 * Follow to the parent with no grandparent and
7130 while (second && (third = second->parent)) {
7135 rbd_dev_image_release(second);
7136 rbd_dev_destroy(second);
7137 first->parent = NULL;
7138 first->parent_overlap = 0;
7140 rbd_assert(first->parent_spec);
7141 rbd_spec_put(first->parent_spec);
7142 first->parent_spec = NULL;
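/*
 * Teardown-order note: each pass of the outer loop above walks down
 * to the deepest image that still has a parent, releases that parent,
 * and severs the link, so the chain is dismantled deepest-first.
 */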
7146 static ssize_t do_rbd_remove(struct bus_type *bus,
7150 struct rbd_device *rbd_dev = NULL;
7151 struct list_head *tmp;
7157 if (!capable(CAP_SYS_ADMIN))
7158 return -EPERM;
7162 sscanf(buf, "%d %5s", &dev_id, opt_buf);
7163 if (dev_id < 0) {
7164 pr_err("dev_id out of range\n");
7167 if (opt_buf[0] != '\0') {
7168 if (!strcmp(opt_buf, "force")) {
7171 pr_err("bad remove option at '%s'\n", opt_buf);
7177 spin_lock(&rbd_dev_list_lock);
7178 list_for_each(tmp, &rbd_dev_list) {
7179 rbd_dev = list_entry(tmp, struct rbd_device, node);
7180 if (rbd_dev->dev_id == dev_id) {
7186 spin_lock_irq(&rbd_dev->lock);
7187 if (rbd_dev->open_count && !force)
7189 else if (test_and_set_bit(RBD_DEV_FLAG_REMOVING,
7192 spin_unlock_irq(&rbd_dev->lock);
7194 spin_unlock(&rbd_dev_list_lock);
7200 * Prevent new IO from being queued and wait for existing
7201 * IO to complete/fail.
7203 blk_mq_freeze_queue(rbd_dev->disk->queue);
7204 blk_set_queue_dying(rbd_dev->disk->queue);
7207 del_gendisk(rbd_dev->disk);
7208 spin_lock(&rbd_dev_list_lock);
7209 list_del_init(&rbd_dev->node);
7210 spin_unlock(&rbd_dev_list_lock);
7211 device_del(&rbd_dev->dev);
7213 rbd_dev_image_unlock(rbd_dev);
7214 rbd_dev_device_release(rbd_dev);
7215 rbd_dev_image_release(rbd_dev);
7216 rbd_dev_destroy(rbd_dev);
7220 static ssize_t remove_store(struct bus_type *bus, const char *buf, size_t count)
7222 if (single_major)
7223 return -EINVAL;
7225 return do_rbd_remove(bus, buf, count);
7228 static ssize_t remove_single_major_store(struct bus_type *bus, const char *buf,
7231 return do_rbd_remove(bus, buf, count);
7235 * create control files in sysfs
7238 static int __init rbd_sysfs_init(void)
7242 ret = device_register(&rbd_root_dev);
7243 if (ret < 0)
7244 return ret;
7246 ret = bus_register(&rbd_bus_type);
7247 if (ret < 0)
7248 device_unregister(&rbd_root_dev);
7253 static void __exit rbd_sysfs_cleanup(void)
7255 bus_unregister(&rbd_bus_type);
7256 device_unregister(&rbd_root_dev);
7259 static int __init rbd_slab_init(void)
7261 rbd_assert(!rbd_img_request_cache);
7262 rbd_img_request_cache = KMEM_CACHE(rbd_img_request, 0);
7263 if (!rbd_img_request_cache)
7264 return -ENOMEM;
7266 rbd_assert(!rbd_obj_request_cache);
7267 rbd_obj_request_cache = KMEM_CACHE(rbd_obj_request, 0);
7268 if (!rbd_obj_request_cache)
7269 goto out_err;
7274 kmem_cache_destroy(rbd_img_request_cache);
7275 rbd_img_request_cache = NULL;
7279 static void rbd_slab_exit(void)
7281 rbd_assert(rbd_obj_request_cache);
7282 kmem_cache_destroy(rbd_obj_request_cache);
7283 rbd_obj_request_cache = NULL;
7285 rbd_assert(rbd_img_request_cache);
7286 kmem_cache_destroy(rbd_img_request_cache);
7287 rbd_img_request_cache = NULL;
7290 static int __init rbd_init(void)
7294 if (!libceph_compatible(NULL)) {
7295 rbd_warn(NULL, "libceph incompatibility (quitting)");
7296 return -EINVAL;
7297 }
7299 rc = rbd_slab_init();
7304 * The number of active work items is limited by the number of
7305 * rbd devices * queue depth, so leave @max_active at default.
7307 rbd_wq = alloc_workqueue(RBD_DRV_NAME, WQ_MEM_RECLAIM, 0);
7314 rbd_major = register_blkdev(0, RBD_DRV_NAME);
7315 if (rbd_major < 0) {
7316 rc = rbd_major;
7317 goto err_out_wq;
7318 }
7321 rc = rbd_sysfs_init();
7323 goto err_out_blkdev;
7325 if (single_major)
7326 pr_info("loaded (major %d)\n", rbd_major);
7327 else
7328 pr_info("loaded\n");
7334 unregister_blkdev(rbd_major, RBD_DRV_NAME);
7336 destroy_workqueue(rbd_wq);
7342 static void __exit rbd_exit(void)
7344 ida_destroy(&rbd_dev_id_ida);
7345 rbd_sysfs_cleanup();
7346 if (single_major)
7347 unregister_blkdev(rbd_major, RBD_DRV_NAME);
7348 destroy_workqueue(rbd_wq);
7352 module_init(rbd_init);
7353 module_exit(rbd_exit);
7355 MODULE_AUTHOR("Alex Elder <elder@inktank.com>");
7356 MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
7357 MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
7358 /* following authorship retained from original osdblk.c */
7359 MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>");
7361 MODULE_DESCRIPTION("RADOS Block Device (RBD) driver");
7362 MODULE_LICENSE("GPL");