/*
 * Internal header file _only_ for device mapper core
 *
 * Copyright (C) 2016 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the LGPL.
 */

#ifndef DM_CORE_INTERNAL_H
#define DM_CORE_INTERNAL_H

#include <linux/kthread.h>
#include <linux/ktime.h>
#include <linux/blk-mq.h>
#include <linux/blk-crypto-profile.h>
#include <linux/jump_label.h>

#include <trace/events/block.h>

#include "dm.h"
#include "dm-ima.h"

#define DM_RESERVED_MAX_IOS		1024

struct dm_kobject_holder {
	struct kobject kobj;
	struct completion completion;
};

/*
 * DM core internal structures used directly by dm.c, dm-rq.c and dm-table.c.
 * DM targets must _not_ dereference a mapped_device or dm_table to directly
 * access their members!
 */
struct mapped_device {
	struct mutex suspend_lock;

	struct mutex table_devices_lock;
	struct list_head table_devices;

	/*
	 * The current mapping (struct dm_table *).
	 * Use dm_get_live_table{_fast} or take suspend_lock for
	 * dereference; see the usage sketch after this struct.
	 */
	void __rcu *map;

	unsigned long flags;

	/* Protect queue and type against concurrent access. */
	struct mutex type_lock;
	enum dm_queue_mode type;

	struct request_queue *queue;

	struct dm_target *immutable_target;
	struct target_type *immutable_target_type;

	struct gendisk *disk;
	struct dax_device *dax_dev;

	wait_queue_head_t wait;
	unsigned long __percpu *pending_io;

	/* forced geometry settings */
	struct hd_geometry geometry;

	/*
	 * Processing queue (flush)
	 */
	struct workqueue_struct *wq;

	/*
	 * A list of ios that arrived while we were suspended.
	 */
	struct work_struct work;
	spinlock_t deferred_lock;
	struct bio_list deferred;

	wait_queue_head_t eventq;
	struct list_head uevent_list;
	spinlock_t uevent_lock; /* Protect access to uevent_list */

	/* for blk-mq request-based DM support */
	struct blk_mq_tag_set *tag_set;

	struct dm_stats stats;

	/* the number of internal suspends */
	unsigned int internal_suspend_count;

	struct semaphore swap_bios_semaphore;
	struct mutex swap_bios_lock;

	/*
	 * io objects are allocated from here.
	 */
	struct bio_set io_bs;

	/* kobject and completion */
	struct dm_kobject_holder kobj_holder;

	struct srcu_struct io_barrier;

#ifdef CONFIG_BLK_DEV_ZONED
	unsigned int nr_zones;
	unsigned int *zwp_offset;
#endif

#ifdef CONFIG_IMA
	struct dm_ima_measurements ima;
#endif
};
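
/*
 * Illustrative sketch only (the dm_example_* helpers in this file are
 * hypothetical, not part of DM core): the expected pattern for dereferencing
 * the live table, per the comment on 'map' above. The accessors are declared
 * in linux/device-mapper.h and take/release the io_barrier SRCU read lock;
 * do not cache the table pointer past dm_put_live_table().
 */
static inline sector_t dm_example_live_table_size(struct mapped_device *md)
{
	int srcu_idx;
	struct dm_table *map = dm_get_live_table(md, &srcu_idx);
	sector_t size = map ? dm_table_get_size(map) : 0;

	dm_put_live_table(md, srcu_idx);
	return size;
}
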
/*
 * Bits for the flags field of struct mapped_device.
 */
#define DMF_BLOCK_IO_FOR_SUSPEND 0
#define DMF_SUSPENDED 1
#define DMF_FROZEN 2
#define DMF_FREEING 3
#define DMF_DELETING 4
#define DMF_NOFLUSH_SUSPENDING 5
#define DMF_DEFERRED_REMOVE 6
#define DMF_SUSPENDED_INTERNALLY 7
#define DMF_POST_SUSPENDING 8
#define DMF_EMULATE_ZONE_APPEND 9
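
/*
 * These bits index the 'flags' word of struct mapped_device and are
 * manipulated with the atomic bitops. A minimal sketch, using a
 * hypothetical helper (DM core's real checks live in dm.c/dm.h):
 */
static inline bool dm_example_md_suspended(struct mapped_device *md)
{
	return test_bit(DMF_SUSPENDED, &md->flags) ||
	       test_bit(DMF_SUSPENDED_INTERNALLY, &md->flags);
}
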
void disable_discard(struct mapped_device *md);
void disable_write_zeroes(struct mapped_device *md);

static inline sector_t dm_get_size(struct mapped_device *md)
{
	return get_capacity(md->disk);
}

static inline struct dm_stats *dm_get_stats(struct mapped_device *md)
{
	return &md->stats;
}

DECLARE_STATIC_KEY_FALSE(stats_enabled);
DECLARE_STATIC_KEY_FALSE(swap_bios_enabled);
DECLARE_STATIC_KEY_FALSE(zoned_enabled);
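
/*
 * These static keys keep rarely used features off the fast path; they are
 * meant to be flipped with static_branch_enable() when a feature is first
 * used. A minimal sketch with a hypothetical helper:
 */
static inline bool dm_example_stats_active(void)
{
	return static_branch_unlikely(&stats_enabled);
}
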
static inline bool dm_emulate_zone_append(struct mapped_device *md)
{
	if (blk_queue_is_zoned(md->queue))
		return test_bit(DMF_EMULATE_ZONE_APPEND, &md->flags);
	return false;
}

#define DM_TABLE_MAX_DEPTH 16

struct dm_table {
	struct mapped_device *md;
	enum dm_queue_mode type;

	/* btree table */
	unsigned int depth;
	unsigned int counts[DM_TABLE_MAX_DEPTH]; /* in nodes */
	sector_t *index[DM_TABLE_MAX_DEPTH];

	unsigned int num_targets;
	unsigned int num_allocated;
	struct dm_target *targets;

	struct target_type *immutable_target_type;

	bool integrity_supported:1;
	unsigned int integrity_added:1;

	/*
	 * Indicates the rw permissions for the new logical
	 * device. This should be a combination of FMODE_READ
	 * and FMODE_WRITE.
	 */
	fmode_t mode;

	/* a list of devices used by this table */
	struct list_head devices;

	/* events get handed up using this callback */
	void (*event_fn)(void *);
	void *event_context;

	struct dm_md_mempools *mempools;

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	struct blk_crypto_profile *crypto_profile;
#endif
};
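
/*
 * Targets are kept in a flat array indexed 0..num_targets-1; dm-table.c
 * exposes them through dm_table_get_target(). A minimal sketch of the
 * underlying lookup (hypothetical helper):
 */
static inline struct dm_target *dm_example_target_at(struct dm_table *t,
						     unsigned int index)
{
	return index < t->num_targets ? t->targets + index : NULL;
}
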
/*
 * One of these is allocated per clone bio.
 */
#define DM_TIO_MAGIC 28714
struct dm_target_io {
	unsigned short magic;
	unsigned short flags;
	unsigned int target_bio_nr;
	struct dm_io *io;
	struct dm_target *ti;
	unsigned int *len_ptr;
	struct bio clone;
};
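
/*
 * Because 'clone' is embedded in dm_target_io, a clone bio can be mapped
 * back to its dm_target_io with container_of(). A minimal sketch
 * (hypothetical helper; dm.c keeps an equivalent one):
 */
static inline struct dm_target_io *dm_example_clone_to_tio(struct bio *clone)
{
	return container_of(clone, struct dm_target_io, clone);
}
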
/*
 * dm_target_io flags
 */
enum {
	DM_TIO_INSIDE_DM_IO,
	DM_TIO_IS_DUPLICATE_BIO
};

static inline bool dm_tio_flagged(struct dm_target_io *tio, unsigned int bit)
{
	return (tio->flags & (1U << bit)) != 0;
}

static inline void dm_tio_set_flag(struct dm_target_io *tio, unsigned int bit)
{
	tio->flags |= (1U << bit);
}

static inline bool dm_tio_is_normal(struct dm_target_io *tio)
{
	return (dm_tio_flagged(tio, DM_TIO_INSIDE_DM_IO) &&
		!dm_tio_flagged(tio, DM_TIO_IS_DUPLICATE_BIO));
}
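
/*
 * Usage sketch (illustrative): dm.c marks the extra clones it creates and
 * later restricts per-bio accounting to the normal case, e.g.
 *
 *	dm_tio_set_flag(tio, DM_TIO_IS_DUPLICATE_BIO);
 *	...
 *	if (dm_tio_is_normal(tio))
 *		...
 */
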
/*
 * One of these is allocated per original bio.
 * It contains the first clone used for that original.
 */
#define DM_IO_MAGIC 19577
struct dm_io {
	unsigned short magic;
	unsigned short flags;
	unsigned long start_time;
	struct dm_stats_aux stats_aux;
	struct mapped_device *md;

	/* The three fields represent mapped part of original bio */
	struct bio *orig_bio;
	unsigned int sector_offset; /* offset to end of orig_bio */
	unsigned int sectors;

	/* last member of dm_target_io is 'struct bio' */
	struct dm_target_io tio;
};
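
/*
 * Because the first clone's dm_target_io sits at the tail of dm_io, a tio
 * flagged DM_TIO_INSIDE_DM_IO can be mapped back to its owning dm_io. A
 * minimal sketch (hypothetical helper):
 */
static inline struct dm_io *dm_example_tio_to_io(struct dm_target_io *tio)
{
	WARN_ON_ONCE(!dm_tio_flagged(tio, DM_TIO_INSIDE_DM_IO));
	return container_of(tio, struct dm_io, tio);
}
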
static inline bool dm_io_flagged(struct dm_io *io, unsigned int bit)
{
	return (io->flags & (1U << bit)) != 0;
}

static inline void dm_io_set_flag(struct dm_io *io, unsigned int bit)
{
	io->flags |= (1U << bit);
}

static inline struct completion *dm_get_completion_from_kobject(struct kobject *kobj)
{
	return &container_of(kobj, struct dm_kobject_holder, kobj)->completion;
}

unsigned int __dm_get_module_param(unsigned int *module_param, unsigned int def, unsigned int max);
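
/*
 * __dm_get_module_param() reads a module parameter once, substitutes 'def'
 * when it is zero and clamps it to 'max'. Usage sketch (illustrative,
 * modeled on dm.c; the parameter names are assumptions):
 *
 *	unsigned int dm_get_reserved_bio_based_ios(void)
 *	{
 *		return __dm_get_module_param(&reserved_bio_based_ios,
 *				RESERVED_BIO_BASED_IOS, DM_RESERVED_MAX_IOS);
 *	}
 */
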
static inline bool dm_message_test_buffer_overflow(char *result, unsigned int maxlen)
{
	return !maxlen || strlen(result) + 1 >= maxlen;
}
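
/*
 * A message handler that fills 'result' can detect truncation and request a
 * retry with a larger buffer. A minimal sketch (hypothetical helper; the
 * return-1-to-retry convention follows the dm message path):
 */
static inline int dm_example_emit_count(char *result, unsigned int maxlen,
					unsigned int count)
{
	scnprintf(result, maxlen, "%u", count);
	return dm_message_test_buffer_overflow(result, maxlen) ? 1 : 0;
}
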
extern atomic_t dm_global_event_nr;
extern wait_queue_head_t dm_global_eventq;
void dm_issue_global_event(void);

#endif /* DM_CORE_INTERNAL_H */