/*
 * Internal header file _only_ for device mapper core
 *
 * Copyright (C) 2016 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the LGPL.
 */
#ifndef DM_CORE_INTERNAL_H
#define DM_CORE_INTERNAL_H

#include <linux/kthread.h>
#include <linux/ktime.h>
#include <linux/blk-mq.h>
#include <linux/blk-crypto-profile.h>
#include <linux/jump_label.h>

#include <trace/events/block.h>

#include "dm.h"
#include "dm-ima.h"
#define DM_RESERVED_MAX_IOS		1024

struct dm_kobject_holder {
	struct kobject kobj;
	struct completion completion;
};
/*
 * DM core internal structures used directly by dm.c, dm-rq.c and dm-table.c.
 * DM targets must _not_ dereference a mapped_device or dm_table to directly
 * access their members!
 */

/*
 * For mempools pre-allocation at table loading time.
 */
struct dm_md_mempools {
	struct bio_set bs;
	struct bio_set io_bs;
};
struct mapped_device {
	struct mutex suspend_lock;

	struct mutex table_devices_lock;
	struct list_head table_devices;

	/*
	 * The current mapping (struct dm_table *).
	 * Use dm_get_live_table{_fast} or take suspend_lock for
	 * dereference.
	 */
	void __rcu *map;

	unsigned long flags;

	/* Protect queue and type against concurrent access. */
	struct mutex type_lock;
	enum dm_queue_mode type;

	int numa_node_id;
	struct request_queue *queue;

	atomic_t holders;
	atomic_t open_count;

	struct dm_target *immutable_target;
	struct target_type *immutable_target_type;

	char name[16];
	struct gendisk *disk;
	struct dax_device *dax_dev;

	wait_queue_head_t wait;
	unsigned long __percpu *pending_io;

	/* forced geometry settings */
	struct hd_geometry geometry;

	/* Processing queue (flush) */
	struct workqueue_struct *wq;

	/* A list of ios that arrived while we were suspended. */
	struct work_struct work;
	spinlock_t deferred_lock;
	struct bio_list deferred;

	void *interface_ptr;

	/* Event handling. */
	wait_queue_head_t eventq;
	atomic_t event_nr;
	atomic_t uevent_seq;
	struct list_head uevent_list;
	spinlock_t uevent_lock; /* Protect access to uevent_list */

	/* for blk-mq request-based DM support */
	bool init_tio_pdu:1;
	struct blk_mq_tag_set *tag_set;

	struct dm_stats stats;

	/* the number of internal suspends */
	unsigned int internal_suspend_count;

	int swap_bios;
	struct semaphore swap_bios_semaphore;
	struct mutex swap_bios_lock;

	/* io objects are allocated from here */
	struct dm_md_mempools *mempools;

	/* kobject and completion */
	struct dm_kobject_holder kobj_holder;

	struct srcu_struct io_barrier;

#ifdef CONFIG_BLK_DEV_ZONED
	unsigned int nr_zones;
	unsigned int *zwp_offset;
#endif

#ifdef CONFIG_IMA
	struct dm_ima_measurements ima;
#endif
};
/*
 * Bits for the flags field of struct mapped_device.
 */
#define DMF_BLOCK_IO_FOR_SUSPEND 0
#define DMF_SUSPENDED 1
#define DMF_FROZEN 2
#define DMF_FREEING 3
#define DMF_DELETING 4
#define DMF_NOFLUSH_SUSPENDING 5
#define DMF_DEFERRED_REMOVE 6
#define DMF_SUSPENDED_INTERNALLY 7
#define DMF_POST_SUSPENDING 8
#define DMF_EMULATE_ZONE_APPEND 9
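/*
 * Note: these are bit numbers, not masks; they are used with the
 * test_bit()/set_bit()/clear_bit() family on md->flags. A minimal
 * usage sketch (illustrative only, not a helper defined here):
 *
 *	if (test_bit(DMF_SUSPENDED, &md->flags))
 *		return -EAGAIN;
 *	set_bit(DMF_POST_SUSPENDING, &md->flags);
 */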
void disable_discard(struct mapped_device *md);
void disable_write_zeroes(struct mapped_device *md);

static inline sector_t dm_get_size(struct mapped_device *md)
{
	return get_capacity(md->disk);
}

static inline struct dm_stats *dm_get_stats(struct mapped_device *md)
{
	return &md->stats;
}

DECLARE_STATIC_KEY_FALSE(stats_enabled);
DECLARE_STATIC_KEY_FALSE(swap_bios_enabled);
DECLARE_STATIC_KEY_FALSE(zoned_enabled);
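/*
 * These static keys let the hot I/O paths skip stats accounting,
 * swap-bio throttling and zoned emulation at patched-jump cost while
 * the feature is unused. A guarded call looks like this (sketch only;
 * the arguments to dm_stats_account_io() are elided):
 *
 *	if (static_branch_unlikely(&stats_enabled))
 *		dm_stats_account_io(...);
 */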
static inline bool dm_emulate_zone_append(struct mapped_device *md)
{
	if (blk_queue_is_zoned(md->queue))
		return test_bit(DMF_EMULATE_ZONE_APPEND, &md->flags);
	return false;
}
#define DM_TABLE_MAX_DEPTH 16

struct dm_table {
	struct mapped_device *md;
	enum dm_queue_mode type;

	/* btree table */
	unsigned int depth;
	unsigned int counts[DM_TABLE_MAX_DEPTH]; /* in nodes */
	sector_t *index[DM_TABLE_MAX_DEPTH];

	unsigned int num_targets;
	unsigned int num_allocated;
	sector_t *highs;
	struct dm_target *targets;

	struct target_type *immutable_target_type;

	bool integrity_supported:1;
	bool singleton:1;
	unsigned integrity_added:1;

	/*
	 * Indicates the rw permissions for the new logical device.
	 * This should be a combination of FMODE_READ and FMODE_WRITE.
	 */
	fmode_t mode;

	/* a list of devices used by this table */
	struct list_head devices;

	/* events get handed up using this callback */
	void (*event_fn)(void *);
	void *event_context;

	struct dm_md_mempools *mempools;

#ifdef CONFIG_BLK_INLINE_ENCRYPTION
	struct blk_crypto_profile *crypto_profile;
#endif
};
/*
 * One of these is allocated per clone bio.
 */
#define DM_TIO_MAGIC 28714
struct dm_target_io {
	unsigned short magic;
	blk_short_t flags;
	unsigned int target_bio_nr;
	struct dm_io *io;
	struct dm_target *ti;
	unsigned int *len_ptr;
	sector_t old_sector;
	struct bio clone;
};

/* dm_target_io flags */
enum {
	DM_TIO_INSIDE_DM_IO,
	DM_TIO_IS_DUPLICATE_BIO
};
static inline bool dm_tio_flagged(struct dm_target_io *tio, unsigned int bit)
{
	return (tio->flags & (1U << bit)) != 0;
}

static inline void dm_tio_set_flag(struct dm_target_io *tio, unsigned int bit)
{
	tio->flags |= (1U << bit);
}

static inline bool dm_tio_is_normal(struct dm_target_io *tio)
{
	return (dm_tio_flagged(tio, DM_TIO_INSIDE_DM_IO) &&
		!dm_tio_flagged(tio, DM_TIO_IS_DUPLICATE_BIO));
}
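/*
 * Usage sketch (illustrative; dm.c is the real consumer): a duplicate
 * clone is tagged when it is issued and filtered out of the normal
 * completion accounting later:
 *
 *	dm_tio_set_flag(tio, DM_TIO_IS_DUPLICATE_BIO);
 *	...
 *	if (dm_tio_is_normal(tio))
 *		account_clone(tio);	// account_clone() is hypothetical
 */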
/*
 * One of these is allocated per original bio.
 * It contains the first clone used for that original.
 */
#define DM_IO_MAGIC 19577
struct dm_io {
	unsigned short magic;
	blk_short_t flags;
	spinlock_t lock;
	unsigned long start_time;
	void *data;
	struct dm_io *next;
	struct dm_stats_aux stats_aux;
	blk_status_t status;
	atomic_t io_count;
	struct mapped_device *md;

	/* The three fields represent the mapped part of the original bio */
	struct bio *orig_bio;
	unsigned int sector_offset; /* offset to end of orig_bio */
	unsigned int sectors;

	/* last member of dm_target_io is 'struct bio' */
	struct dm_target_io tio;
};

/* dm_io flags */
enum {
	DM_IO_START_ACCT,
	DM_IO_ACCOUNTED
};
static inline bool dm_io_flagged(struct dm_io *io, unsigned int bit)
{
	return (io->flags & (1U << bit)) != 0;
}

static inline void dm_io_set_flag(struct dm_io *io, unsigned int bit)
{
	io->flags |= (1U << bit);
}
static inline struct completion *dm_get_completion_from_kobject(struct kobject *kobj)
{
	return &container_of(kobj, struct dm_kobject_holder, kobj)->completion;
}
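/*
 * This is the usual container_of() lookup for an embedded kobject.
 * dm-sysfs.c's kobject release handler uses it to signal waiters that
 * the sysfs entry is gone, roughly (sketch):
 *
 *	static void dm_kobject_release(struct kobject *kobj)
 *	{
 *		complete(dm_get_completion_from_kobject(kobj));
 *	}
 */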
unsigned __dm_get_module_param(unsigned *module_param, unsigned def, unsigned max);

static inline bool dm_message_test_buffer_overflow(char *result, unsigned maxlen)
{
	return !maxlen || strlen(result) + 1 >= maxlen;
}
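/*
 * Typical use from a target's ->status or ->message handler (sketch;
 * everything other than dm_message_test_buffer_overflow() below is
 * hypothetical):
 *
 *	scnprintf(result, maxlen, "state=%d", state);
 *	if (dm_message_test_buffer_overflow(result, maxlen))
 *		return 1;	// caller retries with a larger buffer
 */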
extern atomic_t dm_global_event_nr;
extern wait_queue_head_t dm_global_eventq;
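/*
 * dm_issue_global_event() (defined in dm.c) increments
 * dm_global_event_nr and wakes dm_global_eventq, which lets userspace
 * poll for "some dm device changed" without watching each device.
 * Roughly (sketch of the dm.c side):
 *
 *	atomic_inc(&dm_global_event_nr);
 *	wake_up(&dm_global_eventq);
 */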
void dm_issue_global_event(void);

#endif