/*
 * Internal header file _only_ for device mapper core
 *
 * Copyright (C) 2016 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the LGPL.
 */

#ifndef DM_CORE_INTERNAL_H
#define DM_CORE_INTERNAL_H

#include <linux/kthread.h>
#include <linux/ktime.h>
#include <linux/genhd.h>
#include <linux/blk-mq.h>

#include <trace/events/block.h>

#include "dm.h"

#define DM_RESERVED_MAX_IOS		1024

struct dm_kobject_holder {
	struct kobject kobj;
	struct completion completion;
};

/*
 * DM core internal structures used directly by dm.c, dm-rq.c and dm-table.c.
 * DM targets must _not_ dereference a mapped_device or dm_table to directly
 * access their members!
 */

struct mapped_device {
	struct mutex suspend_lock;

	struct mutex table_devices_lock;
	struct list_head table_devices;

	/*
	 * The current mapping (struct dm_table *).
	 * Use dm_get_live_table{_fast} or take suspend_lock for
	 * dereference.
	 */
	void __rcu *map;

	unsigned long flags;
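
	/*
	 * A minimal sketch of the read-side pattern for the live map,
	 * assuming the dm_get_live_table()/dm_put_live_table() helpers
	 * declared in include/linux/device-mapper.h:
	 *
	 *	int srcu_idx;
	 *	struct dm_table *map = dm_get_live_table(md, &srcu_idx);
	 *
	 *	if (map) {
	 *		... look up targets and map IO ...
	 *	}
	 *	dm_put_live_table(md, srcu_idx);
	 */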

	/* Protect queue and type against concurrent access. */
	struct mutex type_lock;
	enum dm_queue_mode type;

	int numa_node_id;
	struct request_queue *queue;

	atomic_t holders;
	atomic_t open_count;

	struct dm_target *immutable_target;
	struct target_type *immutable_target_type;

	char name[16];
	struct gendisk *disk;
	struct dax_device *dax_dev;

	/*
	 * A list of ios that arrived while we were suspended.
	 */
	struct work_struct work;
	wait_queue_head_t wait;
	spinlock_t deferred_lock;
	struct bio_list deferred;
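
	/*
	 * Sketch of how dm.c parks a bio on this list while suspended
	 * (mirrors dm.c's internal queue_io() helper):
	 *
	 *	spin_lock_irqsave(&md->deferred_lock, flags);
	 *	bio_list_add(&md->deferred, bio);
	 *	spin_unlock_irqrestore(&md->deferred_lock, flags);
	 *	queue_work(md->wq, &md->work);
	 */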

	void *interface_ptr;

	/*
	 * Event handling.
	 */
	wait_queue_head_t eventq;
	atomic_t event_nr;
	atomic_t uevent_seq;
	struct list_head uevent_list;
	spinlock_t uevent_lock; /* Protect access to uevent_list */

	/* the number of internal suspends */
	unsigned internal_suspend_count;

	/*
	 * io objects are allocated from here.
	 */
	struct bio_set io_bs;
	struct bio_set bs;

	/*
	 * Processing queue (flush)
	 */
	struct workqueue_struct *wq;

	/*
	 * freeze/thaw support requires holding onto a super block
	 */
	struct super_block *frozen_sb;

	/* forced geometry settings */
	struct hd_geometry geometry;

	/* kobject and completion */
	struct dm_kobject_holder kobj_holder;

	struct block_device *bdev;

	struct dm_stats stats;

	/* for blk-mq request-based DM support */
	struct blk_mq_tag_set *tag_set;
	bool init_tio_pdu:1;

	struct srcu_struct io_barrier;
};
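
/*
 * io_barrier is the SRCU domain that makes the live-table accessors safe:
 * readers enter it, and table swaps wait for it to drain. A sketch of the
 * read-side helper as implemented in dm.c:
 *
 *	struct dm_table *dm_get_live_table(struct mapped_device *md,
 *					   int *srcu_idx)
 *	{
 *		*srcu_idx = srcu_read_lock(&md->io_barrier);
 *		return srcu_dereference(md->map, &md->io_barrier);
 *	}
 */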

void disable_discard(struct mapped_device *md);
void disable_write_same(struct mapped_device *md);
void disable_write_zeroes(struct mapped_device *md);
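
/*
 * These helpers are called when the underlying device fails a request of
 * the given type, so the capability can be dropped queue-wide. A minimal
 * sketch of their shape (illustrative, not the verbatim implementation):
 *
 *	void disable_write_zeroes(struct mapped_device *md)
 *	{
 *		struct queue_limits *limits = dm_get_queue_limits(md);
 *
 *		limits->max_write_zeroes_sectors = 0;
 *	}
 */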

static inline sector_t dm_get_size(struct mapped_device *md)
{
	return get_capacity(md->disk);
}

static inline struct dm_stats *dm_get_stats(struct mapped_device *md)
{
	return &md->stats;
}

#define DM_TABLE_MAX_DEPTH 16

struct dm_table {
	struct mapped_device *md;
	enum dm_queue_mode type;

	/* btree table */
	unsigned int depth;
	unsigned int counts[DM_TABLE_MAX_DEPTH]; /* in nodes */
	sector_t *index[DM_TABLE_MAX_DEPTH];
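
	/*
	 * Sketch of the lookup this btree supports: dm_table_find_target()
	 * in dm-table.c descends one node per level, picking the first key
	 * that covers the sector (simplified; get_child()/get_node() and
	 * KEYS_PER_NODE are dm-table.c internals):
	 *
	 *	for (l = 0, n = 0, k = 0; l < t->depth; l++) {
	 *		n = get_child(t, n, k);
	 *		node = get_node(t, l, n);
	 *		for (k = 0; k < KEYS_PER_NODE; k++)
	 *			if (node[k] >= sector)
	 *				break;
	 *	}
	 *	return &t->targets[(KEYS_PER_NODE * n) + k];
	 */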

	unsigned int num_targets;
	unsigned int num_allocated;
	sector_t *highs;
	struct dm_target *targets;

	struct target_type *immutable_target_type;

	bool integrity_supported:1;
	bool singleton:1;
	unsigned integrity_added:1;

	/*
	 * Indicates the rw permissions for the new logical
	 * device. This should be a combination of FMODE_READ
	 * and FMODE_WRITE.
	 */
	fmode_t mode;

	/* a list of devices used by this table */
	struct list_head devices;

	/* events get handed up using this callback */
	void (*event_fn)(void *);
	void *event_context;

	struct dm_md_mempools *mempools;
};

static inline struct completion *dm_get_completion_from_kobject(struct kobject *kobj)
{
	return &container_of(kobj, struct dm_kobject_holder, kobj)->completion;
}
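
/*
 * Minimal usage sketch, mirroring the kobject release handler in
 * dm-sysfs.c:
 *
 *	static void dm_kobject_release(struct kobject *kobj)
 *	{
 *		complete(dm_get_completion_from_kobject(kobj));
 *	}
 */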

unsigned __dm_get_module_param(unsigned *module_param, unsigned def, unsigned max);
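
/*
 * __dm_get_module_param() reads a writable module parameter, substituting
 * def when the stored value is 0 and capping it at max. Sketch of a
 * typical wrapper (dm.c's dm_get_reserved_bio_based_ios() has this shape;
 * RESERVED_BIO_BASED_IOS is a dm.c-internal default):
 *
 *	unsigned dm_get_reserved_bio_based_ios(void)
 *	{
 *		return __dm_get_module_param(&reserved_bio_based_ios,
 *					     RESERVED_BIO_BASED_IOS,
 *					     DM_RESERVED_MAX_IOS);
 *	}
 */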

static inline bool dm_message_test_buffer_overflow(char *result, unsigned maxlen)
{
	return !maxlen || strlen(result) + 1 >= maxlen;
}
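
/*
 * Callers append to 'result' and then test whether the reply still fits.
 * A hypothetical calling pattern (sz, maxlen and the overflow label are
 * the caller's own):
 *
 *	sz += scnprintf(result + sz, maxlen - sz, "%u", count);
 *	if (dm_message_test_buffer_overflow(result, maxlen))
 *		goto overflow;
 */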

extern atomic_t dm_global_event_nr;
extern wait_queue_head_t dm_global_eventq;
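
/*
 * dm_issue_global_event() (implemented in dm.c) bumps dm_global_event_nr
 * and wakes any waiters on dm_global_eventq; a sketch of its body:
 *
 *	atomic_inc(&dm_global_event_nr);
 *	wake_up(&dm_global_eventq);
 */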
void dm_issue_global_event(void);

#endif