1 /* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2017 Western Digital Corporation or its affiliates.
 *
 * This file is released under the GPL.
 */
11 #include <linux/types.h>
12 #include <linux/blkdev.h>
13 #include <linux/device-mapper.h>
14 #include <linux/dm-kcopyd.h>
15 #include <linux/list.h>
16 #include <linux/spinlock.h>
17 #include <linux/mutex.h>
18 #include <linux/workqueue.h>
19 #include <linux/rwsem.h>
20 #include <linux/rbtree.h>
21 #include <linux/radix-tree.h>
22 #include <linux/shrinker.h>
/*
 * dm-zoned creates block devices with 4KB blocks, always.
 */
#define DMZ_BLOCK_SHIFT		12
#define DMZ_BLOCK_SIZE		(1 << DMZ_BLOCK_SHIFT)
#define DMZ_BLOCK_MASK		(DMZ_BLOCK_SIZE - 1)

/* Same block quantities expressed in bits, for bitmap block arithmetic. */
#define DMZ_BLOCK_SHIFT_BITS	(DMZ_BLOCK_SHIFT + 3)
#define DMZ_BLOCK_SIZE_BITS	(1 << DMZ_BLOCK_SHIFT_BITS)
#define DMZ_BLOCK_MASK_BITS	(DMZ_BLOCK_SIZE_BITS - 1)
/* Block <-> sector size relationship (SECTOR_SHIFT is 9, i.e. 512B sectors). */
#define DMZ_BLOCK_SECTORS_SHIFT	(DMZ_BLOCK_SHIFT - SECTOR_SHIFT)
#define DMZ_BLOCK_SECTORS	(DMZ_BLOCK_SIZE >> SECTOR_SHIFT)
#define DMZ_BLOCK_SECTORS_MASK	(DMZ_BLOCK_SECTORS - 1)

/*
 * 4KB block <-> 512B sector conversion.
 */
#define dmz_blk2sect(b)	((sector_t)(b) << DMZ_BLOCK_SECTORS_SHIFT)
#define dmz_sect2blk(s)	((sector_t)(s) >> DMZ_BLOCK_SECTORS_SHIFT)

/* First 4KB block and number of 4KB blocks addressed by a BIO. */
#define dmz_bio_block(bio)	dmz_sect2blk((bio)->bi_iter.bi_sector)
#define dmz_bio_blocks(bio)	dmz_sect2blk(bio_sectors(bio))
/*
 * Zoned block device information.
 */
52 struct block_device *bdev;
54 char name[BDEVNAME_SIZE];
59 unsigned int nr_zones;
60 unsigned int zone_offset;
64 sector_t zone_nr_sectors;
/*
 * Chunk of a BIO: the BIO start sector divided by the zone size
 * (zones are a power-of-two number of sectors, hence the shift).
 * dmz_chunk_block() gives a block's offset within its chunk by
 * masking with the per-zone block count (also a power of two).
 */
#define dmz_bio_chunk(zmd, bio)	((bio)->bi_iter.bi_sector >> \
				 dmz_zone_nr_sectors_shift(zmd))
#define dmz_chunk_block(zmd, b)	((b) & (dmz_zone_nr_blocks(zmd) - 1))
/*
 * Device flags.
 * Spelled as single-bit masks: the previous (2 << 0) / (4 << 0) forms
 * expanded to the same values (1, 2, 4) but read like shift typos.
 */
#define DMZ_BDEV_DYING		(1 << 0)
#define DMZ_CHECK_BDEV		(1 << 1)
#define DMZ_BDEV_REGULAR	(1 << 2)
80 /* For listing the zone depending on its state */
81 struct list_head link;
83 /* Zone type and state */
86 /* Zone activation reference count */
92 /* Zone write pointer block (relative to the zone start block) */
93 unsigned int wp_block;
95 /* Zone weight (number of valid blocks in the zone) */
98 /* The chunk that the zone maps */
102 * For a sequential data zone, pointer to the random zone
103 * used as a buffer for processing unaligned writes.
104 * For a buffer zone, this points back to the data zone.
106 struct dm_zone *bzone;
113 /* Zone write type */
118 /* Zone critical condition */
122 /* How the zone is being used */
127 /* Zone internal state */
/*
 * Zone data accessors: all test bits in the zone's flags word, except
 * dmz_is_empty() and dmz_weight() which read fields directly.
 */
#define dmz_is_cache(z)		test_bit(DMZ_CACHE, &(z)->flags)
#define dmz_is_rnd(z)		test_bit(DMZ_RND, &(z)->flags)
#define dmz_is_seq(z)		test_bit(DMZ_SEQ, &(z)->flags)
/* A zone is empty when its write pointer is still at the zone start. */
#define dmz_is_empty(z)		((z)->wp_block == 0)
#define dmz_is_offline(z)	test_bit(DMZ_OFFLINE, &(z)->flags)
#define dmz_is_readonly(z)	test_bit(DMZ_READ_ONLY, &(z)->flags)
#define dmz_in_reclaim(z)	test_bit(DMZ_RECLAIM, &(z)->flags)
#define dmz_seq_write_err(z)	test_bit(DMZ_SEQ_WRITE_ERR, &(z)->flags)

#define dmz_is_meta(z)		test_bit(DMZ_META, &(z)->flags)
#define dmz_is_buf(z)		test_bit(DMZ_BUF, &(z)->flags)
#define dmz_is_data(z)		test_bit(DMZ_DATA, &(z)->flags)

/* Zone weight: number of valid blocks in the zone. */
#define dmz_weight(z)		((z)->weight)
/*
 * Per-device message helpers: prefix every log line with the
 * backing device name.
 */
#define dmz_dev_info(dev, format, args...)	\
	DMINFO("(%s): " format, (dev)->name, ## args)

#define dmz_dev_err(dev, format, args...)	\
	DMERR("(%s): " format, (dev)->name, ## args)

#define dmz_dev_warn(dev, format, args...)	\
	DMWARN("(%s): " format, (dev)->name, ## args)

#define dmz_dev_debug(dev, format, args...)	\
	DMDEBUG("(%s): " format, (dev)->name, ## args)
/*
 * Functions defined in dm-zoned-metadata.c
 */
171 int dmz_ctr_metadata(struct dmz_dev *dev, int num_dev,
172 struct dmz_metadata **zmd, const char *devname);
173 void dmz_dtr_metadata(struct dmz_metadata *zmd);
174 int dmz_resume_metadata(struct dmz_metadata *zmd);
176 void dmz_lock_map(struct dmz_metadata *zmd);
177 void dmz_unlock_map(struct dmz_metadata *zmd);
178 void dmz_lock_metadata(struct dmz_metadata *zmd);
179 void dmz_unlock_metadata(struct dmz_metadata *zmd);
180 void dmz_lock_flush(struct dmz_metadata *zmd);
181 void dmz_unlock_flush(struct dmz_metadata *zmd);
182 int dmz_flush_metadata(struct dmz_metadata *zmd);
183 const char *dmz_metadata_label(struct dmz_metadata *zmd);
185 sector_t dmz_start_sect(struct dmz_metadata *zmd, struct dm_zone *zone);
186 sector_t dmz_start_block(struct dmz_metadata *zmd, struct dm_zone *zone);
187 unsigned int dmz_nr_chunks(struct dmz_metadata *zmd);
188 struct dmz_dev *dmz_zone_to_dev(struct dmz_metadata *zmd, struct dm_zone *zone);
190 bool dmz_check_dev(struct dmz_metadata *zmd);
191 bool dmz_dev_is_dying(struct dmz_metadata *zmd);
/*
 * Zone allocation flags for dmz_alloc_zone().
 * NOTE(review): 0x08 is unused — presumably left free on purpose; confirm
 * against dm-zoned-metadata.c before reusing it.
 */
#define DMZ_ALLOC_RND		0x01
#define DMZ_ALLOC_CACHE		0x02
#define DMZ_ALLOC_SEQ		0x04
#define DMZ_ALLOC_RECLAIM	0x10
198 struct dm_zone *dmz_alloc_zone(struct dmz_metadata *zmd, unsigned long flags);
199 void dmz_free_zone(struct dmz_metadata *zmd, struct dm_zone *zone);
201 void dmz_map_zone(struct dmz_metadata *zmd, struct dm_zone *zone,
203 void dmz_unmap_zone(struct dmz_metadata *zmd, struct dm_zone *zone);
204 unsigned int dmz_nr_zones(struct dmz_metadata *zmd);
205 unsigned int dmz_nr_cache_zones(struct dmz_metadata *zmd);
206 unsigned int dmz_nr_unmap_cache_zones(struct dmz_metadata *zmd);
207 unsigned int dmz_nr_rnd_zones(struct dmz_metadata *zmd);
208 unsigned int dmz_nr_unmap_rnd_zones(struct dmz_metadata *zmd);
209 unsigned int dmz_nr_seq_zones(struct dmz_metadata *zmd);
210 unsigned int dmz_nr_unmap_seq_zones(struct dmz_metadata *zmd);
211 unsigned int dmz_zone_nr_blocks(struct dmz_metadata *zmd);
212 unsigned int dmz_zone_nr_blocks_shift(struct dmz_metadata *zmd);
213 unsigned int dmz_zone_nr_sectors(struct dmz_metadata *zmd);
214 unsigned int dmz_zone_nr_sectors_shift(struct dmz_metadata *zmd);
217 * Activate a zone (increment its reference count).
219 static inline void dmz_activate_zone(struct dm_zone *zone)
221 atomic_inc(&zone->refcount);
225 * Deactivate a zone. This decrement the zone reference counter
226 * indicating that all BIOs to the zone have completed when the count is 0.
228 static inline void dmz_deactivate_zone(struct dm_zone *zone)
230 atomic_dec(&zone->refcount);
234 * Test if a zone is active, that is, has a refcount > 0.
236 static inline bool dmz_is_active(struct dm_zone *zone)
238 return atomic_read(&zone->refcount);
241 int dmz_lock_zone_reclaim(struct dm_zone *zone);
242 void dmz_unlock_zone_reclaim(struct dm_zone *zone);
243 struct dm_zone *dmz_get_zone_for_reclaim(struct dmz_metadata *zmd);
245 struct dm_zone *dmz_get_chunk_mapping(struct dmz_metadata *zmd,
246 unsigned int chunk, int op);
247 void dmz_put_chunk_mapping(struct dmz_metadata *zmd, struct dm_zone *zone);
248 struct dm_zone *dmz_get_chunk_buffer(struct dmz_metadata *zmd,
249 struct dm_zone *dzone);
251 int dmz_validate_blocks(struct dmz_metadata *zmd, struct dm_zone *zone,
252 sector_t chunk_block, unsigned int nr_blocks);
253 int dmz_invalidate_blocks(struct dmz_metadata *zmd, struct dm_zone *zone,
254 sector_t chunk_block, unsigned int nr_blocks);
255 int dmz_block_valid(struct dmz_metadata *zmd, struct dm_zone *zone,
256 sector_t chunk_block);
257 int dmz_first_valid_block(struct dmz_metadata *zmd, struct dm_zone *zone,
258 sector_t *chunk_block);
259 int dmz_copy_valid_blocks(struct dmz_metadata *zmd, struct dm_zone *from_zone,
260 struct dm_zone *to_zone);
261 int dmz_merge_valid_blocks(struct dmz_metadata *zmd, struct dm_zone *from_zone,
262 struct dm_zone *to_zone, sector_t chunk_block);
/*
 * Functions defined in dm-zoned-reclaim.c
 */
267 int dmz_ctr_reclaim(struct dmz_metadata *zmd, struct dmz_reclaim **zrc);
268 void dmz_dtr_reclaim(struct dmz_reclaim *zrc);
269 void dmz_suspend_reclaim(struct dmz_reclaim *zrc);
270 void dmz_resume_reclaim(struct dmz_reclaim *zrc);
271 void dmz_reclaim_bio_acc(struct dmz_reclaim *zrc);
272 void dmz_schedule_reclaim(struct dmz_reclaim *zrc);
/*
 * Functions defined in dm-zoned-target.c
 */
277 bool dmz_bdev_is_dying(struct dmz_dev *dmz_dev);
278 bool dmz_check_bdev(struct dmz_dev *dmz_dev);
280 #endif /* DM_ZONED_H */