// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017 Western Digital Corporation or its affiliates.
 *
 * This file is released under the GPL.
 */

#include "dm-zoned.h"

#include <linux/module.h>

#define DM_MSG_PREFIX		"zoned reclaim"

struct dmz_reclaim {
	struct dmz_metadata	*metadata;

	struct delayed_work	work;
	struct workqueue_struct *wq;

	struct dm_kcopyd_client	*kc;
	struct dm_kcopyd_throttle kc_throttle;
	int			kc_err;

	unsigned long		flags;

	/* Last target access time */
	unsigned long		atime;
};

/*
 * Reclaim state flags.
 */
enum {
	DMZ_RECLAIM_KCOPY,
};

/*
 * Number of seconds of target BIO inactivity to consider the target idle.
 */
#define DMZ_IDLE_PERIOD		(10UL * HZ)

/*
 * Percentage of unmapped (free) random zones below which reclaim starts
 * even if the target is busy.
 */
#define DMZ_RECLAIM_LOW_UNMAP_RND	30

/*
 * Percentage of unmapped (free) random zones above which reclaim will
 * stop if the target is busy.
 */
#define DMZ_RECLAIM_HIGH_UNMAP_RND	50
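
/*
 * Example (illustrative numbers): with 100 random zones, a busy target
 * starts reclaiming once 30 or fewer of them are unmapped and never
 * reclaims while 50 or more are unmapped; an idle target reclaims
 * whenever at least one random zone is mapped (see dmz_should_reclaim()).
 */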

/*
 * Align a sequential zone write pointer to chunk_block.
 */
static int dmz_reclaim_align_wp(struct dmz_reclaim *zrc, struct dm_zone *zone,
				sector_t block)
{
	struct dmz_metadata *zmd = zrc->metadata;
	struct dmz_dev *dev = dmz_zone_to_dev(zmd, zone);
	sector_t wp_block = zone->wp_block;
	unsigned int nr_blocks;
	int ret;

	if (wp_block == block)
		return 0;

	if (wp_block > block)
		return -EIO;

	/*
	 * Zero out the space between the write
	 * pointer and the requested position.
	 */
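	/*
	 * Example (illustrative numbers): if the zone write pointer is at
	 * block 1000 and the next valid block to copy is block 1024, the
	 * 24 blocks in between are zeroed so that the zone is still
	 * written strictly sequentially.
	 */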
	nr_blocks = block - wp_block;
	ret = blkdev_issue_zeroout(dev->bdev,
				   dmz_start_sect(zmd, zone) + dmz_blk2sect(wp_block),
				   dmz_blk2sect(nr_blocks), GFP_NOIO, 0);
	if (ret) {
		dmz_dev_err(dev,
			    "Align zone %u wp %llu to %llu (wp+%u) blocks failed %d",
			    zone->id, (unsigned long long)wp_block,
			    (unsigned long long)block, nr_blocks, ret);
		dmz_check_bdev(dev);
		return ret;
	}

	zone->wp_block = block;

	return 0;
}

/*
 * dm_kcopyd_copy end notification.
 */
static void dmz_reclaim_kcopy_end(int read_err, unsigned long write_err,
				  void *context)
{
	struct dmz_reclaim *zrc = context;

	if (read_err || write_err)
		zrc->kc_err = -EIO;
	else
		zrc->kc_err = 0;

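	/*
	 * Release the copy-in-flight bit and wake up the reclaim worker
	 * sleeping in wait_on_bit_io(); the barrier orders the bit update
	 * before the wait queue wakeup.
	 */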
	clear_bit_unlock(DMZ_RECLAIM_KCOPY, &zrc->flags);
	smp_mb__after_atomic();
	wake_up_bit(&zrc->flags, DMZ_RECLAIM_KCOPY);
}

/*
 * Copy valid blocks of src_zone into dst_zone.
 */
static int dmz_reclaim_copy(struct dmz_reclaim *zrc,
			    struct dm_zone *src_zone, struct dm_zone *dst_zone)
{
	struct dmz_metadata *zmd = zrc->metadata;
	struct dmz_dev *src_dev, *dst_dev;
	struct dm_io_region src, dst;
	sector_t block = 0, end_block;
	sector_t nr_blocks;
	sector_t src_zone_block;
	sector_t dst_zone_block;
	unsigned long flags = 0;
	int ret;

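	/*
	 * For a sequential source zone, blocks at or beyond the write
	 * pointer cannot be valid, so only copy blocks below it. A random
	 * zone may hold valid blocks anywhere, so scan the entire zone.
	 */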
	if (dmz_is_seq(src_zone))
		end_block = src_zone->wp_block;
	else
		end_block = dmz_zone_nr_blocks(zmd);
	src_zone_block = dmz_start_block(zmd, src_zone);
	src_dev = dmz_zone_to_dev(zmd, src_zone);
	dst_zone_block = dmz_start_block(zmd, dst_zone);
	dst_dev = dmz_zone_to_dev(zmd, dst_zone);

	if (dmz_is_seq(dst_zone))
		set_bit(DM_KCOPYD_WRITE_SEQ, &flags);

	while (block < end_block) {
		if (src_dev->flags & DMZ_BDEV_DYING)
			return -EIO;
		if (dst_dev->flags & DMZ_BDEV_DYING)
			return -EIO;

		/* Get a valid region from the source zone */
		ret = dmz_first_valid_block(zmd, src_zone, &block);
		if (ret <= 0)
			return ret;
		nr_blocks = ret;

		/*
		 * If we are writing in a sequential zone, we must make sure
		 * that writes are sequential, so zero out any hole between
		 * the destination write pointer and this valid block.
		 */
		if (dmz_is_seq(dst_zone)) {
			ret = dmz_reclaim_align_wp(zrc, dst_zone, block);
			if (ret)
				return ret;
		}

		src.bdev = src_dev->bdev;
		src.sector = dmz_blk2sect(src_zone_block + block);
		src.count = dmz_blk2sect(nr_blocks);

		dst.bdev = dst_dev->bdev;
		dst.sector = dmz_blk2sect(dst_zone_block + block);
		dst.count = src.count;

		/* Copy the valid region */
		set_bit(DMZ_RECLAIM_KCOPY, &zrc->flags);
		dm_kcopyd_copy(zrc->kc, &src, 1, &dst, flags,
			       dmz_reclaim_kcopy_end, zrc);

		/* Wait for copy to complete */
		wait_on_bit_io(&zrc->flags, DMZ_RECLAIM_KCOPY,
			       TASK_UNINTERRUPTIBLE);
		if (zrc->kc_err)
			return zrc->kc_err;

		block += nr_blocks;
		if (dmz_is_seq(dst_zone))
			dst_zone->wp_block = block;
	}

	return 0;
}
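
/*
 * Note: the copy loop above is fully synchronous: a single
 * dm_kcopyd_copy() is issued per valid region and the worker sleeps in
 * wait_on_bit_io() until dmz_reclaim_kcopy_end() fires, so at most one
 * reclaim copy is in flight at any time.
 */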

/*
 * Move valid blocks of dzone's buffer zone into dzone (after its write
 * pointer) and free the buffer zone.
 */
static int dmz_reclaim_buf(struct dmz_reclaim *zrc, struct dm_zone *dzone)
{
	struct dm_zone *bzone = dzone->bzone;
	sector_t chunk_block = dzone->wp_block;
	struct dmz_metadata *zmd = zrc->metadata;
	int ret;

	DMDEBUG("(%s): Chunk %u, move buf zone %u (weight %u) to data zone %u (weight %u)",
		dmz_metadata_label(zmd),
		dzone->chunk, bzone->id, dmz_weight(bzone),
		dzone->id, dmz_weight(dzone));

	/* Flush the buffer zone into the data zone */
	ret = dmz_reclaim_copy(zrc, bzone, dzone);
	if (ret < 0)
		return ret;

	dmz_lock_flush(zmd);

	/* Validate copied blocks */
	ret = dmz_merge_valid_blocks(zmd, bzone, dzone, chunk_block);
	if (ret == 0) {
		/* Free the buffer zone */
		dmz_invalidate_blocks(zmd, bzone, 0, dmz_zone_nr_blocks(zmd));
		dmz_lock_map(zmd);
		dmz_unmap_zone(zmd, bzone);
		dmz_unlock_zone_reclaim(dzone);
		dmz_free_zone(zmd, bzone);
		dmz_unlock_map(zmd);
	}

	dmz_unlock_flush(zmd);

	return ret;
}

/*
 * Merge valid blocks of dzone into its buffer zone and free dzone.
 */
static int dmz_reclaim_seq_data(struct dmz_reclaim *zrc, struct dm_zone *dzone)
{
	unsigned int chunk = dzone->chunk;
	struct dm_zone *bzone = dzone->bzone;
	struct dmz_metadata *zmd = zrc->metadata;
	int ret = 0;

	DMDEBUG("(%s): Chunk %u, move data zone %u (weight %u) to buf zone %u (weight %u)",
		dmz_metadata_label(zmd),
		chunk, dzone->id, dmz_weight(dzone),
		bzone->id, dmz_weight(bzone));

	/* Flush the data zone into the buffer zone */
	ret = dmz_reclaim_copy(zrc, dzone, bzone);
	if (ret < 0)
		return ret;

	dmz_lock_flush(zmd);

	/* Validate copied blocks */
	ret = dmz_merge_valid_blocks(zmd, dzone, bzone, 0);
	if (ret == 0) {
		/*
		 * Free the data zone and remap the chunk to
		 * the buffer zone.
		 */
		dmz_invalidate_blocks(zmd, dzone, 0, dmz_zone_nr_blocks(zmd));
		dmz_lock_map(zmd);
		dmz_unmap_zone(zmd, bzone);
		dmz_unmap_zone(zmd, dzone);
		dmz_unlock_zone_reclaim(dzone);
		dmz_free_zone(zmd, dzone);
		dmz_map_zone(zmd, bzone, chunk);
		dmz_unlock_map(zmd);
	}

	dmz_unlock_flush(zmd);

	return ret;
}

/*
 * Move valid blocks of the random data zone dzone into a free sequential zone.
 * Once blocks are moved, remap the zone chunk to the sequential zone.
 */
static int dmz_reclaim_rnd_data(struct dmz_reclaim *zrc, struct dm_zone *dzone)
{
	unsigned int chunk = dzone->chunk;
	struct dm_zone *szone = NULL;
	struct dmz_metadata *zmd = zrc->metadata;
	int ret;

	/* Get a free sequential zone */
	dmz_lock_map(zmd);
	szone = dmz_alloc_zone(zmd, DMZ_ALLOC_RECLAIM);
	dmz_unlock_map(zmd);
	if (!szone)
		return -ENOSPC;

	DMDEBUG("(%s): Chunk %u, move rnd zone %u (weight %u) to seq zone %u",
		dmz_metadata_label(zmd),
		chunk, dzone->id, dmz_weight(dzone), szone->id);

	/* Flush the random data zone into the sequential zone */
	ret = dmz_reclaim_copy(zrc, dzone, szone);

	dmz_lock_flush(zmd);

	if (ret == 0) {
		/* Validate copied blocks */
		ret = dmz_copy_valid_blocks(zmd, dzone, szone);
	}
	if (ret) {
		/* Free the sequential zone */
		dmz_lock_map(zmd);
		dmz_free_zone(zmd, szone);
		dmz_unlock_map(zmd);
	} else {
		/* Free the data zone and remap the chunk */
		dmz_invalidate_blocks(zmd, dzone, 0, dmz_zone_nr_blocks(zmd));
		dmz_lock_map(zmd);
		dmz_unmap_zone(zmd, dzone);
		dmz_unlock_zone_reclaim(dzone);
		dmz_free_zone(zmd, dzone);
		dmz_map_zone(zmd, szone, chunk);
		dmz_unlock_map(zmd);
	}

	dmz_unlock_flush(zmd);

	return ret;
}

/*
 * Reclaim an empty zone.
 */
static void dmz_reclaim_empty(struct dmz_reclaim *zrc, struct dm_zone *dzone)
{
	struct dmz_metadata *zmd = zrc->metadata;

	dmz_lock_flush(zmd);
	dmz_lock_map(zmd);
	dmz_unmap_zone(zmd, dzone);
	dmz_unlock_zone_reclaim(dzone);
	dmz_free_zone(zmd, dzone);
	dmz_unlock_map(zmd);
	dmz_unlock_flush(zmd);
}

/*
 * Find a candidate zone for reclaim and process it.
 */
static int dmz_do_reclaim(struct dmz_reclaim *zrc)
{
	struct dmz_metadata *zmd = zrc->metadata;
	struct dm_zone *dzone;
	struct dm_zone *rzone;
	unsigned long start;
	int ret;

	/* Get a data zone */
	dzone = dmz_get_zone_for_reclaim(zmd);
	if (!dzone)
		return -EBUSY;

	start = jiffies;
	if (dmz_is_rnd(dzone)) {
		if (!dmz_weight(dzone)) {
			/* Empty zone */
			dmz_reclaim_empty(zrc, dzone);
			ret = 0;
		} else {
			/*
			 * Reclaim the random data zone by moving its
			 * valid data blocks to a free sequential zone.
			 */
			ret = dmz_reclaim_rnd_data(zrc, dzone);
		}
		rzone = dzone;
	} else {
		struct dm_zone *bzone = dzone->bzone;
		sector_t chunk_block = 0;

		ret = dmz_first_valid_block(zmd, bzone, &chunk_block);
		if (ret < 0)
			goto out;

		if (ret == 0 || chunk_block >= dzone->wp_block) {
			/*
			 * The buffer zone is empty or its valid blocks are
			 * after the data zone write pointer.
			 */
			ret = dmz_reclaim_buf(zrc, dzone);
			rzone = bzone;
		} else {
			/*
			 * Reclaim the data zone by merging it into the
			 * buffer zone so that the buffer zone itself can
			 * be later reclaimed.
			 */
			ret = dmz_reclaim_seq_data(zrc, dzone);
			rzone = dzone;
		}
	}
out:
	if (ret) {
		dmz_unlock_zone_reclaim(dzone);
		return ret;
	}

	ret = dmz_flush_metadata(zrc->metadata);
	if (ret) {
		DMDEBUG("(%s): Metadata flush for zone %u failed, err %d",
			dmz_metadata_label(zmd), rzone->id, ret);
		return ret;
	}

	DMDEBUG("(%s): Reclaimed zone %u in %u ms",
		dmz_metadata_label(zmd),
		rzone->id, jiffies_to_msecs(jiffies - start));
	return 0;
}

/*
 * Test if the target device is idle.
 */
static inline int dmz_target_idle(struct dmz_reclaim *zrc)
{
	return time_is_before_jiffies(zrc->atime + DMZ_IDLE_PERIOD);
}
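
/*
 * Note: the target is considered idle once DMZ_IDLE_PERIOD (10 seconds)
 * has passed since the last BIO was accounted in dmz_reclaim_bio_acc().
 */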

/*
 * Test if reclaim is necessary.
 */
static bool dmz_should_reclaim(struct dmz_reclaim *zrc)
{
	struct dmz_metadata *zmd = zrc->metadata;
	unsigned int nr_rnd = dmz_nr_rnd_zones(zmd);
	unsigned int nr_unmap_rnd = dmz_nr_unmap_rnd_zones(zmd);
	unsigned int p_unmap_rnd = nr_unmap_rnd * 100 / nr_rnd;

	/* Reclaim when idle */
	if (dmz_target_idle(zrc) && nr_unmap_rnd < nr_rnd)
		return true;

	/* If there are still plenty of random zones, do not reclaim */
	if (p_unmap_rnd >= DMZ_RECLAIM_HIGH_UNMAP_RND)
		return false;

	/*
	 * If the percentage of unmapped random zones is low,
	 * reclaim even if the target is busy.
	 */
	return p_unmap_rnd <= DMZ_RECLAIM_LOW_UNMAP_RND;
}

/*
 * Reclaim work function.
 */
static void dmz_reclaim_work(struct work_struct *work)
{
	struct dmz_reclaim *zrc = container_of(work, struct dmz_reclaim, work.work);
	struct dmz_metadata *zmd = zrc->metadata;
	unsigned int nr_rnd, nr_unmap_rnd;
	unsigned int p_unmap_rnd;
	int ret;

	if (dmz_dev_is_dying(zmd))
		return;

	if (!dmz_should_reclaim(zrc)) {
		mod_delayed_work(zrc->wq, &zrc->work, DMZ_IDLE_PERIOD);
		return;
	}

	/*
	 * We need to start reclaiming random zones: set up zone copy
	 * throttling so that reclaim runs at full speed when random zones
	 * are nearly exhausted, and more slowly while free random zones
	 * remain, to limit the impact on the user workload.
	 */
	nr_rnd = dmz_nr_rnd_zones(zmd);
	nr_unmap_rnd = dmz_nr_unmap_rnd_zones(zmd);
	p_unmap_rnd = nr_unmap_rnd * 100 / nr_rnd;
	if (dmz_target_idle(zrc) || p_unmap_rnd < DMZ_RECLAIM_LOW_UNMAP_RND / 2) {
		/* Idle or very low percentage: go fast */
		zrc->kc_throttle.throttle = 100;
	} else {
		/* Busy, but some random zones are still free: throttle */
		zrc->kc_throttle.throttle = min(75U, 100U - p_unmap_rnd / 2);
	}
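
	/*
	 * Example (illustrative numbers): at p_unmap_rnd = 20%, the busy
	 * case sets the throttle to min(75, 100 - 20/2) = 75% of kcopyd
	 * bandwidth. A busy target only reclaims when p_unmap_rnd <= 30%,
	 * so 100 - p_unmap_rnd / 2 is at least 85 and the 75% cap is what
	 * effectively applies whenever the target is not idle.
	 */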
	DMDEBUG("(%s): Reclaim (%u): %s, %u%% free rnd zones (%u/%u)",
		dmz_metadata_label(zmd),
		zrc->kc_throttle.throttle,
		(dmz_target_idle(zrc) ? "Idle" : "Busy"),
		p_unmap_rnd, nr_unmap_rnd, nr_rnd);

	ret = dmz_do_reclaim(zrc);
	if (ret) {
		DMDEBUG("(%s): Reclaim error %d",
			dmz_metadata_label(zmd), ret);
		if (!dmz_check_dev(zmd))
			return;
	}

	dmz_schedule_reclaim(zrc);
}

/*
 * Initialize reclaim.
 */
int dmz_ctr_reclaim(struct dmz_metadata *zmd,
		    struct dmz_reclaim **reclaim)
{
	struct dmz_reclaim *zrc;
	int ret;

	zrc = kzalloc(sizeof(struct dmz_reclaim), GFP_KERNEL);
	if (!zrc)
		return -ENOMEM;

	zrc->metadata = zmd;
	zrc->atime = jiffies;

	/* Reclaim kcopyd client */
	zrc->kc = dm_kcopyd_client_create(&zrc->kc_throttle);
	if (IS_ERR(zrc->kc)) {
		ret = PTR_ERR(zrc->kc);
		zrc->kc = NULL;
		goto err;
	}

	/* Reclaim work */
	INIT_DELAYED_WORK(&zrc->work, dmz_reclaim_work);
	zrc->wq = alloc_ordered_workqueue("dmz_rwq_%s", WQ_MEM_RECLAIM,
					  dmz_metadata_label(zmd));
	if (!zrc->wq) {
		ret = -ENOMEM;
		goto err;
	}

	*reclaim = zrc;
	queue_delayed_work(zrc->wq, &zrc->work, 0);

	return 0;
err:
	if (zrc->kc)
		dm_kcopyd_client_destroy(zrc->kc);
	kfree(zrc);

	return ret;
}

/*
 * Terminate reclaim.
 */
void dmz_dtr_reclaim(struct dmz_reclaim *zrc)
{
	cancel_delayed_work_sync(&zrc->work);
	destroy_workqueue(zrc->wq);
	dm_kcopyd_client_destroy(zrc->kc);
	kfree(zrc);
}

/*
 * Suspend reclaim.
 */
void dmz_suspend_reclaim(struct dmz_reclaim *zrc)
{
	cancel_delayed_work_sync(&zrc->work);
}

/*
 * Resume reclaim.
 */
void dmz_resume_reclaim(struct dmz_reclaim *zrc)
{
	queue_delayed_work(zrc->wq, &zrc->work, DMZ_IDLE_PERIOD);
}

/*
 * BIO accounting.
 */
void dmz_reclaim_bio_acc(struct dmz_reclaim *zrc)
{
	zrc->atime = jiffies;
}

/*
 * Start reclaim if necessary.
 */
void dmz_schedule_reclaim(struct dmz_reclaim *zrc)
{
	if (dmz_should_reclaim(zrc))
		mod_delayed_work(zrc->wq, &zrc->work, 0);
}