// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * pblk-gc.c - pblk's garbage collector
 */

#include "pblk.h"
#include "pblk-trace.h"
#include <linux/delay.h>

static void pblk_gc_free_gc_rq(struct pblk_gc_rq *gc_rq)
{
        if (gc_rq->data)
                vfree(gc_rq->data);
        kfree(gc_rq);
}

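/*
 * Drain the queued GC write requests and hand their payloads over to the
 * write cache. Returns 0 if work was done, 1 if the write list was empty.
 */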
static int pblk_gc_write(struct pblk *pblk)
{
        struct pblk_gc *gc = &pblk->gc;
        struct pblk_gc_rq *gc_rq, *tgc_rq;
        LIST_HEAD(w_list);

        spin_lock(&gc->w_lock);
        if (list_empty(&gc->w_list)) {
                spin_unlock(&gc->w_lock);
                return 1;
        }

        list_cut_position(&w_list, &gc->w_list, gc->w_list.prev);
        gc->w_entries = 0;
        spin_unlock(&gc->w_lock);

        list_for_each_entry_safe(gc_rq, tgc_rq, &w_list, list) {
                pblk_write_gc_to_cache(pblk, gc_rq);
                list_del(&gc_rq->list);
                kref_put(&gc_rq->line->ref, pblk_line_put);
                pblk_gc_free_gc_rq(gc_rq);
        }

        return 0;
}

static void pblk_gc_writer_kick(struct pblk_gc *gc)
{
        wake_up_process(gc->gc_writer_ts);
}

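/*
 * Put a line taken for GC back on the GC group list that matches its valid
 * sector count, e.g. after a failed GC attempt, so that it can be selected
 * as a victim again later.
 */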
static void pblk_put_line_back(struct pblk *pblk, struct pblk_line *line)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct list_head *move_list;

        spin_lock(&line->lock);
        WARN_ON(line->state != PBLK_LINESTATE_GC);
        line->state = PBLK_LINESTATE_CLOSED;
        trace_pblk_line_state(pblk_disk_name(pblk), line->id,
                                        line->state);
        move_list = pblk_line_gc_list(pblk, line);
        spin_unlock(&line->lock);

        if (move_list) {
                spin_lock(&l_mg->gc_lock);
                list_add_tail(&line->list, move_list);
                spin_unlock(&l_mg->gc_lock);
        }
}

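/*
 * Read worker: read the valid sectors described by one GC request from the
 * victim line and queue the data for the GC writer. The gc_sem slot taken
 * in pblk_gc_line_prepare_ws() is released on entry; the w_entries check
 * below throttles against PBLK_GC_RQ_QD so the writer is not overrun.
 */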
static void pblk_gc_line_ws(struct work_struct *work)
{
        struct pblk_line_ws *gc_rq_ws = container_of(work,
                                                struct pblk_line_ws, ws);
        struct pblk *pblk = gc_rq_ws->pblk;
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_gc *gc = &pblk->gc;
        struct pblk_line *line = gc_rq_ws->line;
        struct pblk_gc_rq *gc_rq = gc_rq_ws->priv;
        int ret;

        up(&gc->gc_sem);

        gc_rq->data = vmalloc(array_size(gc_rq->nr_secs, geo->csecs));
        if (!gc_rq->data) {
                pblk_err(pblk, "could not GC line:%d (%d/%d)\n",
                                        line->id, *line->vsc, gc_rq->nr_secs);
                goto out;
        }

        /* Read from GC victim block */
        ret = pblk_submit_read_gc(pblk, gc_rq);
        if (ret) {
                pblk_err(pblk, "failed GC read in line:%d (err:%d)\n",
                                                                line->id, ret);
                goto out;
        }

        if (!gc_rq->secs_to_gc)
                goto out;

retry:
        spin_lock(&gc->w_lock);
        if (gc->w_entries >= PBLK_GC_RQ_QD) {
                spin_unlock(&gc->w_lock);
                pblk_gc_writer_kick(&pblk->gc);
                usleep_range(128, 256);
                goto retry;
        }
        gc->w_entries++;
        list_add_tail(&gc_rq->list, &gc->w_list);
        spin_unlock(&gc->w_lock);

        pblk_gc_writer_kick(&pblk->gc);

        kfree(gc_rq_ws);
        return;

out:
        pblk_gc_free_gc_rq(gc_rq);
        kref_put(&line->ref, pblk_line_put);
        kfree(gc_rq_ws);
}

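/*
 * Read the line's end metadata (emeta) from the device and return a copy of
 * its LBA list, which records the logical address stored in each physical
 * sector of the line. Returns NULL if emeta cannot be read or fails the
 * consistency check.
 */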
static __le64 *get_lba_list_from_emeta(struct pblk *pblk,
                                       struct pblk_line *line)
{
        struct line_emeta *emeta_buf;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line_meta *lm = &pblk->lm;
        unsigned int lba_list_size = lm->emeta_len[2];
        __le64 *lba_list;
        int ret;

        emeta_buf = pblk_malloc(lm->emeta_len[0],
                                l_mg->emeta_alloc_type, GFP_KERNEL);
        if (!emeta_buf)
                return NULL;

        ret = pblk_line_emeta_read(pblk, line, emeta_buf);
        if (ret) {
                pblk_err(pblk, "line %d read emeta failed (%d)\n",
                                line->id, ret);
                pblk_mfree(emeta_buf, l_mg->emeta_alloc_type);
                return NULL;
        }

        /* If this check fails, it means that emeta is corrupted.
         * For now, leave the line untouched.
         * TODO: Implement a recovery routine that scans and moves
         * all sectors on the line.
         */

        ret = pblk_recov_check_emeta(pblk, emeta_buf);
        if (ret) {
                pblk_err(pblk, "inconsistent emeta (line %d)\n",
                                line->id);
                pblk_mfree(emeta_buf, l_mg->emeta_alloc_type);
                return NULL;
        }

        lba_list = pblk_malloc(lba_list_size,
                               l_mg->emeta_alloc_type, GFP_KERNEL);
        if (lba_list)
                memcpy(lba_list, emeta_to_lbas(pblk, emeta_buf), lba_list_size);

        pblk_mfree(emeta_buf, l_mg->emeta_alloc_type);

        return lba_list;
}

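/*
 * Prepare worker: obtain the victim line's LBA list (from the write-error
 * context or from emeta), snapshot its invalid-sector bitmap, and carve the
 * remaining valid sectors into GC read requests of at most max_write_pgs
 * sectors each, queued on the line reader workqueue.
 */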
static void pblk_gc_line_prepare_ws(struct work_struct *work)
{
        struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws,
                                                                        ws);
        struct pblk *pblk = line_ws->pblk;
        struct pblk_line *line = line_ws->line;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line_meta *lm = &pblk->lm;
        struct pblk_gc *gc = &pblk->gc;
        struct pblk_line_ws *gc_rq_ws;
        struct pblk_gc_rq *gc_rq;
        __le64 *lba_list;
        unsigned long *invalid_bitmap;
        int sec_left, nr_secs, bit;

        invalid_bitmap = kmalloc(lm->sec_bitmap_len, GFP_KERNEL);
        if (!invalid_bitmap)
                goto fail_free_ws;

        if (line->w_err_gc->has_write_err) {
                lba_list = line->w_err_gc->lba_list;
                line->w_err_gc->lba_list = NULL;
        } else {
                lba_list = get_lba_list_from_emeta(pblk, line);
                if (!lba_list) {
                        pblk_err(pblk, "could not interpret emeta (line %d)\n",
                                        line->id);
                        goto fail_free_invalid_bitmap;
                }
        }

        spin_lock(&line->lock);
        bitmap_copy(invalid_bitmap, line->invalid_bitmap, lm->sec_per_line);
        sec_left = pblk_line_vsc(line);
        spin_unlock(&line->lock);

        if (sec_left < 0) {
                pblk_err(pblk, "corrupted GC line (%d)\n", line->id);
                goto fail_free_lba_list;
        }

        bit = -1;
next_rq:
        gc_rq = kmalloc(sizeof(struct pblk_gc_rq), GFP_KERNEL);
        if (!gc_rq)
                goto fail_free_lba_list;

        nr_secs = 0;
        do {
                bit = find_next_zero_bit(invalid_bitmap, lm->sec_per_line,
                                                                bit + 1);
                if (bit > line->emeta_ssec)
                        break;

                gc_rq->paddr_list[nr_secs] = bit;
                gc_rq->lba_list[nr_secs++] = le64_to_cpu(lba_list[bit]);
        } while (nr_secs < pblk->max_write_pgs);

        if (unlikely(!nr_secs)) {
                kfree(gc_rq);
                goto out;
        }

        gc_rq->nr_secs = nr_secs;
        gc_rq->line = line;

        gc_rq_ws = kmalloc(sizeof(struct pblk_line_ws), GFP_KERNEL);
        if (!gc_rq_ws)
                goto fail_free_gc_rq;

        gc_rq_ws->pblk = pblk;
        gc_rq_ws->line = line;
        gc_rq_ws->priv = gc_rq;

        /* The write GC path can be much slower than the read GC one due to
         * the budget imposed by the rate-limiter. Balance in case we get
         * back-pressure from the write GC path.
         */
        while (down_timeout(&gc->gc_sem, msecs_to_jiffies(30000)))
                io_schedule();

        kref_get(&line->ref);

        INIT_WORK(&gc_rq_ws->ws, pblk_gc_line_ws);
        queue_work(gc->gc_line_reader_wq, &gc_rq_ws->ws);

        sec_left -= nr_secs;
        if (sec_left > 0)
                goto next_rq;

out:
        pblk_mfree(lba_list, l_mg->emeta_alloc_type);
        kfree(line_ws);
        kfree(invalid_bitmap);

        kref_put(&line->ref, pblk_line_put);
        atomic_dec(&gc->read_inflight_gc);

        return;

fail_free_gc_rq:
        kfree(gc_rq);
fail_free_lba_list:
        pblk_mfree(lba_list, l_mg->emeta_alloc_type);
fail_free_invalid_bitmap:
        kfree(invalid_bitmap);
fail_free_ws:
        kfree(line_ws);

        pblk_put_line_back(pblk, line);
        kref_put(&line->ref, pblk_line_put);
        atomic_dec(&gc->read_inflight_gc);

        pblk_err(pblk, "failed to GC line %d\n", line->id);
}

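/* Kick off collection of one line by queueing its preparation work */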
static int pblk_gc_line(struct pblk *pblk, struct pblk_line *line)
{
        struct pblk_gc *gc = &pblk->gc;
        struct pblk_line_ws *line_ws;

        pblk_debug(pblk, "line '%d' being reclaimed for GC\n", line->id);

        line_ws = kmalloc(sizeof(struct pblk_line_ws), GFP_KERNEL);
        if (!line_ws)
                return -ENOMEM;

        line_ws->pblk = pblk;
        line_ws->line = line;

        atomic_inc(&gc->pipeline_gc);
        INIT_WORK(&line_ws->ws, pblk_gc_line_prepare_ws);
        queue_work(gc->gc_reader_wq, &line_ws->ws);

        return 0;
}

static void pblk_gc_reader_kick(struct pblk_gc *gc)
{
        wake_up_process(gc->gc_reader_ts);
}

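/* Wake up the GC kthreads and, while GC is enabled, re-arm the GC timer */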
static void pblk_gc_kick(struct pblk *pblk)
{
        struct pblk_gc *gc = &pblk->gc;

        pblk_gc_writer_kick(gc);
        pblk_gc_reader_kick(gc);

        /* If we're shutting down GC, let's not start it up again */
        if (gc->gc_enabled) {
                wake_up_process(gc->gc_ts);
                mod_timer(&gc->gc_timer,
                          jiffies + msecs_to_jiffies(GC_TIME_MSECS));
        }
}

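/*
 * Take the next victim line off the reader list and start collecting it.
 * Returns 0 if a line was dispatched, 1 if the list was empty.
 */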
static int pblk_gc_read(struct pblk *pblk)
{
        struct pblk_gc *gc = &pblk->gc;
        struct pblk_line *line;

        spin_lock(&gc->r_lock);
        if (list_empty(&gc->r_list)) {
                spin_unlock(&gc->r_lock);
                return 1;
        }

        line = list_first_entry(&gc->r_list, struct pblk_line, list);
        list_del(&line->list);
        spin_unlock(&gc->r_lock);

        pblk_gc_kick(pblk);

        if (pblk_gc_line(pblk, line))
                pblk_err(pblk, "failed to GC line %d\n", line->id);

        return 0;
}

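/*
 * Greedy victim selection: pick the line with the fewest valid sectors
 * (vsc) from the group list. Lines that still have sectors waiting for a
 * map update are not considered. Returns NULL if no candidate is found.
 */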
static struct pblk_line *pblk_gc_get_victim_line(struct pblk *pblk,
                                                 struct list_head *group_list)
{
        struct pblk_line *line, *victim;
        unsigned int line_vsc = ~0x0L, victim_vsc = ~0x0L;

        victim = list_first_entry(group_list, struct pblk_line, list);

        list_for_each_entry(line, group_list, list) {
                if (!atomic_read(&line->sec_to_update))
                        line_vsc = le32_to_cpu(*line->vsc);
                if (line_vsc < victim_vsc) {
                        victim = line;
                        victim_vsc = le32_to_cpu(*victim->vsc);
                }
        }

        if (victim_vsc == ~0x0)
                return NULL;

        return victim;
}

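/*
 * GC needs to run when there are write-error lines to recycle, or when GC
 * is active and the free block count has dropped below the rate-limiter's
 * high threshold.
 */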
static bool pblk_gc_should_run(struct pblk_gc *gc, struct pblk_rl *rl)
{
        unsigned int nr_blocks_free, nr_blocks_need;
        unsigned int werr_lines = atomic_read(&rl->werr_lines);

        nr_blocks_need = pblk_rl_high_thrs(rl);
        nr_blocks_free = pblk_rl_nr_free_blks(rl);

        /* This is not critical, so no need to take the lock here */
        return ((werr_lines > 0) ||
                ((gc->gc_active) && (nr_blocks_need > nr_blocks_free)));
}

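/*
 * Lines on gc_full_list hold no valid sectors, so they can be erased and
 * returned to the free list directly, without moving any data. The final
 * kref_put() triggers pblk_line_put(), which recycles the line.
 */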
void pblk_gc_free_full_lines(struct pblk *pblk)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_gc *gc = &pblk->gc;
        struct pblk_line *line;

        do {
                spin_lock(&l_mg->gc_lock);
                if (list_empty(&l_mg->gc_full_list)) {
                        spin_unlock(&l_mg->gc_lock);
                        return;
                }

                line = list_first_entry(&l_mg->gc_full_list,
                                                        struct pblk_line, list);

                spin_lock(&line->lock);
                WARN_ON(line->state != PBLK_LINESTATE_CLOSED);
                line->state = PBLK_LINESTATE_GC;
                trace_pblk_line_state(pblk_disk_name(pblk), line->id,
                                        line->state);
                spin_unlock(&line->lock);

                list_del(&line->list);
                spin_unlock(&l_mg->gc_lock);

                atomic_inc(&gc->pipeline_gc);
                kref_put(&line->ref, pblk_line_put);
        } while (1);
}

/*
 * Lines with no valid sectors will be returned to the free list immediately.
 * If GC is activated - either because the free block count is under the
 * determined threshold, or because it is being forced from user space -
 * only lines with a high count of invalid sectors will be recycled.
 */
static void pblk_gc_run(struct pblk *pblk)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_gc *gc = &pblk->gc;
        struct pblk_line *line;
        struct list_head *group_list;
        bool run_gc;
        int read_inflight_gc, gc_group = 0, prev_group = 0;

        pblk_gc_free_full_lines(pblk);

        run_gc = pblk_gc_should_run(&pblk->gc, &pblk->rl);
        if (!run_gc || (atomic_read(&gc->read_inflight_gc) >= PBLK_GC_L_QD))
                return;

next_gc_group:
        group_list = l_mg->gc_lists[gc_group++];

        do {
                spin_lock(&l_mg->gc_lock);

                line = pblk_gc_get_victim_line(pblk, group_list);
                if (!line) {
                        spin_unlock(&l_mg->gc_lock);
                        break;
                }

                spin_lock(&line->lock);
                WARN_ON(line->state != PBLK_LINESTATE_CLOSED);
                line->state = PBLK_LINESTATE_GC;
                trace_pblk_line_state(pblk_disk_name(pblk), line->id,
                                        line->state);
                spin_unlock(&line->lock);

                list_del(&line->list);
                spin_unlock(&l_mg->gc_lock);

                spin_lock(&gc->r_lock);
                list_add_tail(&line->list, &gc->r_list);
                spin_unlock(&gc->r_lock);

                read_inflight_gc = atomic_inc_return(&gc->read_inflight_gc);
                pblk_gc_reader_kick(gc);

                prev_group = 1;

                /* No need to queue up more GC lines than we can handle */
                run_gc = pblk_gc_should_run(&pblk->gc, &pblk->rl);
                if (!run_gc || read_inflight_gc >= PBLK_GC_L_QD)
                        break;
        } while (1);

        if (!prev_group && pblk->rl.rb_state > gc_group &&
                                                gc_group < PBLK_GC_NR_LISTS)
                goto next_gc_group;
}

static void pblk_gc_timer(struct timer_list *t)
{
        struct pblk *pblk = from_timer(pblk, t, gc.gc_timer);

        pblk_gc_kick(pblk);
}

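/* Main GC kthread: selects and queues victim lines whenever it is kicked */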
static int pblk_gc_ts(void *data)
{
        struct pblk *pblk = data;

        while (!kthread_should_stop()) {
                pblk_gc_run(pblk);
                set_current_state(TASK_INTERRUPTIBLE);
                io_schedule();
        }

        return 0;
}

static int pblk_gc_writer_ts(void *data)
{
        struct pblk *pblk = data;

        while (!kthread_should_stop()) {
                if (!pblk_gc_write(pblk))
                        continue;
                set_current_state(TASK_INTERRUPTIBLE);
                io_schedule();
        }

        return 0;
}

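/*
 * GC reader kthread: dispatches queued victim lines for collection. Before
 * exiting it waits for all in-flight GC lines to drain from the pipeline.
 */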
static int pblk_gc_reader_ts(void *data)
{
        struct pblk *pblk = data;
        struct pblk_gc *gc = &pblk->gc;

        while (!kthread_should_stop()) {
                if (!pblk_gc_read(pblk))
                        continue;
                set_current_state(TASK_INTERRUPTIBLE);
                io_schedule();
        }

#ifdef CONFIG_NVM_PBLK_DEBUG
        pblk_info(pblk, "flushing gc pipeline, %d lines left\n",
                atomic_read(&gc->pipeline_gc));
#endif

        do {
                if (!atomic_read(&gc->pipeline_gc))
                        break;

                schedule();
        } while (1);

        return 0;
}

static void pblk_gc_start(struct pblk *pblk)
{
        pblk->gc.gc_active = 1;
        pblk_debug(pblk, "gc start\n");
}

void pblk_gc_should_start(struct pblk *pblk)
{
        struct pblk_gc *gc = &pblk->gc;

        if (gc->gc_enabled && !gc->gc_active) {
                pblk_gc_start(pblk);
                pblk_gc_kick(pblk);
        }
}

void pblk_gc_should_stop(struct pblk *pblk)
{
        struct pblk_gc *gc = &pblk->gc;

        if (gc->gc_active && !gc->gc_forced)
                gc->gc_active = 0;
}

void pblk_gc_should_kick(struct pblk *pblk)
{
        pblk_rl_update_rates(&pblk->rl);
}

void pblk_gc_sysfs_state_show(struct pblk *pblk, int *gc_enabled,
                              int *gc_active)
{
        struct pblk_gc *gc = &pblk->gc;

        spin_lock(&gc->lock);
        *gc_enabled = gc->gc_enabled;
        *gc_active = gc->gc_active;
        spin_unlock(&gc->lock);
}

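/*
 * Force GC from user space (wired up through pblk's sysfs interface, see
 * pblk-sysfs.c). Forcing also enables GC; clearing the force flag disables
 * it again.
 */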
int pblk_gc_sysfs_force(struct pblk *pblk, int force)
{
        struct pblk_gc *gc = &pblk->gc;

        if (force < 0 || force > 1)
                return -EINVAL;

        spin_lock(&gc->lock);
        gc->gc_forced = force;

        if (force)
                gc->gc_enabled = 1;
        else
                gc->gc_enabled = 0;
        spin_unlock(&gc->lock);

        pblk_gc_should_start(pblk);

        return 0;
}

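/*
 * Set up the GC machinery: the main, writer and reader kthreads, the GC
 * timer, and the two workqueues used by the read side of the pipeline.
 */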
int pblk_gc_init(struct pblk *pblk)
{
        struct pblk_gc *gc = &pblk->gc;
        int ret;

        gc->gc_ts = kthread_create(pblk_gc_ts, pblk, "pblk-gc-ts");
        if (IS_ERR(gc->gc_ts)) {
                pblk_err(pblk, "could not allocate GC main kthread\n");
                return PTR_ERR(gc->gc_ts);
        }

        gc->gc_writer_ts = kthread_create(pblk_gc_writer_ts, pblk,
                                                        "pblk-gc-writer-ts");
        if (IS_ERR(gc->gc_writer_ts)) {
                pblk_err(pblk, "could not allocate GC writer kthread\n");
                ret = PTR_ERR(gc->gc_writer_ts);
                goto fail_free_main_kthread;
        }

        gc->gc_reader_ts = kthread_create(pblk_gc_reader_ts, pblk,
                                                        "pblk-gc-reader-ts");
        if (IS_ERR(gc->gc_reader_ts)) {
                pblk_err(pblk, "could not allocate GC reader kthread\n");
                ret = PTR_ERR(gc->gc_reader_ts);
                goto fail_free_writer_kthread;
        }

        timer_setup(&gc->gc_timer, pblk_gc_timer, 0);
        mod_timer(&gc->gc_timer, jiffies + msecs_to_jiffies(GC_TIME_MSECS));

        gc->gc_active = 0;
        gc->gc_forced = 0;
        gc->gc_enabled = 1;
        gc->w_entries = 0;
        atomic_set(&gc->read_inflight_gc, 0);
        atomic_set(&gc->pipeline_gc, 0);

        /* Workqueue that reads valid sectors from a line and submits them to
         * the GC writer to be recycled.
         */
        gc->gc_line_reader_wq = alloc_workqueue("pblk-gc-line-reader-wq",
                        WQ_MEM_RECLAIM | WQ_UNBOUND, PBLK_GC_MAX_READERS);
        if (!gc->gc_line_reader_wq) {
                pblk_err(pblk, "could not allocate GC line reader workqueue\n");
                ret = -ENOMEM;
                goto fail_free_reader_kthread;
        }

        /* Workqueue that prepares lines for GC */
        gc->gc_reader_wq = alloc_workqueue("pblk-gc-line_wq",
                                        WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
        if (!gc->gc_reader_wq) {
                pblk_err(pblk, "could not allocate GC reader workqueue\n");
                ret = -ENOMEM;
                goto fail_free_reader_line_wq;
        }

        spin_lock_init(&gc->lock);
        spin_lock_init(&gc->w_lock);
        spin_lock_init(&gc->r_lock);

        sema_init(&gc->gc_sem, PBLK_GC_RQ_QD);

        INIT_LIST_HEAD(&gc->w_list);
        INIT_LIST_HEAD(&gc->r_list);

        return 0;

fail_free_reader_line_wq:
        destroy_workqueue(gc->gc_line_reader_wq);
fail_free_reader_kthread:
        kthread_stop(gc->gc_reader_ts);
fail_free_writer_kthread:
        kthread_stop(gc->gc_writer_ts);
fail_free_main_kthread:
        kthread_stop(gc->gc_ts);

        return ret;
}

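/*
 * Tear down GC. When @graceful is set, pending reader work is flushed (and
 * the reader kthread drains in-flight lines) before the workqueues are
 * destroyed; the writer kthread is stopped last, after the reader side has
 * drained, so queued GC data can still reach the write cache.
 */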
void pblk_gc_exit(struct pblk *pblk, bool graceful)
{
        struct pblk_gc *gc = &pblk->gc;

        gc->gc_enabled = 0;
        del_timer_sync(&gc->gc_timer);
        gc->gc_active = 0;

        if (gc->gc_ts)
                kthread_stop(gc->gc_ts);

        if (gc->gc_reader_ts)
                kthread_stop(gc->gc_reader_ts);

        if (graceful) {
                flush_workqueue(gc->gc_reader_wq);
                flush_workqueue(gc->gc_line_reader_wq);
        }

        destroy_workqueue(gc->gc_reader_wq);
        destroy_workqueue(gc->gc_line_reader_wq);

        if (gc->gc_writer_ts)
                kthread_stop(gc->gc_writer_ts);
}