treewide: Replace GPLv2 boilerplate/reference with SPDX - rule 131
drivers/lightnvm/pblk-gc.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * pblk-gc.c - pblk's garbage collector
 */

#include "pblk.h"
#include "pblk-trace.h"
#include <linux/delay.h>


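/*
 * Descriptive note (derived from the code in this file):
 *
 *   gc_ts             - main kthread; picks victim lines in pblk_gc_run()
 *   gc_reader_ts      - kthread; pops lines off gc->r_list and queues the
 *                       prepare step on gc_reader_wq
 *   gc_reader_wq      - workqueue running pblk_gc_line_prepare_ws()
 *   gc_line_reader_wq - workqueue running pblk_gc_line_ws(), which reads
 *                       the valid sectors of a victim line
 *   gc_writer_ts      - kthread; drains gc->w_list into the write cache
 *
 * gc->gc_sem, initialized to PBLK_GC_RQ_QD, bounds the number of GC read
 * requests in flight between the prepare and read stages.
 */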
static void pblk_gc_free_gc_rq(struct pblk_gc_rq *gc_rq)
{
        /* vfree() tolerates NULL, so no need to check gc_rq->data first */
        vfree(gc_rq->data);
        kfree(gc_rq);
}

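/*
 * Drain the GC write list into the write cache. Returns 1 when there was
 * nothing to write (so the writer kthread can go back to sleep) and 0
 * otherwise.
 */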
static int pblk_gc_write(struct pblk *pblk)
{
        struct pblk_gc *gc = &pblk->gc;
        struct pblk_gc_rq *gc_rq, *tgc_rq;
        LIST_HEAD(w_list);

        spin_lock(&gc->w_lock);
        if (list_empty(&gc->w_list)) {
                spin_unlock(&gc->w_lock);
                return 1;
        }

        list_cut_position(&w_list, &gc->w_list, gc->w_list.prev);
        gc->w_entries = 0;
        spin_unlock(&gc->w_lock);

        list_for_each_entry_safe(gc_rq, tgc_rq, &w_list, list) {
                pblk_write_gc_to_cache(pblk, gc_rq);
                list_del(&gc_rq->list);
                kref_put(&gc_rq->line->ref, pblk_line_put);
                pblk_gc_free_gc_rq(gc_rq);
        }

        return 0;
}

static void pblk_gc_writer_kick(struct pblk_gc *gc)
{
        wake_up_process(gc->gc_writer_ts);
}

void pblk_put_line_back(struct pblk *pblk, struct pblk_line *line)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct list_head *move_list;

        spin_lock(&l_mg->gc_lock);
        spin_lock(&line->lock);
        WARN_ON(line->state != PBLK_LINESTATE_GC);
        line->state = PBLK_LINESTATE_CLOSED;
        trace_pblk_line_state(pblk_disk_name(pblk), line->id,
                                        line->state);
        /* Reset gc_group so that pblk_line_gc_list() returns the proper
         * move_list, since at this point the line is not on any of the
         * GC lists.
         */
        line->gc_group = PBLK_LINEGC_NONE;
        move_list = pblk_line_gc_list(pblk, line);
        spin_unlock(&line->lock);
        list_add_tail(&line->list, move_list);
        spin_unlock(&l_mg->gc_lock);
}

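/*
 * Work item for a single GC read request: read the valid sectors of the
 * victim line and queue the request on gc->w_list for the GC writer. The
 * gc_sem slot taken in pblk_gc_line_prepare_ws() is released here, and the
 * writer queue depth is bounded by PBLK_GC_RQ_QD.
 */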
static void pblk_gc_line_ws(struct work_struct *work)
{
        struct pblk_line_ws *gc_rq_ws = container_of(work,
                                                struct pblk_line_ws, ws);
        struct pblk *pblk = gc_rq_ws->pblk;
        struct pblk_gc *gc = &pblk->gc;
        struct pblk_line *line = gc_rq_ws->line;
        struct pblk_gc_rq *gc_rq = gc_rq_ws->priv;
        int ret;

        up(&gc->gc_sem);

        /* Read from GC victim block */
        ret = pblk_submit_read_gc(pblk, gc_rq);
        if (ret) {
                line->w_err_gc->has_gc_err = 1;
                goto out;
        }

        if (!gc_rq->secs_to_gc)
                goto out;

retry:
        spin_lock(&gc->w_lock);
        if (gc->w_entries >= PBLK_GC_RQ_QD) {
                spin_unlock(&gc->w_lock);
                pblk_gc_writer_kick(&pblk->gc);
                usleep_range(128, 256);
                goto retry;
        }
        gc->w_entries++;
        list_add_tail(&gc_rq->list, &gc->w_list);
        spin_unlock(&gc->w_lock);

        pblk_gc_writer_kick(&pblk->gc);

        kfree(gc_rq_ws);
        return;

out:
        pblk_gc_free_gc_rq(gc_rq);
        kref_put(&line->ref, pblk_line_put);
        kfree(gc_rq_ws);
}

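/*
 * Read the line's emeta and return a copy of its LBA list, or NULL on
 * failure. As used here, emeta_len[0] is the total emeta size and
 * emeta_len[2] the size of the LBA-list portion (see the emeta layout
 * in pblk.h).
 */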
static __le64 *get_lba_list_from_emeta(struct pblk *pblk,
                                       struct pblk_line *line)
{
        struct line_emeta *emeta_buf;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line_meta *lm = &pblk->lm;
        unsigned int lba_list_size = lm->emeta_len[2];
        __le64 *lba_list;
        int ret;

        emeta_buf = pblk_malloc(lm->emeta_len[0],
                                l_mg->emeta_alloc_type, GFP_KERNEL);
        if (!emeta_buf)
                return NULL;

        ret = pblk_line_emeta_read(pblk, line, emeta_buf);
        if (ret) {
                pblk_err(pblk, "line %d read emeta failed (%d)\n",
                                line->id, ret);
                pblk_mfree(emeta_buf, l_mg->emeta_alloc_type);
                return NULL;
        }

        /* If this check fails, it means that emeta is corrupted.
         * For now, leave the line untouched.
         * TODO: Implement a recovery routine that scans and moves
         * all sectors on the line.
         */

        ret = pblk_recov_check_emeta(pblk, emeta_buf);
        if (ret) {
                pblk_err(pblk, "inconsistent emeta (line %d)\n",
                                line->id);
                pblk_mfree(emeta_buf, l_mg->emeta_alloc_type);
                return NULL;
        }

        lba_list = pblk_malloc(lba_list_size,
                               l_mg->emeta_alloc_type, GFP_KERNEL);
        if (lba_list)
                memcpy(lba_list, emeta_to_lbas(pblk, emeta_buf), lba_list_size);

        pblk_mfree(emeta_buf, l_mg->emeta_alloc_type);

        return lba_list;
}

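/*
 * Prepare step for a victim line: snapshot the invalid bitmap, walk the
 * still-valid sectors and split them into GC read requests of at most
 * pblk->max_write_pgs sectors each, queued on gc_line_reader_wq.
 */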
static void pblk_gc_line_prepare_ws(struct work_struct *work)
{
        struct pblk_line_ws *line_ws = container_of(work, struct pblk_line_ws,
                                                                        ws);
        struct pblk *pblk = line_ws->pblk;
        struct pblk_line *line = line_ws->line;
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_line_meta *lm = &pblk->lm;
        struct nvm_tgt_dev *dev = pblk->dev;
        struct nvm_geo *geo = &dev->geo;
        struct pblk_gc *gc = &pblk->gc;
        struct pblk_line_ws *gc_rq_ws;
        struct pblk_gc_rq *gc_rq;
        __le64 *lba_list;
        unsigned long *invalid_bitmap;
        int sec_left, nr_secs, bit;

        invalid_bitmap = kmalloc(lm->sec_bitmap_len, GFP_KERNEL);
        if (!invalid_bitmap)
                goto fail_free_ws;

        if (line->w_err_gc->has_write_err) {
                lba_list = line->w_err_gc->lba_list;
                line->w_err_gc->lba_list = NULL;
        } else {
                lba_list = get_lba_list_from_emeta(pblk, line);
                if (!lba_list) {
                        pblk_err(pblk, "could not interpret emeta (line %d)\n",
                                        line->id);
                        goto fail_free_invalid_bitmap;
                }
        }

        spin_lock(&line->lock);
        bitmap_copy(invalid_bitmap, line->invalid_bitmap, lm->sec_per_line);
        sec_left = pblk_line_vsc(line);
        spin_unlock(&line->lock);

        if (sec_left < 0) {
                pblk_err(pblk, "corrupted GC line (%d)\n", line->id);
                goto fail_free_lba_list;
        }

        bit = -1;
next_rq:
        gc_rq = kmalloc(sizeof(struct pblk_gc_rq), GFP_KERNEL);
        if (!gc_rq)
                goto fail_free_lba_list;

        nr_secs = 0;
        do {
                bit = find_next_zero_bit(invalid_bitmap, lm->sec_per_line,
                                                                bit + 1);
                if (bit > line->emeta_ssec)
                        break;

                gc_rq->paddr_list[nr_secs] = bit;
                gc_rq->lba_list[nr_secs++] = le64_to_cpu(lba_list[bit]);
        } while (nr_secs < pblk->max_write_pgs);

        if (unlikely(!nr_secs)) {
                kfree(gc_rq);
                goto out;
        }

        gc_rq->nr_secs = nr_secs;
        gc_rq->line = line;

        gc_rq->data = vmalloc(array_size(gc_rq->nr_secs, geo->csecs));
        if (!gc_rq->data)
                goto fail_free_gc_rq;

        gc_rq_ws = kmalloc(sizeof(struct pblk_line_ws), GFP_KERNEL);
        if (!gc_rq_ws)
                goto fail_free_gc_data;

        gc_rq_ws->pblk = pblk;
        gc_rq_ws->line = line;
        gc_rq_ws->priv = gc_rq;

        /* The write GC path can be much slower than the read GC one due to
         * the budget imposed by the rate-limiter. Balance in case we get
         * backpressure from the write GC path.
         */
        while (down_timeout(&gc->gc_sem, msecs_to_jiffies(30000)))
                io_schedule();

        kref_get(&line->ref);

        INIT_WORK(&gc_rq_ws->ws, pblk_gc_line_ws);
        queue_work(gc->gc_line_reader_wq, &gc_rq_ws->ws);

        sec_left -= nr_secs;
        if (sec_left > 0)
                goto next_rq;

out:
        pblk_mfree(lba_list, l_mg->emeta_alloc_type);
        kfree(line_ws);
        kfree(invalid_bitmap);

        kref_put(&line->ref, pblk_line_put);
        atomic_dec(&gc->read_inflight_gc);

        return;

fail_free_gc_data:
        vfree(gc_rq->data);
fail_free_gc_rq:
        kfree(gc_rq);
fail_free_lba_list:
        pblk_mfree(lba_list, l_mg->emeta_alloc_type);
fail_free_invalid_bitmap:
        kfree(invalid_bitmap);
fail_free_ws:
        kfree(line_ws);

        /* The line goes back to the closed state, so we must not release the
         * extra line reference here; that reference is only dropped on the
         * GC-to-free state transition.
         */
        pblk_put_line_back(pblk, line);
        atomic_dec(&gc->read_inflight_gc);

        pblk_err(pblk, "failed to GC line %d\n", line->id);
}

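/* Queue the prepare step for a victim line on the GC reader workqueue */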
static int pblk_gc_line(struct pblk *pblk, struct pblk_line *line)
{
        struct pblk_gc *gc = &pblk->gc;
        struct pblk_line_ws *line_ws;

        pblk_debug(pblk, "line '%d' being reclaimed for GC\n", line->id);

        line_ws = kmalloc(sizeof(struct pblk_line_ws), GFP_KERNEL);
        if (!line_ws)
                return -ENOMEM;

        line_ws->pblk = pblk;
        line_ws->line = line;

        atomic_inc(&gc->pipeline_gc);
        INIT_WORK(&line_ws->ws, pblk_gc_line_prepare_ws);
        queue_work(gc->gc_reader_wq, &line_ws->ws);

        return 0;
}

static void pblk_gc_reader_kick(struct pblk_gc *gc)
{
        wake_up_process(gc->gc_reader_ts);
}

static void pblk_gc_kick(struct pblk *pblk)
{
        struct pblk_gc *gc = &pblk->gc;

        pblk_gc_writer_kick(gc);
        pblk_gc_reader_kick(gc);

        /* If we're shutting down GC, let's not start it up again */
        if (gc->gc_enabled) {
                wake_up_process(gc->gc_ts);
                mod_timer(&gc->gc_timer,
                          jiffies + msecs_to_jiffies(GC_TIME_MSECS));
        }
}

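/*
 * Take one line off the GC read list and start collecting it. Returns 1
 * when the list is empty so the reader kthread can go to sleep.
 */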
static int pblk_gc_read(struct pblk *pblk)
{
        struct pblk_gc *gc = &pblk->gc;
        struct pblk_line *line;

        spin_lock(&gc->r_lock);
        if (list_empty(&gc->r_list)) {
                spin_unlock(&gc->r_lock);
                return 1;
        }

        line = list_first_entry(&gc->r_list, struct pblk_line, list);
        list_del(&line->list);
        spin_unlock(&gc->r_lock);

        pblk_gc_kick(pblk);

        if (pblk_gc_line(pblk, line)) {
                pblk_err(pblk, "failed to GC line %d\n", line->id);
                /* rollback */
                spin_lock(&gc->r_lock);
                list_add_tail(&line->list, &gc->r_list);
                spin_unlock(&gc->r_lock);
        }

        return 0;
}

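/*
 * Pick the line with the fewest valid sectors in the group. Lines with
 * map updates still in flight (sec_to_update != 0) are skipped, as their
 * valid sector count is not yet stable.
 */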
static struct pblk_line *pblk_gc_get_victim_line(struct pblk *pblk,
                                                 struct list_head *group_list)
{
        struct pblk_line *line, *victim;
        unsigned int line_vsc = ~0x0L, victim_vsc = ~0x0L;

        victim = list_first_entry(group_list, struct pblk_line, list);

        list_for_each_entry(line, group_list, list) {
                if (!atomic_read(&line->sec_to_update))
                        line_vsc = le32_to_cpu(*line->vsc);
                if (line_vsc < victim_vsc) {
                        victim = line;
                        victim_vsc = le32_to_cpu(*victim->vsc);
                }
        }

        if (victim_vsc == ~0x0)
                return NULL;

        return victim;
}

static bool pblk_gc_should_run(struct pblk_gc *gc, struct pblk_rl *rl)
{
        unsigned int nr_blocks_free, nr_blocks_need;
        unsigned int werr_lines = atomic_read(&rl->werr_lines);

        nr_blocks_need = pblk_rl_high_thrs(rl);
        nr_blocks_free = pblk_rl_nr_free_blks(rl);

        /* This is not critical, no need to take lock here */
        return ((werr_lines > 0) ||
                ((gc->gc_active) && (nr_blocks_need > nr_blocks_free)));
}

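/*
 * Lines on gc_full_list hold no valid sectors, so dropping the line
 * reference here lets pblk_line_put() send them straight back to the
 * free list without any data movement.
 */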
void pblk_gc_free_full_lines(struct pblk *pblk)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_gc *gc = &pblk->gc;
        struct pblk_line *line;

        do {
                spin_lock(&l_mg->gc_lock);
                if (list_empty(&l_mg->gc_full_list)) {
                        spin_unlock(&l_mg->gc_lock);
                        return;
                }

                line = list_first_entry(&l_mg->gc_full_list,
                                                        struct pblk_line, list);

                spin_lock(&line->lock);
                WARN_ON(line->state != PBLK_LINESTATE_CLOSED);
                line->state = PBLK_LINESTATE_GC;
                trace_pblk_line_state(pblk_disk_name(pblk), line->id,
                                        line->state);
                spin_unlock(&line->lock);

                list_del(&line->list);
                spin_unlock(&l_mg->gc_lock);

                atomic_inc(&gc->pipeline_gc);
                kref_put(&line->ref, pblk_line_put);
        } while (1);
}

/*
 * Lines with no valid sectors will be returned to the free list immediately.
 * If GC is activated - either because the free block count is under the
 * determined threshold, or because it is being forced from user space - only
 * lines with a high count of invalid sectors will be recycled.
 */
static void pblk_gc_run(struct pblk *pblk)
{
        struct pblk_line_mgmt *l_mg = &pblk->l_mg;
        struct pblk_gc *gc = &pblk->gc;
        struct pblk_line *line;
        struct list_head *group_list;
        bool run_gc;
        int read_inflight_gc, gc_group = 0, prev_group = 0;

        pblk_gc_free_full_lines(pblk);

        run_gc = pblk_gc_should_run(&pblk->gc, &pblk->rl);
        if (!run_gc || (atomic_read(&gc->read_inflight_gc) >= PBLK_GC_L_QD))
                return;

next_gc_group:
        group_list = l_mg->gc_lists[gc_group++];

        do {
                spin_lock(&l_mg->gc_lock);

                line = pblk_gc_get_victim_line(pblk, group_list);
                if (!line) {
                        spin_unlock(&l_mg->gc_lock);
                        break;
                }

                spin_lock(&line->lock);
                WARN_ON(line->state != PBLK_LINESTATE_CLOSED);
                line->state = PBLK_LINESTATE_GC;
                trace_pblk_line_state(pblk_disk_name(pblk), line->id,
                                        line->state);
                spin_unlock(&line->lock);

                list_del(&line->list);
                spin_unlock(&l_mg->gc_lock);

                spin_lock(&gc->r_lock);
                list_add_tail(&line->list, &gc->r_list);
                spin_unlock(&gc->r_lock);

                read_inflight_gc = atomic_inc_return(&gc->read_inflight_gc);
                pblk_gc_reader_kick(gc);

                prev_group = 1;

                /* No need to queue up more GC lines than we can handle */
                run_gc = pblk_gc_should_run(&pblk->gc, &pblk->rl);
                if (!run_gc || read_inflight_gc >= PBLK_GC_L_QD)
                        break;
        } while (1);

        if (!prev_group && pblk->rl.rb_state > gc_group &&
                                                gc_group < PBLK_GC_NR_LISTS)
                goto next_gc_group;
}

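/* Periodic kick; pblk_gc_kick() re-arms the timer while GC stays enabled */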
static void pblk_gc_timer(struct timer_list *t)
{
        struct pblk *pblk = from_timer(pblk, t, gc.gc_timer);

        pblk_gc_kick(pblk);
}

static int pblk_gc_ts(void *data)
{
        struct pblk *pblk = data;

        while (!kthread_should_stop()) {
                pblk_gc_run(pblk);
                set_current_state(TASK_INTERRUPTIBLE);
                io_schedule();
        }

        return 0;
}

static int pblk_gc_writer_ts(void *data)
{
        struct pblk *pblk = data;

        while (!kthread_should_stop()) {
                if (!pblk_gc_write(pblk))
                        continue;
                set_current_state(TASK_INTERRUPTIBLE);
                io_schedule();
        }

        return 0;
}

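/*
 * GC reader kthread. On shutdown it waits for pipeline_gc to drop to
 * zero, i.e. for all in-flight GC lines to complete, before exiting.
 */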
static int pblk_gc_reader_ts(void *data)
{
        struct pblk *pblk = data;
        struct pblk_gc *gc = &pblk->gc;

        while (!kthread_should_stop()) {
                if (!pblk_gc_read(pblk))
                        continue;
                set_current_state(TASK_INTERRUPTIBLE);
                io_schedule();
        }

#ifdef CONFIG_NVM_PBLK_DEBUG
        pblk_info(pblk, "flushing gc pipeline, %d lines left\n",
                atomic_read(&gc->pipeline_gc));
#endif

        do {
                if (!atomic_read(&gc->pipeline_gc))
                        break;

                schedule();
        } while (1);

        return 0;
}

static void pblk_gc_start(struct pblk *pblk)
{
        pblk->gc.gc_active = 1;
        pblk_debug(pblk, "gc start\n");
}

void pblk_gc_should_start(struct pblk *pblk)
{
        struct pblk_gc *gc = &pblk->gc;

        if (gc->gc_enabled && !gc->gc_active) {
                pblk_gc_start(pblk);
                pblk_gc_kick(pblk);
        }
}

void pblk_gc_should_stop(struct pblk *pblk)
{
        struct pblk_gc *gc = &pblk->gc;

        if (gc->gc_active && !gc->gc_forced)
                gc->gc_active = 0;
}

void pblk_gc_should_kick(struct pblk *pblk)
{
        pblk_rl_update_rates(&pblk->rl);
}

void pblk_gc_sysfs_state_show(struct pblk *pblk, int *gc_enabled,
                              int *gc_active)
{
        struct pblk_gc *gc = &pblk->gc;

        spin_lock(&gc->lock);
        *gc_enabled = gc->gc_enabled;
        *gc_active = gc->gc_active;
        spin_unlock(&gc->lock);
}

int pblk_gc_sysfs_force(struct pblk *pblk, int force)
{
        struct pblk_gc *gc = &pblk->gc;

        if (force < 0 || force > 1)
                return -EINVAL;

        spin_lock(&gc->lock);
        gc->gc_forced = force;

        if (force)
                gc->gc_enabled = 1;
        else
                gc->gc_enabled = 0;
        spin_unlock(&gc->lock);

        pblk_gc_should_start(pblk);

        return 0;
}
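/*
 * Usage sketch (the exact sysfs path is an assumption here; it depends on
 * where the target registers its attributes, e.g. the lightnvm group of
 * the pblk block device on mainline):
 *
 *   echo 1 > /sys/block/<pblk-dev>/lightnvm/gc_force    # force GC on
 *   echo 0 > /sys/block/<pblk-dev>/lightnvm/gc_force    # normal operation
 */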
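/*
 * Bring up the GC machinery: three kthreads, the periodic timer and two
 * workqueues. The error unwinding below, and pblk_gc_exit(), mirror this
 * setup order.
 */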
int pblk_gc_init(struct pblk *pblk)
{
        struct pblk_gc *gc = &pblk->gc;
        int ret;

        gc->gc_ts = kthread_create(pblk_gc_ts, pblk, "pblk-gc-ts");
        if (IS_ERR(gc->gc_ts)) {
                pblk_err(pblk, "could not allocate GC main kthread\n");
                return PTR_ERR(gc->gc_ts);
        }

        gc->gc_writer_ts = kthread_create(pblk_gc_writer_ts, pblk,
                                                        "pblk-gc-writer-ts");
        if (IS_ERR(gc->gc_writer_ts)) {
                pblk_err(pblk, "could not allocate GC writer kthread\n");
                ret = PTR_ERR(gc->gc_writer_ts);
                goto fail_free_main_kthread;
        }

        gc->gc_reader_ts = kthread_create(pblk_gc_reader_ts, pblk,
                                                        "pblk-gc-reader-ts");
        if (IS_ERR(gc->gc_reader_ts)) {
                pblk_err(pblk, "could not allocate GC reader kthread\n");
                ret = PTR_ERR(gc->gc_reader_ts);
                goto fail_free_writer_kthread;
        }

        timer_setup(&gc->gc_timer, pblk_gc_timer, 0);
        mod_timer(&gc->gc_timer, jiffies + msecs_to_jiffies(GC_TIME_MSECS));

        gc->gc_active = 0;
        gc->gc_forced = 0;
        gc->gc_enabled = 1;
        gc->w_entries = 0;
        atomic_set(&gc->read_inflight_gc, 0);
        atomic_set(&gc->pipeline_gc, 0);

        /* Workqueue that reads valid sectors from a line and submits them to
         * the GC writer to be recycled.
         */
        gc->gc_line_reader_wq = alloc_workqueue("pblk-gc-line-reader-wq",
                        WQ_MEM_RECLAIM | WQ_UNBOUND, PBLK_GC_MAX_READERS);
        if (!gc->gc_line_reader_wq) {
                pblk_err(pblk, "could not allocate GC line reader workqueue\n");
                ret = -ENOMEM;
                goto fail_free_reader_kthread;
        }

        /* Workqueue that prepares lines for GC */
        gc->gc_reader_wq = alloc_workqueue("pblk-gc-line_wq",
                                        WQ_MEM_RECLAIM | WQ_UNBOUND, 1);
        if (!gc->gc_reader_wq) {
                pblk_err(pblk, "could not allocate GC reader workqueue\n");
                ret = -ENOMEM;
                goto fail_free_reader_line_wq;
        }

        spin_lock_init(&gc->lock);
        spin_lock_init(&gc->w_lock);
        spin_lock_init(&gc->r_lock);

        sema_init(&gc->gc_sem, PBLK_GC_RQ_QD);

        INIT_LIST_HEAD(&gc->w_list);
        INIT_LIST_HEAD(&gc->r_list);

        return 0;

fail_free_reader_line_wq:
        destroy_workqueue(gc->gc_line_reader_wq);
fail_free_reader_kthread:
        kthread_stop(gc->gc_reader_ts);
fail_free_writer_kthread:
        kthread_stop(gc->gc_writer_ts);
fail_free_main_kthread:
        kthread_stop(gc->gc_ts);

        return ret;
}

void pblk_gc_exit(struct pblk *pblk, bool graceful)
{
        struct pblk_gc *gc = &pblk->gc;

        gc->gc_enabled = 0;
        del_timer_sync(&gc->gc_timer);
        gc->gc_active = 0;

        if (gc->gc_ts)
                kthread_stop(gc->gc_ts);

        if (gc->gc_reader_ts)
                kthread_stop(gc->gc_reader_ts);

        if (graceful) {
                flush_workqueue(gc->gc_reader_wq);
                flush_workqueue(gc->gc_line_reader_wq);
        }

        destroy_workqueue(gc->gc_reader_wq);
        destroy_workqueue(gc->gc_line_reader_wq);

        if (gc->gc_writer_ts)
                kthread_stop(gc->gc_writer_ts);
}