drivers/lightnvm/pblk-write.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2016 CNEX Labs
 * Initial release: Javier Gonzalez <javier@cnexlabs.com>
 *                  Matias Bjorling <matias@cnexlabs.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * pblk-write.c - pblk's write path from write buffer to media
 */

#include "pblk.h"
#include "pblk-trace.h"

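/*
 * Complete a write request once it has been persisted: end the original
 * bios attached to each valid ring-buffer entry, free any pages used for
 * padding, advance the ring buffer's sync pointer and release the request.
 * Returns the new sync position in the ring buffer.
 */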
static unsigned long pblk_end_w_bio(struct pblk *pblk, struct nvm_rq *rqd,
				    struct pblk_c_ctx *c_ctx)
{
	struct bio *original_bio;
	struct pblk_rb *rwb = &pblk->rwb;
	unsigned long ret;
	int i;

	for (i = 0; i < c_ctx->nr_valid; i++) {
		struct pblk_w_ctx *w_ctx;
		int pos = c_ctx->sentry + i;
		int flags;

		w_ctx = pblk_rb_w_ctx(rwb, pos);
		flags = READ_ONCE(w_ctx->flags);

		if (flags & PBLK_FLUSH_ENTRY) {
			flags &= ~PBLK_FLUSH_ENTRY;
			/* Release flags on context. Protect from writes */
			smp_store_release(&w_ctx->flags, flags);

#ifdef CONFIG_NVM_PBLK_DEBUG
			atomic_dec(&rwb->inflight_flush_point);
#endif
		}

		while ((original_bio = bio_list_pop(&w_ctx->bios)))
			bio_endio(original_bio);
	}

	if (c_ctx->nr_padded)
		pblk_bio_free_pages(pblk, rqd->bio, c_ctx->nr_valid,
							c_ctx->nr_padded);

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_add(rqd->nr_ppas, &pblk->sync_writes);
#endif

	ret = pblk_rb_sync_advance(&pblk->rwb, c_ctx->nr_valid);

	bio_put(rqd->bio);
	pblk_free_rqd(pblk, rqd, PBLK_WRITE);

	return ret;
}

static unsigned long pblk_end_queued_w_bio(struct pblk *pblk,
					   struct nvm_rq *rqd,
					   struct pblk_c_ctx *c_ctx)
{
	list_del(&c_ctx->list);
	return pblk_end_w_bio(pblk, rqd, c_ctx);
}

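/*
 * Completions must be applied to the write buffer in submission order. If
 * this request completed out of order (the sync pointer has not reached its
 * sentry yet), park it on compl_list; otherwise complete it and then drain
 * any queued completions that have now become contiguous.
 */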
static void pblk_complete_write(struct pblk *pblk, struct nvm_rq *rqd,
				struct pblk_c_ctx *c_ctx)
{
	struct pblk_c_ctx *c, *r;
	unsigned long flags;
	unsigned long pos;

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_sub(c_ctx->nr_valid, &pblk->inflight_writes);
#endif
	pblk_up_rq(pblk, c_ctx->lun_bitmap);

	pos = pblk_rb_sync_init(&pblk->rwb, &flags);
	if (pos == c_ctx->sentry) {
		pos = pblk_end_w_bio(pblk, rqd, c_ctx);

retry:
		list_for_each_entry_safe(c, r, &pblk->compl_list, list) {
			rqd = nvm_rq_from_c_ctx(c);
			if (c->sentry == pos) {
				pos = pblk_end_queued_w_bio(pblk, rqd, c);
				goto retry;
			}
		}
	} else {
		WARN_ON(nvm_rq_from_c_ctx(c_ctx) != rqd);
		list_add_tail(&c_ctx->list, &pblk->compl_list);
	}
	pblk_rb_sync_end(&pblk->rwb, &flags);
}

/*
 * Map the remaining sectors in the chunk, starting from ppa. Called on a
 * write error: the untouched sectors of the failed chunk are marked as
 * mapped and invalid so that the line accounting stays consistent and the
 * line can later be reclaimed through the write-error GC path.
 */
static void pblk_map_remaining(struct pblk *pblk, struct ppa_addr *ppa,
		int rqd_ppas)
{
	struct pblk_line *line;
	struct ppa_addr map_ppa = *ppa;
	__le64 addr_empty = cpu_to_le64(ADDR_EMPTY);
	__le64 *lba_list;
	u64 paddr;
	int done = 0;
	int n = 0;

	line = pblk_ppa_to_line(pblk, *ppa);
	lba_list = emeta_to_lbas(pblk, line->emeta->buf);

	spin_lock(&line->lock);

	while (!done) {
		paddr = pblk_dev_ppa_to_line_addr(pblk, map_ppa);

		if (!test_and_set_bit(paddr, line->map_bitmap))
			line->left_msecs--;

		if (n < rqd_ppas && lba_list[paddr] != addr_empty)
			line->nr_valid_lbas--;

		lba_list[paddr] = addr_empty;

		if (!test_and_set_bit(paddr, line->invalid_bitmap))
			le32_add_cpu(line->vsc, -1);

		done = nvm_next_ppa_in_chk(pblk->dev, &map_ppa);

		n++;
	}

	line->w_err_gc->has_write_err = 1;
	spin_unlock(&line->lock);
}

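/*
 * Re-arm the ring-buffer entries of a failed write so the writer thread can
 * pick them up again: drop L2P mappings that have since been overwritten,
 * mark each entry as submittable and release the per-entry reference on the
 * line the data was originally mapped to.
 */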
static void pblk_prepare_resubmit(struct pblk *pblk, unsigned int sentry,
				  unsigned int nr_entries)
{
	struct pblk_rb *rb = &pblk->rwb;
	struct pblk_rb_entry *entry;
	struct pblk_line *line;
	struct pblk_w_ctx *w_ctx;
	struct ppa_addr ppa_l2p;
	int flags;
	unsigned int i;

	spin_lock(&pblk->trans_lock);
	for (i = 0; i < nr_entries; i++) {
		entry = &rb->entries[pblk_rb_ptr_wrap(rb, sentry, i)];
		w_ctx = &entry->w_ctx;

		/* Check if the lba has been overwritten */
		if (w_ctx->lba != ADDR_EMPTY) {
			ppa_l2p = pblk_trans_map_get(pblk, w_ctx->lba);
			if (!pblk_ppa_comp(ppa_l2p, entry->cacheline))
				w_ctx->lba = ADDR_EMPTY;
		}

		/* Mark the entry as submittable again */
		flags = READ_ONCE(w_ctx->flags);
		flags |= PBLK_WRITTEN_DATA;
		/* Release flags on write context. Protect from writes */
		smp_store_release(&w_ctx->flags, flags);

		/* Decrease the reference count on the line as we will
		 * re-map these entries
		 */
		line = pblk_ppa_to_line(pblk, w_ctx->ppa);
		atomic_dec(&line->sec_to_update);
		kref_put(&line->ref, pblk_line_put);
	}
	spin_unlock(&pblk->trans_lock);
}

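/*
 * Queue the sentry/count of a failed request on the resubmit list; the
 * writer thread consumes this list before reading new data from the cache.
 * Note that if the allocation fails, the entries are dropped from the
 * resubmit path.
 */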
static void pblk_queue_resubmit(struct pblk *pblk, struct pblk_c_ctx *c_ctx)
{
	struct pblk_c_ctx *r_ctx;

	r_ctx = kzalloc(sizeof(struct pblk_c_ctx), GFP_KERNEL);
	if (!r_ctx)
		return;

	r_ctx->lun_bitmap = NULL;
	r_ctx->sentry = c_ctx->sentry;
	r_ctx->nr_valid = c_ctx->nr_valid;
	r_ctx->nr_padded = c_ctx->nr_padded;

	spin_lock(&pblk->resubmit_lock);
	list_add_tail(&r_ctx->list, &pblk->resubmit_list);
	spin_unlock(&pblk->resubmit_lock);

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_add(c_ctx->nr_valid, &pblk->recov_writes);
#endif
}

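/*
 * Recovery work for a failed user write: log the error, invalidate the
 * remaining sectors of the affected chunk, queue the entries for
 * resubmission and tear down the failed request.
 */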
static void pblk_submit_rec(struct work_struct *work)
{
	struct pblk_rec_ctx *recovery =
			container_of(work, struct pblk_rec_ctx, ws_rec);
	struct pblk *pblk = recovery->pblk;
	struct nvm_rq *rqd = recovery->rqd;
	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
	struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);

	pblk_log_write_err(pblk, rqd);

	pblk_map_remaining(pblk, ppa_list, rqd->nr_ppas);
	pblk_queue_resubmit(pblk, c_ctx);

	pblk_up_rq(pblk, c_ctx->lun_bitmap);
	if (c_ctx->nr_padded)
		pblk_bio_free_pages(pblk, rqd->bio, c_ctx->nr_valid,
							c_ctx->nr_padded);
	bio_put(rqd->bio);
	pblk_free_rqd(pblk, rqd, PBLK_WRITE);
	mempool_free(recovery, &pblk->rec_pool);

	atomic_dec(&pblk->inflight_io);
}

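/*
 * Write error entry point. Called from the end_io path, hence the
 * GFP_ATOMIC allocation; the actual recovery work is deferred to the close
 * workqueue.
 */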
static void pblk_end_w_fail(struct pblk *pblk, struct nvm_rq *rqd)
{
	struct pblk_rec_ctx *recovery;

	recovery = mempool_alloc(&pblk->rec_pool, GFP_ATOMIC);
	if (!recovery) {
		pblk_err(pblk, "could not allocate recovery work\n");
		return;
	}

	recovery->pblk = pblk;
	recovery->rqd = rqd;

	INIT_WORK(&recovery->ws_rec, pblk_submit_rec);
	queue_work(pblk->close_wq, &recovery->ws_rec);
}

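/* Completion callback for user data writes */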
static void pblk_end_io_write(struct nvm_rq *rqd)
{
	struct pblk *pblk = rqd->private;
	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);

	if (rqd->error) {
		pblk_end_w_fail(pblk, rqd);
		return;
	}

	if (trace_pblk_chunk_state_enabled())
		pblk_check_chunk_state_update(pblk, rqd);
#ifdef CONFIG_NVM_PBLK_DEBUG
	WARN_ONCE(rqd->bio->bi_status, "pblk: corrupted write error\n");
#endif

	pblk_complete_write(pblk, rqd, c_ctx);
	atomic_dec(&pblk->inflight_io);
}

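/*
 * Completion callback for emeta (line metadata) writes. Once all metadata
 * sectors for the line have synced, schedule the line close work.
 */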
static void pblk_end_io_write_meta(struct nvm_rq *rqd)
{
	struct pblk *pblk = rqd->private;
	struct pblk_g_ctx *m_ctx = nvm_rq_to_pdu(rqd);
	struct pblk_line *line = m_ctx->private;
	struct pblk_emeta *emeta = line->emeta;
	struct ppa_addr *ppa_list = nvm_rq_to_ppa_list(rqd);
	int sync;

	pblk_up_chunk(pblk, ppa_list[0]);

	if (rqd->error) {
		pblk_log_write_err(pblk, rqd);
		pblk_err(pblk, "metadata I/O failed. Line %d\n", line->id);
		line->w_err_gc->has_write_err = 1;
	} else {
		if (trace_pblk_chunk_state_enabled())
			pblk_check_chunk_state_update(pblk, rqd);
	}

	sync = atomic_add_return(rqd->nr_ppas, &emeta->sync);
	if (sync == emeta->nr_entries)
		pblk_gen_run_ws(pblk, line, NULL, pblk_line_close_ws,
						GFP_ATOMIC, pblk->close_wq);

	pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);

	atomic_dec(&pblk->inflight_io);
}

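/*
 * Common initialization for a write request; allocates the per-request
 * metadata buffer via pblk_alloc_rqd_meta().
 */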
static int pblk_alloc_w_rq(struct pblk *pblk, struct nvm_rq *rqd,
			   unsigned int nr_secs, nvm_end_io_fn(*end_io))
{
	/* Setup write request */
	rqd->opcode = NVM_OP_PWRITE;
	rqd->nr_ppas = nr_secs;
	rqd->is_seq = 1;
	rqd->private = pblk;
	rqd->end_io = end_io;

	return pblk_alloc_rqd_meta(pblk, rqd);
}

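/*
 * Populate a user write request: allocate the LUN bitmap that tracks which
 * parallel units this request touches (released later via pblk_up_rq()) and
 * map the cached entries to physical addresses. If the erase line still has
 * blocks pending erase, use the mapping variant that also picks a block to
 * erase.
 */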
static int pblk_setup_w_rq(struct pblk *pblk, struct nvm_rq *rqd,
			   struct ppa_addr *erase_ppa)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line *e_line = pblk_line_get_erase(pblk);
	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
	unsigned int valid = c_ctx->nr_valid;
	unsigned int padded = c_ctx->nr_padded;
	unsigned int nr_secs = valid + padded;
	unsigned long *lun_bitmap;
	int ret;

	lun_bitmap = kzalloc(lm->lun_bitmap_len, GFP_KERNEL);
	if (!lun_bitmap)
		return -ENOMEM;
	c_ctx->lun_bitmap = lun_bitmap;

	ret = pblk_alloc_w_rq(pblk, rqd, nr_secs, pblk_end_io_write);
	if (ret) {
		kfree(lun_bitmap);
		return ret;
	}

	if (likely(!e_line || !atomic_read(&e_line->left_eblks)))
		ret = pblk_map_rq(pblk, rqd, c_ctx->sentry, lun_bitmap,
							valid, 0);
	else
		ret = pblk_map_erase_rq(pblk, rqd, c_ctx->sentry, lun_bitmap,
							valid, erase_ppa);

	return ret;
}

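/*
 * Compute how many sectors to write in this request, given the sectors
 * available in the cache and any outstanding flush point. The debug check
 * flags calculations that would stall a flush or over-commit the buffer.
 */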
static int pblk_calc_secs_to_sync(struct pblk *pblk, unsigned int secs_avail,
				  unsigned int secs_to_flush)
{
	int secs_to_sync;

	secs_to_sync = pblk_calc_secs(pblk, secs_avail, secs_to_flush, true);

#ifdef CONFIG_NVM_PBLK_DEBUG
	if ((!secs_to_sync && secs_to_flush)
			|| (secs_to_sync < 0)
			|| (secs_to_sync > secs_avail && !secs_to_flush)) {
		pblk_err(pblk, "bad sector calculation (a:%d,s:%d,f:%d)\n",
				secs_avail, secs_to_sync, secs_to_flush);
	}
#endif

	return secs_to_sync;
}

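/*
 * Write the next chunk of end-of-line metadata (emeta) for a line that is
 * being closed. Allocates pages on the line, maps them into the request and
 * submits the I/O; on failure, the page allocation and list state are
 * rolled back.
 */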
int pblk_submit_meta_io(struct pblk *pblk, struct pblk_line *meta_line)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_emeta *emeta = meta_line->emeta;
	struct ppa_addr *ppa_list;
	struct pblk_g_ctx *m_ctx;
	struct bio *bio;
	struct nvm_rq *rqd;
	void *data;
	u64 paddr;
	int rq_ppas = pblk->min_write_pgs;
	int id = meta_line->id;
	int rq_len;
	int i, j;
	int ret;

	rqd = pblk_alloc_rqd(pblk, PBLK_WRITE_INT);

	m_ctx = nvm_rq_to_pdu(rqd);
	m_ctx->private = meta_line;

	rq_len = rq_ppas * geo->csecs;
	data = ((void *)emeta->buf) + emeta->mem;

	bio = pblk_bio_map_addr(pblk, data, rq_ppas, rq_len,
					l_mg->emeta_alloc_type, GFP_KERNEL);
	if (IS_ERR(bio)) {
		pblk_err(pblk, "failed to map emeta io\n");
		ret = PTR_ERR(bio);
		goto fail_free_rqd;
	}
	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
	rqd->bio = bio;

	ret = pblk_alloc_w_rq(pblk, rqd, rq_ppas, pblk_end_io_write_meta);
	if (ret)
		goto fail_free_bio;

	ppa_list = nvm_rq_to_ppa_list(rqd);
	for (i = 0; i < rqd->nr_ppas; ) {
		spin_lock(&meta_line->lock);
		paddr = __pblk_alloc_page(pblk, meta_line, rq_ppas);
		spin_unlock(&meta_line->lock);
		for (j = 0; j < rq_ppas; j++, i++, paddr++)
			ppa_list[i] = addr_to_gen_ppa(pblk, paddr, id);
	}

	spin_lock(&l_mg->close_lock);
	emeta->mem += rq_len;
	if (emeta->mem >= lm->emeta_len[0])
		list_del(&meta_line->list);
	spin_unlock(&l_mg->close_lock);

	pblk_down_chunk(pblk, ppa_list[0]);

	ret = pblk_submit_io(pblk, rqd);
	if (ret) {
		pblk_err(pblk, "emeta I/O submission failed: %d\n", ret);
		goto fail_rollback;
	}

	return NVM_IO_OK;

fail_rollback:
	pblk_up_chunk(pblk, ppa_list[0]);
	spin_lock(&l_mg->close_lock);
	pblk_dealloc_page(pblk, meta_line, rq_ppas);
	/* Undo the list_del above if the line had been removed */
	if (emeta->mem >= lm->emeta_len[0])
		list_add(&meta_line->list, &l_mg->emeta_list);
	spin_unlock(&l_mg->close_lock);
fail_free_bio:
	bio_put(bio);
fail_free_rqd:
	pblk_free_rqd(pblk, rqd, PBLK_WRITE_INT);
	return ret;
}

static inline bool pblk_valid_meta_ppa(struct pblk *pblk,
				       struct pblk_line *meta_line,
				       struct nvm_rq *data_rqd)
{
	struct nvm_tgt_dev *dev = pblk->dev;
	struct nvm_geo *geo = &dev->geo;
	struct pblk_c_ctx *data_c_ctx = nvm_rq_to_pdu(data_rqd);
	struct pblk_line *data_line = pblk_line_get_data(pblk);
	struct ppa_addr ppa, ppa_opt;
	u64 paddr;
	int pos_opt;

	/* Schedule a metadata I/O that is half the distance from the data I/O
	 * with regard to the number of LUNs forming the pblk instance. This
	 * balances LUN conflicts across every I/O.
	 *
	 * When the LUN configuration changes (e.g., due to GC), this distance
	 * can align, which would result in metadata and data I/Os colliding.
	 * In this case, modify the distance so that it is no longer optimal,
	 * but still moves in the right direction.
	 */
	paddr = pblk_lookup_page(pblk, meta_line);
	ppa = addr_to_gen_ppa(pblk, paddr, 0);
	ppa_opt = addr_to_gen_ppa(pblk, paddr + data_line->meta_distance, 0);
	pos_opt = pblk_ppa_to_pos(geo, ppa_opt);

	if (test_bit(pos_opt, data_c_ctx->lun_bitmap) ||
				test_bit(pos_opt, data_line->blk_bitmap))
		return true;

	if (unlikely(pblk_ppa_comp(ppa_opt, ppa)))
		data_line->meta_distance--;

	return false;
}

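/*
 * Decide whether emeta for a line being closed should be written alongside
 * this data request. Returns the metadata line to write, or NULL if there
 * is no pending emeta or the candidate placement is not valid for this data
 * I/O.
 */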
static struct pblk_line *pblk_should_submit_meta_io(struct pblk *pblk,
						    struct nvm_rq *data_rqd)
{
	struct pblk_line_meta *lm = &pblk->lm;
	struct pblk_line_mgmt *l_mg = &pblk->l_mg;
	struct pblk_line *meta_line;

	spin_lock(&l_mg->close_lock);
	if (list_empty(&l_mg->emeta_list)) {
		spin_unlock(&l_mg->close_lock);
		return NULL;
	}
	meta_line = list_first_entry(&l_mg->emeta_list, struct pblk_line, list);
	if (meta_line->emeta->mem >= lm->emeta_len[0]) {
		spin_unlock(&l_mg->close_lock);
		return NULL;
	}
	spin_unlock(&l_mg->close_lock);

	if (!pblk_valid_meta_ppa(pblk, meta_line, data_rqd))
		return NULL;

	return meta_line;
}

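/*
 * Submit one write set: the data I/O for the current line, an optional
 * erase for the next line, and an optional emeta I/O for a previous line
 * that is being closed.
 */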
static int pblk_submit_io_set(struct pblk *pblk, struct nvm_rq *rqd)
{
	struct ppa_addr erase_ppa;
	struct pblk_line *meta_line;
	int err;

	pblk_ppa_set_empty(&erase_ppa);

	/* Assign lbas to ppas and populate request structure */
	err = pblk_setup_w_rq(pblk, rqd, &erase_ppa);
	if (err) {
		pblk_err(pblk, "could not setup write request: %d\n", err);
		return NVM_IO_ERR;
	}

	meta_line = pblk_should_submit_meta_io(pblk, rqd);

	/* Submit data write for current data line */
	err = pblk_submit_io(pblk, rqd);
	if (err) {
		pblk_err(pblk, "data I/O submission failed: %d\n", err);
		return NVM_IO_ERR;
	}

	if (!pblk_ppa_empty(erase_ppa)) {
		/* Submit erase for next data line */
		if (pblk_blk_erase_async(pblk, erase_ppa)) {
			struct pblk_line *e_line = pblk_line_get_erase(pblk);
			struct nvm_tgt_dev *dev = pblk->dev;
			struct nvm_geo *geo = &dev->geo;
			int bit;

			atomic_inc(&e_line->left_eblks);
			bit = pblk_ppa_to_pos(geo, erase_ppa);
			WARN_ON(!test_and_clear_bit(bit, e_line->erase_bitmap));
		}
	}

	if (meta_line) {
		/* Submit metadata write for previous data line */
		err = pblk_submit_meta_io(pblk, meta_line);
		if (err) {
			pblk_err(pblk, "metadata I/O submission failed: %d\n",
					err);
			return NVM_IO_ERR;
		}
	}

	return NVM_IO_OK;
}

static void pblk_free_write_rqd(struct pblk *pblk, struct nvm_rq *rqd)
{
	struct pblk_c_ctx *c_ctx = nvm_rq_to_pdu(rqd);
	struct bio *bio = rqd->bio;

	if (c_ctx->nr_padded)
		pblk_bio_free_pages(pblk, bio, c_ctx->nr_valid,
							c_ctx->nr_padded);
}

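/*
 * Build and submit one write request from the write buffer. Failed writes
 * queued on the resubmit list take priority over new cached data. Sets
 * *secs_left after a successful submission so the writer thread keeps
 * going without sleeping.
 */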
static int pblk_submit_write(struct pblk *pblk, int *secs_left)
{
	struct bio *bio;
	struct nvm_rq *rqd;
	unsigned int secs_avail, secs_to_sync, secs_to_com;
	unsigned int secs_to_flush, packed_meta_pgs;
	unsigned long pos;
	unsigned int resubmit;

	*secs_left = 0;

	spin_lock(&pblk->resubmit_lock);
	resubmit = !list_empty(&pblk->resubmit_list);
	spin_unlock(&pblk->resubmit_lock);

	/* Resubmit failed writes first */
	if (resubmit) {
		struct pblk_c_ctx *r_ctx;

		spin_lock(&pblk->resubmit_lock);
		r_ctx = list_first_entry(&pblk->resubmit_list,
					struct pblk_c_ctx, list);
		list_del(&r_ctx->list);
		spin_unlock(&pblk->resubmit_lock);

		secs_avail = r_ctx->nr_valid;
		pos = r_ctx->sentry;

		pblk_prepare_resubmit(pblk, pos, secs_avail);
		secs_to_sync = pblk_calc_secs_to_sync(pblk, secs_avail,
				secs_avail);

		kfree(r_ctx);
	} else {
		/* If there are no sectors in the cache,
		 * flushes (bios without data) will be cleared by
		 * the cache threads
		 */
		secs_avail = pblk_rb_read_count(&pblk->rwb);
		if (!secs_avail)
			return 0;

		secs_to_flush = pblk_rb_flush_point_count(&pblk->rwb);
		if (!secs_to_flush && secs_avail < pblk->min_write_pgs_data)
			return 0;

		secs_to_sync = pblk_calc_secs_to_sync(pblk, secs_avail,
					secs_to_flush);
		if (secs_to_sync > pblk->max_write_pgs) {
			pblk_err(pblk, "bad buffer sync calculation\n");
			return 0;
		}

		secs_to_com = (secs_to_sync > secs_avail) ?
			secs_avail : secs_to_sync;
		pos = pblk_rb_read_commit(&pblk->rwb, secs_to_com);
	}

	packed_meta_pgs = (pblk->min_write_pgs - pblk->min_write_pgs_data);
	bio = bio_alloc(GFP_KERNEL, secs_to_sync + packed_meta_pgs);

	bio->bi_iter.bi_sector = 0; /* internal bio */
	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);

	rqd = pblk_alloc_rqd(pblk, PBLK_WRITE);
	rqd->bio = bio;

	if (pblk_rb_read_to_bio(&pblk->rwb, rqd, pos, secs_to_sync,
								secs_avail)) {
		pblk_err(pblk, "corrupted write bio\n");
		goto fail_put_bio;
	}

	if (pblk_submit_io_set(pblk, rqd))
		goto fail_free_bio;

#ifdef CONFIG_NVM_PBLK_DEBUG
	atomic_long_add(secs_to_sync, &pblk->sub_writes);
#endif

	*secs_left = 1;
	return 0;

fail_free_bio:
	pblk_free_write_rqd(pblk, rqd);
fail_put_bio:
	bio_put(bio);
	pblk_free_rqd(pblk, rqd, PBLK_WRITE);

	return -EINTR;
}

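/*
 * Writer thread: drain the write buffer to the media until asked to stop.
 * Once a submission error occurs, the thread stops issuing new writes and
 * only sleeps until it is parked, leaving resubmission to the recovery
 * path.
 */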
int pblk_write_ts(void *data)
{
	struct pblk *pblk = data;
	int secs_left;
	int write_failure = 0;

	while (!kthread_should_stop()) {
		if (!write_failure) {
			write_failure = pblk_submit_write(pblk, &secs_left);

			if (secs_left)
				continue;
		}
		set_current_state(TASK_INTERRUPTIBLE);
		io_schedule();
	}

	return 0;
}