drivers/mtd/ubi/wl.c
/*
 * Copyright (c) International Business Machines Corp., 2006
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
 * the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 *
 * Authors: Artem Bityutskiy (Битюцкий Артём), Thomas Gleixner
 */

/*
 * UBI wear-leveling sub-system.
 *
 * This sub-system is responsible for wear-leveling. It works in terms of
 * physical eraseblocks and erase counters and knows nothing about logical
 * eraseblocks, volumes, etc. From this sub-system's perspective all physical
 * eraseblocks are of two types - used and free. Used physical eraseblocks are
 * those that were "get" by the 'ubi_wl_get_peb()' function, and free physical
 * eraseblocks are those that were put by the 'ubi_wl_put_peb()' function.
 *
 * Physical eraseblocks returned by 'ubi_wl_get_peb()' have only the erase
 * counter header. The rest of the physical eraseblock contains only %0xFF
 * bytes.
 *
 * When physical eraseblocks are returned to the WL sub-system by means of the
 * 'ubi_wl_put_peb()' function, they are scheduled for erasure. The erasure is
 * done asynchronously in the context of the per-UBI device background thread,
 * which is also managed by the WL sub-system.
 *
 * The wear-leveling is ensured by means of moving the contents of used
 * physical eraseblocks with low erase counter to free physical eraseblocks
 * with high erase counter.
 *
 * If the WL sub-system fails to erase a physical eraseblock, it marks it as
 * bad.
 *
 * This sub-system is also responsible for scrubbing. If a bit-flip is detected
 * in a physical eraseblock, it has to be moved. Technically this is the same
 * as moving it for wear-leveling reasons.
 *
 * As was said, for the UBI sub-system all physical eraseblocks are either
 * "free" or "used". Free eraseblocks are kept in the @wl->free RB-tree, while
 * used eraseblocks are kept in @wl->used, @wl->erroneous, or @wl->scrub
 * RB-trees, as well as (temporarily) in the @wl->pq queue.
 *
 * When the WL sub-system returns a physical eraseblock, the physical
 * eraseblock is protected from being moved for some "time". For this reason,
 * the physical eraseblock is not directly moved from the @wl->free tree to the
 * @wl->used tree. There is a protection queue in between where this
 * physical eraseblock is temporarily stored (@wl->pq).
 *
 * All this protection stuff is needed because:
 *  o we don't want to move physical eraseblocks just after we have given them
 *    to the user; instead, we first want to let users fill them up with data;
 *
 *  o there is a chance that the user will put the physical eraseblock very
 *    soon, so it makes sense not to move it for some time, but wait.
 *
 * Physical eraseblocks stay protected only for limited time. But the "time" is
 * measured in erase cycles in this case. This is implemented with help of the
 * protection queue. Eraseblocks are put to the tail of this queue when they
 * are returned by the 'ubi_wl_get_peb()', and eraseblocks are removed from the
 * head of the queue on each erase operation (for any eraseblock). So the
 * length of the queue defines how many (global) erase cycles PEBs are
 * protected.
 *
 * To put it differently, each physical eraseblock has 2 main states: free and
 * used. The former state corresponds to the @wl->free tree. The latter state
 * is split up into several sub-states:
 * o the WL movement is allowed (@wl->used tree);
 * o the WL movement is disallowed (@wl->erroneous) because the PEB is
 *   erroneous - e.g., there was a read error;
 * o the WL movement is temporarily prohibited (@wl->pq queue);
 * o scrubbing is needed (@wl->scrub tree).
 *
 * Depending on the sub-state, wear-leveling entries of the used physical
 * eraseblocks may be kept in one of those structures.
 *
 * Note, in this implementation, we keep a small in-RAM object for each physical
 * eraseblock. This is surely not a scalable solution. But it appears to be good
 * enough for moderately large flashes and it is simple. In future, one may
 * re-work this sub-system and make it more scalable.
 *
 * At the moment this sub-system does not utilize the sequence number, which
 * was introduced relatively recently. But it would be wise to do this because
 * the sequence number of a logical eraseblock characterizes how old it is. For
 * example, when we move a PEB with low erase counter, and we need to pick the
 * target PEB, we pick a PEB with the highest EC if our PEB is "old" and we
 * pick a target PEB with an average EC if our PEB is not very "old". This is
 * room for future re-work of the WL sub-system.
 */
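
/*
 * Illustrative numbers (not from the code): if the least worn used PEB has
 * EC 100 and the picked free PEB has EC 100 + UBI_WL_THRESHOLD or more, the
 * WL worker copies the used PEB's contents to the free one; the EC-100 PEB
 * is then erased, re-enters the free pool and starts taking new writes.
 */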

#include <linux/slab.h>
#include <linux/crc32.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include "ubi.h"
#include "wl.h"

/* Number of physical eraseblocks reserved for wear-leveling purposes */
#define WL_RESERVED_PEBS 1

/*
 * Maximum difference between two erase counters. If this threshold is
 * exceeded, the WL sub-system starts moving data from used physical
 * eraseblocks with low erase counter to free physical eraseblocks with high
 * erase counter.
 */
#define UBI_WL_THRESHOLD CONFIG_MTD_UBI_WL_THRESHOLD

/*
 * When a physical eraseblock is moved, the WL sub-system has to pick the target
 * physical eraseblock to move to. The simplest way would be just to pick the
 * one with the highest erase counter. But in certain workloads this could lead
 * to an unlimited wear of one or a few physical eraseblocks. Indeed, imagine a
 * situation when the picked physical eraseblock is constantly erased after the
 * data is written to it. So, we have a constant which limits the highest erase
 * counter of the free physical eraseblock to pick. Namely, the WL sub-system
 * does not pick eraseblocks with erase counter greater than the lowest erase
 * counter plus %WL_FREE_MAX_DIFF.
 */
#define WL_FREE_MAX_DIFF (2*UBI_WL_THRESHOLD)
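
/*
 * For example, assuming the usual Kconfig default of 4096 for
 * CONFIG_MTD_UBI_WL_THRESHOLD, the free PEB picked as a wear-leveling
 * target may exceed the lowest free erase counter by at most 8192.
 */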

/*
 * Maximum number of consecutive background thread failures which is enough to
 * switch to read-only mode.
 */
#define WL_MAX_FAILURES 32

static int self_check_ec(struct ubi_device *ubi, int pnum, int ec);
static int self_check_in_wl_tree(const struct ubi_device *ubi,
                                 struct ubi_wl_entry *e, struct rb_root *root);
static int self_check_in_pq(const struct ubi_device *ubi,
                            struct ubi_wl_entry *e);

/**
 * wl_tree_add - add a wear-leveling entry to a WL RB-tree.
 * @e: the wear-leveling entry to add
 * @root: the root of the tree
 *
 * Note, we use (erase counter, physical eraseblock number) pairs as keys in
 * the @ubi->used and @ubi->free RB-trees.
 */
static void wl_tree_add(struct ubi_wl_entry *e, struct rb_root *root)
{
        struct rb_node **p, *parent = NULL;

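        /* Standard rbtree descent: order by EC, break ties by PEB number. */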
        p = &root->rb_node;
        while (*p) {
                struct ubi_wl_entry *e1;

                parent = *p;
                e1 = rb_entry(parent, struct ubi_wl_entry, u.rb);

                if (e->ec < e1->ec)
                        p = &(*p)->rb_left;
                else if (e->ec > e1->ec)
                        p = &(*p)->rb_right;
                else {
                        ubi_assert(e->pnum != e1->pnum);
                        if (e->pnum < e1->pnum)
                                p = &(*p)->rb_left;
                        else
                                p = &(*p)->rb_right;
                }
        }

        rb_link_node(&e->u.rb, parent, p);
        rb_insert_color(&e->u.rb, root);
}

/**
 * wl_entry_destroy - destroy a wear-leveling entry.
 * @ubi: UBI device description object
 * @e: the wear-leveling entry to destroy
 *
 * This function destroys a wear leveling entry and removes
 * the reference from the lookup table.
 */
static void wl_entry_destroy(struct ubi_device *ubi, struct ubi_wl_entry *e)
{
        ubi->lookuptbl[e->pnum] = NULL;
        kmem_cache_free(ubi_wl_entry_slab, e);
}

/**
 * do_work - do one pending work.
 * @ubi: UBI device description object
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int do_work(struct ubi_device *ubi)
{
        int err;
        struct ubi_work *wrk;

        cond_resched();

        /*
         * @ubi->work_sem is used to synchronize with the workers. Workers take
         * it in read mode, so many of them may be doing works at a time. But
         * the queue flush code has to be sure the whole queue of works is
         * done, and it takes @ubi->work_sem in write mode.
         */
        down_read(&ubi->work_sem);
        spin_lock(&ubi->wl_lock);
        if (list_empty(&ubi->works)) {
                spin_unlock(&ubi->wl_lock);
                up_read(&ubi->work_sem);
                return 0;
        }

        wrk = list_entry(ubi->works.next, struct ubi_work, list);
        list_del(&wrk->list);
        ubi->works_count -= 1;
        ubi_assert(ubi->works_count >= 0);
        spin_unlock(&ubi->wl_lock);

        /*
         * Call the worker function. Do not touch the work structure
         * after this call as it will have been freed or reused by that
         * time by the worker function.
         */
        err = wrk->func(ubi, wrk, 0);
        if (err)
                ubi_err(ubi, "work failed with error code %d", err);
        up_read(&ubi->work_sem);

        return err;
}

/**
 * in_wl_tree - check if wear-leveling entry is present in a WL RB-tree.
 * @e: the wear-leveling entry to check
 * @root: the root of the tree
 *
 * This function returns non-zero if @e is in the @root RB-tree and zero if it
 * is not.
 */
static int in_wl_tree(struct ubi_wl_entry *e, struct rb_root *root)
{
        struct rb_node *p;

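        /*
         * Mirror the insertion order (EC first, then PEB number); the pnum
         * check below catches @e as soon as the descent lands on it.
         */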
        p = root->rb_node;
        while (p) {
                struct ubi_wl_entry *e1;

                e1 = rb_entry(p, struct ubi_wl_entry, u.rb);

                if (e->pnum == e1->pnum) {
                        ubi_assert(e == e1);
                        return 1;
                }

                if (e->ec < e1->ec)
                        p = p->rb_left;
                else if (e->ec > e1->ec)
                        p = p->rb_right;
                else {
                        ubi_assert(e->pnum != e1->pnum);
                        if (e->pnum < e1->pnum)
                                p = p->rb_left;
                        else
                                p = p->rb_right;
                }
        }

        return 0;
}

/**
 * in_pq - check if a wear-leveling entry is present in the protection queue.
 * @ubi: UBI device description object
 * @e: the wear-leveling entry to check
 *
 * This function returns non-zero if @e is in the protection queue and zero
 * if it is not.
 */
static inline int in_pq(const struct ubi_device *ubi, struct ubi_wl_entry *e)
{
        struct ubi_wl_entry *p;
        int i;

        for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i)
                list_for_each_entry(p, &ubi->pq[i], u.list)
                        if (p == e)
                                return 1;

        return 0;
}

/**
 * prot_queue_add - add physical eraseblock to the protection queue.
 * @ubi: UBI device description object
 * @e: the physical eraseblock to add
 *
 * This function adds @e to the tail of the protection queue @ubi->pq, where
 * @e will stay for %UBI_PROT_QUEUE_LEN erase operations and will be
 * temporarily protected from the wear-leveling worker. Note, @ubi->wl_lock
 * has to be held.
 */
static void prot_queue_add(struct ubi_device *ubi, struct ubi_wl_entry *e)
{
        int pq_tail = ubi->pq_head - 1;

        if (pq_tail < 0)
                pq_tail = UBI_PROT_QUEUE_LEN - 1;
        ubi_assert(pq_tail >= 0 && pq_tail < UBI_PROT_QUEUE_LEN);
        list_add_tail(&e->u.list, &ubi->pq[pq_tail]);
        dbg_wl("added PEB %d EC %d to the protection queue", e->pnum, e->ec);
}
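
/*
 * A minimal stand-alone sketch of the queue geometry used above (assuming
 * a bare ring of list heads; illustrative only, never built): entries are
 * added just behind the head slot and lose protection when the head
 * reaches them, i.e. after UBI_PROT_QUEUE_LEN global erase operations.
 */
#if 0
struct pq_sketch {
        struct list_head slots[UBI_PROT_QUEUE_LEN];
        int head;                       /* slot served on the next erase */
};

static void pq_sketch_protect(struct pq_sketch *pq, struct list_head *e)
{
        int tail = pq->head ? pq->head - 1 : UBI_PROT_QUEUE_LEN - 1;

        list_add_tail(e, &pq->slots[tail]);     /* ages out last */
}

static void pq_sketch_serve(struct pq_sketch *pq)
{
        /* everything in the head slot is no longer protected */
        pq->head = (pq->head + 1) % UBI_PROT_QUEUE_LEN;
}
#endif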

/**
 * find_wl_entry - find wear-leveling entry closest to certain erase counter.
 * @ubi: UBI device description object
 * @root: the RB-tree where to look for
 * @diff: maximum possible difference from the smallest erase counter
 *
 * This function looks for a wear leveling entry with erase counter closest to
 * min + @diff, where min is the smallest erase counter.
 */
static struct ubi_wl_entry *find_wl_entry(struct ubi_device *ubi,
                                          struct rb_root *root, int diff)
{
        struct rb_node *p;
        struct ubi_wl_entry *e, *prev_e = NULL;
        int max;

        e = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb);
        max = e->ec + diff;

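        /*
         * Walk right while below @max: the descent ends on the entry with
         * the largest erase counter that is still smaller than min + @diff.
         */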
        p = root->rb_node;
        while (p) {
                struct ubi_wl_entry *e1;

                e1 = rb_entry(p, struct ubi_wl_entry, u.rb);
                if (e1->ec >= max)
                        p = p->rb_left;
                else {
                        p = p->rb_right;
                        prev_e = e;
                        e = e1;
                }
        }

        /* If no fastmap has been written and this WL entry can be used
         * as anchor PEB, hold it back and return the second best WL entry
         * such that fastmap can use the anchor PEB later. */
        if (prev_e && !ubi->fm_disabled &&
            !ubi->fm && e->pnum < UBI_FM_MAX_START)
                return prev_e;

        return e;
}

/**
 * find_mean_wl_entry - find wear-leveling entry with medium erase counter.
 * @ubi: UBI device description object
 * @root: the RB-tree where to look for
 *
 * This function looks for a wear leveling entry with medium erase counter,
 * but not greater than or equal to the lowest erase counter plus
 * %WL_FREE_MAX_DIFF/2.
 */
static struct ubi_wl_entry *find_mean_wl_entry(struct ubi_device *ubi,
                                               struct rb_root *root)
{
        struct ubi_wl_entry *e, *first, *last;

        first = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb);
        last = rb_entry(rb_last(root), struct ubi_wl_entry, u.rb);

        if (last->ec - first->ec < WL_FREE_MAX_DIFF) {
                e = rb_entry(root->rb_node, struct ubi_wl_entry, u.rb);

                /* If no fastmap has been written and this WL entry can be used
                 * as anchor PEB, hold it back and return the second best
                 * WL entry such that fastmap can use the anchor PEB later. */
                e = may_reserve_for_fm(ubi, e, root);
        } else
                e = find_wl_entry(ubi, root, WL_FREE_MAX_DIFF/2);

        return e;
}
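
/*
 * Note: picking the tree root above is a cheap approximation of the
 * median entry - rbtree balancing keeps the root's erase counter near
 * the middle of the range when the spread is below WL_FREE_MAX_DIFF.
 */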

/**
 * wl_get_wle - get a mean wl entry to be used by ubi_wl_get_peb() or
 * refill_wl_user_pool().
 * @ubi: UBI device description object
 *
 * This function returns a wear leveling entry in case of success and
 * NULL in case of failure.
 */
static struct ubi_wl_entry *wl_get_wle(struct ubi_device *ubi)
{
        struct ubi_wl_entry *e;

        e = find_mean_wl_entry(ubi, &ubi->free);
        if (!e) {
                ubi_err(ubi, "no free eraseblocks");
                return NULL;
        }

        self_check_in_wl_tree(ubi, e, &ubi->free);

        /*
         * Move the physical eraseblock to the protection queue where it will
         * be protected from being moved for some time.
         */
        rb_erase(&e->u.rb, &ubi->free);
        ubi->free_count--;
        dbg_wl("PEB %d EC %d", e->pnum, e->ec);

        return e;
}

/**
 * prot_queue_del - remove a physical eraseblock from the protection queue.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock to remove
 *
 * This function deletes PEB @pnum from the protection queue and returns zero
 * in case of success and %-ENODEV if the PEB was not found.
 */
static int prot_queue_del(struct ubi_device *ubi, int pnum)
{
        struct ubi_wl_entry *e;

        e = ubi->lookuptbl[pnum];
        if (!e)
                return -ENODEV;

        if (self_check_in_pq(ubi, e))
                return -ENODEV;

        list_del(&e->u.list);
        dbg_wl("deleted PEB %d from the protection queue", e->pnum);
        return 0;
}

/**
 * sync_erase - synchronously erase a physical eraseblock.
 * @ubi: UBI device description object
 * @e: the physical eraseblock to erase
 * @torture: if the physical eraseblock has to be tortured
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
                      int torture)
{
        int err;
        struct ubi_ec_hdr *ec_hdr;
        unsigned long long ec = e->ec;

        dbg_wl("erase PEB %d, old EC %llu", e->pnum, ec);

        err = self_check_ec(ubi, e->pnum, e->ec);
        if (err)
                return -EINVAL;

        ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
        if (!ec_hdr)
                return -ENOMEM;

        err = ubi_io_sync_erase(ubi, e->pnum, torture);
        if (err < 0)
                goto out_free;

        ec += err;
        if (ec > UBI_MAX_ERASECOUNTER) {
                /*
                 * Erase counter overflow. Upgrade UBI and use 64-bit
                 * erase counters internally.
                 */
                ubi_err(ubi, "erase counter overflow at PEB %d, EC %llu",
                        e->pnum, ec);
                err = -EINVAL;
                goto out_free;
        }

        dbg_wl("erased PEB %d, new EC %llu", e->pnum, ec);

        ec_hdr->ec = cpu_to_be64(ec);

        err = ubi_io_write_ec_hdr(ubi, e->pnum, ec_hdr);
        if (err)
                goto out_free;

        e->ec = ec;
        spin_lock(&ubi->wl_lock);
        if (e->ec > ubi->max_ec)
                ubi->max_ec = e->ec;
        spin_unlock(&ubi->wl_lock);

out_free:
        kfree(ec_hdr);
        return err;
}

/**
 * serve_prot_queue - check if it is time to stop protecting PEBs.
 * @ubi: UBI device description object
 *
 * This function is called after each erase operation and removes PEBs from the
 * head of the protection queue. These PEBs have been protected for long enough
 * and should be moved to the used tree.
 */
static void serve_prot_queue(struct ubi_device *ubi)
{
        struct ubi_wl_entry *e, *tmp;
        int count;

        /*
         * There may be several protected physical eraseblocks to remove,
         * process them all.
         */
repeat:
        count = 0;
        spin_lock(&ubi->wl_lock);
        list_for_each_entry_safe(e, tmp, &ubi->pq[ubi->pq_head], u.list) {
                dbg_wl("PEB %d EC %d protection over, move to used tree",
                        e->pnum, e->ec);

                list_del(&e->u.list);
                wl_tree_add(e, &ubi->used);
                if (count++ > 32) {
                        /*
                         * Let's be nice and avoid holding the spinlock for
                         * too long.
                         */
                        spin_unlock(&ubi->wl_lock);
                        cond_resched();
                        goto repeat;
                }
        }

        ubi->pq_head += 1;
        if (ubi->pq_head == UBI_PROT_QUEUE_LEN)
                ubi->pq_head = 0;
        ubi_assert(ubi->pq_head >= 0 && ubi->pq_head < UBI_PROT_QUEUE_LEN);
        spin_unlock(&ubi->wl_lock);
}

/**
 * __schedule_ubi_work - schedule a work.
 * @ubi: UBI device description object
 * @wrk: the work to schedule
 *
 * This function adds a work defined by @wrk to the tail of the pending works
 * list. Can only be used if @ubi->work_sem is already held in read mode!
 */
static void __schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
{
        spin_lock(&ubi->wl_lock);
        list_add_tail(&wrk->list, &ubi->works);
        ubi_assert(ubi->works_count >= 0);
        ubi->works_count += 1;
        if (ubi->thread_enabled && !ubi_dbg_is_bgt_disabled(ubi))
                wake_up_process(ubi->bgt_thread);
        spin_unlock(&ubi->wl_lock);
}

/**
 * schedule_ubi_work - schedule a work.
 * @ubi: UBI device description object
 * @wrk: the work to schedule
 *
 * This function adds a work defined by @wrk to the tail of the pending works
 * list.
 */
static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
{
        down_read(&ubi->work_sem);
        __schedule_ubi_work(ubi, wrk);
        up_read(&ubi->work_sem);
}

static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
                        int shutdown);

/**
 * schedule_erase - schedule an erase work.
 * @ubi: UBI device description object
 * @e: the WL entry of the physical eraseblock to erase
 * @vol_id: the volume ID that last used this PEB
 * @lnum: the last used logical eraseblock number for the PEB
 * @torture: if the physical eraseblock has to be tortured
 * @nested: set to non-zero if this work is scheduled from within a worker,
 *          i.e. with @ubi->work_sem already held in read mode
 *
 * This function returns zero in case of success and %-ENOMEM in case of
 * failure.
 */
static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
                          int vol_id, int lnum, int torture, bool nested)
{
        struct ubi_work *wl_wrk;

        ubi_assert(e);

        dbg_wl("schedule erasure of PEB %d, EC %d, torture %d",
               e->pnum, e->ec, torture);

        wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
        if (!wl_wrk)
                return -ENOMEM;

        wl_wrk->func = &erase_worker;
        wl_wrk->e = e;
        wl_wrk->vol_id = vol_id;
        wl_wrk->lnum = lnum;
        wl_wrk->torture = torture;

        if (nested)
                __schedule_ubi_work(ubi, wl_wrk);
        else
                schedule_ubi_work(ubi, wl_wrk);
        return 0;
}

static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk);
/**
 * do_sync_erase - run the erase worker synchronously.
 * @ubi: UBI device description object
 * @e: the WL entry of the physical eraseblock to erase
 * @vol_id: the volume ID that last used this PEB
 * @lnum: the last used logical eraseblock number for the PEB
 * @torture: if the physical eraseblock has to be tortured
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int do_sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
                         int vol_id, int lnum, int torture)
{
        struct ubi_work wl_wrk;

        dbg_wl("sync erase of PEB %i", e->pnum);

        wl_wrk.e = e;
        wl_wrk.vol_id = vol_id;
        wl_wrk.lnum = lnum;
        wl_wrk.torture = torture;

        return __erase_worker(ubi, &wl_wrk);
}

static int ensure_wear_leveling(struct ubi_device *ubi, int nested);
/**
 * wear_leveling_worker - wear-leveling worker function.
 * @ubi: UBI device description object
 * @wrk: the work object
 * @shutdown: non-zero if the worker has to free memory and exit
 * because the WL-subsystem is shutting down
 *
 * This function moves the contents of a used physical eraseblock with a low
 * erase counter to a free physical eraseblock with a high erase counter (or,
 * for scrubbing, off a PEB with bit-flips). Returns zero in case of success
 * and a negative error code in case of failure.
 */
static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
                                int shutdown)
{
        int err, scrubbing = 0, torture = 0, protect = 0, erroneous = 0;
        int erase = 0, keep = 0, vol_id = -1, lnum = -1;
#ifdef CONFIG_MTD_UBI_FASTMAP
        int anchor = wrk->anchor;
#endif
        struct ubi_wl_entry *e1, *e2;
        struct ubi_vid_io_buf *vidb;
        struct ubi_vid_hdr *vid_hdr;
        int dst_leb_clean = 0;

        kfree(wrk);
        if (shutdown)
                return 0;

        vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
        if (!vidb)
                return -ENOMEM;

        vid_hdr = ubi_get_vid_hdr(vidb);

        down_read(&ubi->fm_eba_sem);
        mutex_lock(&ubi->move_mutex);
        spin_lock(&ubi->wl_lock);
        ubi_assert(!ubi->move_from && !ubi->move_to);
        ubi_assert(!ubi->move_to_put);

        if (!ubi->free.rb_node ||
            (!ubi->used.rb_node && !ubi->scrub.rb_node)) {
                /*
                 * No free physical eraseblocks? Well, they must be waiting in
                 * the queue to be erased. Cancel movement - it will be
                 * triggered again when a free physical eraseblock appears.
                 *
                 * No used physical eraseblocks? They must be temporarily
                 * protected from being moved. They will be moved to the
                 * @ubi->used tree later and the wear-leveling will be
                 * triggered again.
                 */
                dbg_wl("cancel WL, a list is empty: free %d, used %d",
                       !ubi->free.rb_node, !ubi->used.rb_node);
                goto out_cancel;
        }

#ifdef CONFIG_MTD_UBI_FASTMAP
        /* Check whether we need to produce an anchor PEB */
        if (!anchor)
                anchor = !anchor_pebs_available(&ubi->free);

        if (anchor) {
                e1 = find_anchor_wl_entry(&ubi->used);
                if (!e1)
                        goto out_cancel;
                e2 = get_peb_for_wl(ubi);
                if (!e2)
                        goto out_cancel;

                self_check_in_wl_tree(ubi, e1, &ubi->used);
                rb_erase(&e1->u.rb, &ubi->used);
                dbg_wl("anchor-move PEB %d to PEB %d", e1->pnum, e2->pnum);
        } else if (!ubi->scrub.rb_node) {
#else
        if (!ubi->scrub.rb_node) {
#endif
                /*
                 * Now pick the least worn-out used physical eraseblock and a
                 * highly worn-out free physical eraseblock. If the erase
                 * counters differ enough, start wear-leveling.
                 */
                e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
                e2 = get_peb_for_wl(ubi);
                if (!e2)
                        goto out_cancel;

                if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) {
                        dbg_wl("no WL needed: min used EC %d, max free EC %d",
                               e1->ec, e2->ec);

                        /* Give the unused PEB back */
                        wl_tree_add(e2, &ubi->free);
                        ubi->free_count++;
                        goto out_cancel;
                }
                self_check_in_wl_tree(ubi, e1, &ubi->used);
                rb_erase(&e1->u.rb, &ubi->used);
                dbg_wl("move PEB %d EC %d to PEB %d EC %d",
                       e1->pnum, e1->ec, e2->pnum, e2->ec);
        } else {
                /* Perform scrubbing */
                scrubbing = 1;
                e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, u.rb);
                e2 = get_peb_for_wl(ubi);
                if (!e2)
                        goto out_cancel;

                self_check_in_wl_tree(ubi, e1, &ubi->scrub);
                rb_erase(&e1->u.rb, &ubi->scrub);
                dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum);
        }

        ubi->move_from = e1;
        ubi->move_to = e2;
        spin_unlock(&ubi->wl_lock);

        /*
         * Now we are going to copy physical eraseblock @e1->pnum to @e2->pnum.
         * We so far do not know which logical eraseblock our physical
         * eraseblock (@e1) belongs to. We have to read the volume identifier
         * header first.
         *
         * Note, we are protected from this PEB being unmapped and erased. The
         * 'ubi_wl_put_peb()' would wait for moving to be finished if the PEB
         * which is being moved was unmapped.
         */

        err = ubi_io_read_vid_hdr(ubi, e1->pnum, vidb, 0);
        if (err && err != UBI_IO_BITFLIPS) {
                dst_leb_clean = 1;
                if (err == UBI_IO_FF) {
                        /*
                         * We are trying to move a PEB without a VID header.
                         * UBI always writes VID headers shortly after the PEB
                         * was given, so we have a situation when it has not
                         * yet had a chance to write it, because it was
                         * preempted. So add this PEB to the protection queue
                         * for now, because presumably more data will be
                         * written there (including the missing VID header),
                         * and then we'll move it.
                         */
                        dbg_wl("PEB %d has no VID header", e1->pnum);
                        protect = 1;
                        goto out_not_moved;
                } else if (err == UBI_IO_FF_BITFLIPS) {
                        /*
                         * The same situation as %UBI_IO_FF, but bit-flips were
                         * detected. It is better to schedule this PEB for
                         * scrubbing.
                         */
                        dbg_wl("PEB %d has no VID header but has bit-flips",
                               e1->pnum);
                        scrubbing = 1;
                        goto out_not_moved;
                } else if (ubi->fast_attach && err == UBI_IO_BAD_HDR_EBADMSG) {
                        /*
                         * While a full scan would detect interrupted erasures
                         * at attach time, we can face them here when the
                         * device was attached from a fastmap.
                         */
                        dbg_wl("PEB %d has ECC errors, maybe from an interrupted erasure",
                               e1->pnum);
                        erase = 1;
                        goto out_not_moved;
                }

                ubi_err(ubi, "error %d while reading VID header from PEB %d",
                        err, e1->pnum);
                goto out_error;
        }

        vol_id = be32_to_cpu(vid_hdr->vol_id);
        lnum = be32_to_cpu(vid_hdr->lnum);

        err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vidb);
        if (err) {
                if (err == MOVE_CANCEL_RACE) {
                        /*
                         * The LEB has not been moved because the volume is
                         * being deleted or the PEB has been put meanwhile. We
                         * should prevent this PEB from being selected for
                         * wear-leveling movement again, so put it to the
                         * protection queue.
                         */
                        protect = 1;
                        dst_leb_clean = 1;
                        goto out_not_moved;
                }
                if (err == MOVE_RETRY) {
                        scrubbing = 1;
                        dst_leb_clean = 1;
                        goto out_not_moved;
                }
                if (err == MOVE_TARGET_BITFLIPS || err == MOVE_TARGET_WR_ERR ||
                    err == MOVE_TARGET_RD_ERR) {
                        /*
                         * Target PEB had bit-flips or write error - torture it.
                         */
                        torture = 1;
                        keep = 1;
                        goto out_not_moved;
                }

                if (err == MOVE_SOURCE_RD_ERR) {
                        /*
                         * An error happened while reading the source PEB. Do
                         * not switch to R/O mode in this case, and give the
                         * upper layers a possibility to recover from this,
                         * e.g. by unmapping the corresponding LEB. Instead,
                         * just put this PEB to the @ubi->erroneous list to
                         * prevent UBI from trying to move it over and over
                         * again.
                         */
                        if (ubi->erroneous_peb_count > ubi->max_erroneous) {
                                ubi_err(ubi, "too many erroneous eraseblocks (%d)",
                                        ubi->erroneous_peb_count);
                                goto out_error;
                        }
                        dst_leb_clean = 1;
                        erroneous = 1;
                        goto out_not_moved;
                }

                if (err < 0)
                        goto out_error;

                ubi_assert(0);
        }

        /* The PEB has been successfully moved */
        if (scrubbing)
                ubi_msg(ubi, "scrubbed PEB %d (LEB %d:%d), data moved to PEB %d",
                        e1->pnum, vol_id, lnum, e2->pnum);
        ubi_free_vid_buf(vidb);

        spin_lock(&ubi->wl_lock);
        if (!ubi->move_to_put) {
                wl_tree_add(e2, &ubi->used);
                e2 = NULL;
        }
        ubi->move_from = ubi->move_to = NULL;
        ubi->move_to_put = ubi->wl_scheduled = 0;
        spin_unlock(&ubi->wl_lock);

        err = do_sync_erase(ubi, e1, vol_id, lnum, 0);
        if (err) {
                if (e2)
                        wl_entry_destroy(ubi, e2);
                goto out_ro;
        }

        if (e2) {
                /*
                 * Well, the target PEB was put meanwhile, schedule it for
                 * erasure.
                 */
                dbg_wl("PEB %d (LEB %d:%d) was put meanwhile, erase",
                       e2->pnum, vol_id, lnum);
                err = do_sync_erase(ubi, e2, vol_id, lnum, 0);
                if (err)
                        goto out_ro;
        }

        dbg_wl("done");
        mutex_unlock(&ubi->move_mutex);
        up_read(&ubi->fm_eba_sem);
        return 0;

        /*
         * For some reason the LEB was not moved - it might be an error, it
         * might be something else. @e1 was not changed, so return it back.
         * @e2 might have been changed, schedule it for erasure.
         */
out_not_moved:
        if (vol_id != -1)
                dbg_wl("cancel moving PEB %d (LEB %d:%d) to PEB %d (%d)",
                       e1->pnum, vol_id, lnum, e2->pnum, err);
        else
                dbg_wl("cancel moving PEB %d to PEB %d (%d)",
                       e1->pnum, e2->pnum, err);
        spin_lock(&ubi->wl_lock);
        if (protect)
                prot_queue_add(ubi, e1);
        else if (erroneous) {
                wl_tree_add(e1, &ubi->erroneous);
                ubi->erroneous_peb_count += 1;
        } else if (scrubbing)
                wl_tree_add(e1, &ubi->scrub);
        else if (keep)
                wl_tree_add(e1, &ubi->used);
        if (dst_leb_clean) {
                wl_tree_add(e2, &ubi->free);
                ubi->free_count++;
        }

        ubi_assert(!ubi->move_to_put);
        ubi->move_from = ubi->move_to = NULL;
        ubi->wl_scheduled = 0;
        spin_unlock(&ubi->wl_lock);

        ubi_free_vid_buf(vidb);
        if (dst_leb_clean) {
                ensure_wear_leveling(ubi, 1);
        } else {
                err = do_sync_erase(ubi, e2, vol_id, lnum, torture);
                if (err)
                        goto out_ro;
        }

        if (erase) {
                err = do_sync_erase(ubi, e1, vol_id, lnum, 1);
                if (err)
                        goto out_ro;
        }

        mutex_unlock(&ubi->move_mutex);
        up_read(&ubi->fm_eba_sem);
        return 0;

out_error:
        if (vol_id != -1)
                ubi_err(ubi, "error %d while moving PEB %d (LEB %d:%d) to PEB %d",
                        err, e1->pnum, vol_id, lnum, e2->pnum);
        else
                ubi_err(ubi, "error %d while moving PEB %d to PEB %d",
                        err, e1->pnum, e2->pnum);
        spin_lock(&ubi->wl_lock);
        ubi->move_from = ubi->move_to = NULL;
        ubi->move_to_put = ubi->wl_scheduled = 0;
        spin_unlock(&ubi->wl_lock);

        ubi_free_vid_buf(vidb);
        wl_entry_destroy(ubi, e1);
        wl_entry_destroy(ubi, e2);

out_ro:
        ubi_ro_mode(ubi);
        mutex_unlock(&ubi->move_mutex);
        up_read(&ubi->fm_eba_sem);
        ubi_assert(err != 0);
        return err < 0 ? err : -EIO;

out_cancel:
        ubi->wl_scheduled = 0;
        spin_unlock(&ubi->wl_lock);
        mutex_unlock(&ubi->move_mutex);
        up_read(&ubi->fm_eba_sem);
        ubi_free_vid_buf(vidb);
        return 0;
}

/**
 * ensure_wear_leveling - schedule wear-leveling if it is needed.
 * @ubi: UBI device description object
 * @nested: set to non-zero if this function is called from a UBI worker
 *
 * This function checks if it is time to start wear-leveling and schedules it
 * if yes. This function returns zero in case of success and a negative error
 * code in case of failure.
 */
static int ensure_wear_leveling(struct ubi_device *ubi, int nested)
{
        int err = 0;
        struct ubi_wl_entry *e1;
        struct ubi_wl_entry *e2;
        struct ubi_work *wrk;

        spin_lock(&ubi->wl_lock);
        if (ubi->wl_scheduled)
                /* Wear-leveling is already in the work queue */
                goto out_unlock;

        /*
         * If the ubi->scrub tree is not empty, scrubbing is needed, and the
         * WL worker has to be scheduled anyway.
         */
        if (!ubi->scrub.rb_node) {
                if (!ubi->used.rb_node || !ubi->free.rb_node)
                        /* No physical eraseblocks - no deal */
                        goto out_unlock;

                /*
                 * We schedule wear-leveling only if the difference between the
                 * lowest erase counter of used physical eraseblocks and a high
                 * erase counter of free physical eraseblocks is greater than
                 * %UBI_WL_THRESHOLD.
                 */
                e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
                e2 = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);

                if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD))
                        goto out_unlock;
                dbg_wl("schedule wear-leveling");
        } else
                dbg_wl("schedule scrubbing");

        ubi->wl_scheduled = 1;
        spin_unlock(&ubi->wl_lock);

        wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
        if (!wrk) {
                err = -ENOMEM;
                goto out_cancel;
        }

        wrk->anchor = 0;
        wrk->func = &wear_leveling_worker;
        if (nested)
                __schedule_ubi_work(ubi, wrk);
        else
                schedule_ubi_work(ubi, wrk);
        return err;

out_cancel:
        spin_lock(&ubi->wl_lock);
        ubi->wl_scheduled = 0;
out_unlock:
        spin_unlock(&ubi->wl_lock);
        return err;
}

/**
 * __erase_worker - physical eraseblock erase worker function.
 * @ubi: UBI device description object
 * @wl_wrk: the work object
 *
 * This function erases a physical eraseblock and performs torture testing if
 * needed. It also takes care of marking the physical eraseblock bad if
 * needed. Returns zero in case of success and a negative error code in case of
 * failure.
 */
static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk)
{
        struct ubi_wl_entry *e = wl_wrk->e;
        int pnum = e->pnum;
        int vol_id = wl_wrk->vol_id;
        int lnum = wl_wrk->lnum;
        int err, available_consumed = 0;

        dbg_wl("erase PEB %d EC %d LEB %d:%d",
               pnum, e->ec, wl_wrk->vol_id, wl_wrk->lnum);

        err = sync_erase(ubi, e, wl_wrk->torture);
        if (!err) {
                spin_lock(&ubi->wl_lock);
                wl_tree_add(e, &ubi->free);
                ubi->free_count++;
                spin_unlock(&ubi->wl_lock);

                /*
                 * One more erase operation has happened, take care of
                 * protected physical eraseblocks.
                 */
                serve_prot_queue(ubi);

                /* And take care of wear-leveling */
                err = ensure_wear_leveling(ubi, 1);
                return err;
        }

        ubi_err(ubi, "failed to erase PEB %d, error %d", pnum, err);

        if (err == -EINTR || err == -ENOMEM || err == -EAGAIN ||
            err == -EBUSY) {
                int err1;

                /* Re-schedule the PEB for erasure */
                err1 = schedule_erase(ubi, e, vol_id, lnum, 0, false);
                if (err1) {
                        wl_entry_destroy(ubi, e);
                        err = err1;
                        goto out_ro;
                }
                return err;
        }

        wl_entry_destroy(ubi, e);
        if (err != -EIO)
                /*
                 * If this is not %-EIO, we have no idea what to do. Scheduling
                 * this physical eraseblock for erasure again would cause
                 * errors again and again. Well, let's switch to R/O mode.
                 */
                goto out_ro;

        /* It is %-EIO, the PEB went bad */

        if (!ubi->bad_allowed) {
                ubi_err(ubi, "bad physical eraseblock %d detected", pnum);
                goto out_ro;
        }

        spin_lock(&ubi->volumes_lock);
        if (ubi->beb_rsvd_pebs == 0) {
                if (ubi->avail_pebs == 0) {
                        spin_unlock(&ubi->volumes_lock);
                        ubi_err(ubi, "no reserved/available physical eraseblocks");
                        goto out_ro;
                }
                ubi->avail_pebs -= 1;
                available_consumed = 1;
        }
        spin_unlock(&ubi->volumes_lock);

        ubi_msg(ubi, "mark PEB %d as bad", pnum);
        err = ubi_io_mark_bad(ubi, pnum);
        if (err)
                goto out_ro;

        spin_lock(&ubi->volumes_lock);
        if (ubi->beb_rsvd_pebs > 0) {
                if (available_consumed) {
                        /*
                         * The amount of reserved PEBs increased since we last
                         * checked.
                         */
                        ubi->avail_pebs += 1;
                        available_consumed = 0;
                }
                ubi->beb_rsvd_pebs -= 1;
        }
        ubi->bad_peb_count += 1;
        ubi->good_peb_count -= 1;
        ubi_calculate_reserved(ubi);
        if (available_consumed)
                ubi_warn(ubi, "no PEBs in the reserved pool, used an available PEB");
        else if (ubi->beb_rsvd_pebs)
                ubi_msg(ubi, "%d PEBs left in the reserve",
                        ubi->beb_rsvd_pebs);
        else
                ubi_warn(ubi, "last PEB from the reserve was used");
        spin_unlock(&ubi->volumes_lock);

        return err;

out_ro:
        if (available_consumed) {
                spin_lock(&ubi->volumes_lock);
                ubi->avail_pebs += 1;
                spin_unlock(&ubi->volumes_lock);
        }
        ubi_ro_mode(ubi);
        return err;
}

static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
                          int shutdown)
{
        int ret;

        if (shutdown) {
                struct ubi_wl_entry *e = wl_wrk->e;

                dbg_wl("cancel erasure of PEB %d EC %d", e->pnum, e->ec);
                kfree(wl_wrk);
                wl_entry_destroy(ubi, e);
                return 0;
        }

        ret = __erase_worker(ubi, wl_wrk);
        kfree(wl_wrk);
        return ret;
}

/**
 * ubi_wl_put_peb - return a PEB to the wear-leveling sub-system.
 * @ubi: UBI device description object
 * @vol_id: the volume ID that last used this PEB
 * @lnum: the last used logical eraseblock number for the PEB
 * @pnum: physical eraseblock to return
 * @torture: if this physical eraseblock has to be tortured
 *
 * This function is called to return physical eraseblock @pnum to the pool of
 * free physical eraseblocks. The @torture flag has to be set if an I/O error
 * occurred to this @pnum and it has to be tested. This function returns zero
 * in case of success, and a negative error code in case of failure.
 */
int ubi_wl_put_peb(struct ubi_device *ubi, int vol_id, int lnum,
                   int pnum, int torture)
{
        int err;
        struct ubi_wl_entry *e;

        dbg_wl("PEB %d", pnum);
        ubi_assert(pnum >= 0);
        ubi_assert(pnum < ubi->peb_count);

        down_read(&ubi->fm_protect);

retry:
        spin_lock(&ubi->wl_lock);
        e = ubi->lookuptbl[pnum];
        if (e == ubi->move_from) {
                /*
                 * User is putting the physical eraseblock which was selected to
                 * be moved. It will be scheduled for erasure in the
                 * wear-leveling worker.
                 */
                dbg_wl("PEB %d is being moved, wait", pnum);
                spin_unlock(&ubi->wl_lock);

                /* Wait for the WL worker by taking the @ubi->move_mutex */
                mutex_lock(&ubi->move_mutex);
                mutex_unlock(&ubi->move_mutex);
                goto retry;
        } else if (e == ubi->move_to) {
                /*
                 * User is putting the physical eraseblock which was selected
                 * as the target the data is moved to. It may happen if the EBA
                 * sub-system already re-mapped the LEB in 'ubi_eba_copy_leb()'
                 * but the WL sub-system has not put the PEB to the "used" tree
                 * yet, but it is about to do this. So we just set a flag which
                 * will tell the WL worker that the PEB is not needed anymore
                 * and should be scheduled for erasure.
                 */
                dbg_wl("PEB %d is the target of data moving", pnum);
                ubi_assert(!ubi->move_to_put);
                ubi->move_to_put = 1;
                spin_unlock(&ubi->wl_lock);
                up_read(&ubi->fm_protect);
                return 0;
        } else {
                if (in_wl_tree(e, &ubi->used)) {
                        self_check_in_wl_tree(ubi, e, &ubi->used);
                        rb_erase(&e->u.rb, &ubi->used);
                } else if (in_wl_tree(e, &ubi->scrub)) {
                        self_check_in_wl_tree(ubi, e, &ubi->scrub);
                        rb_erase(&e->u.rb, &ubi->scrub);
                } else if (in_wl_tree(e, &ubi->erroneous)) {
                        self_check_in_wl_tree(ubi, e, &ubi->erroneous);
                        rb_erase(&e->u.rb, &ubi->erroneous);
                        ubi->erroneous_peb_count -= 1;
                        ubi_assert(ubi->erroneous_peb_count >= 0);
                        /* Erroneous PEBs should be tortured */
                        torture = 1;
                } else {
                        err = prot_queue_del(ubi, e->pnum);
                        if (err) {
                                ubi_err(ubi, "PEB %d not found", pnum);
                                ubi_ro_mode(ubi);
                                spin_unlock(&ubi->wl_lock);
                                up_read(&ubi->fm_protect);
                                return err;
                        }
                }
        }
        spin_unlock(&ubi->wl_lock);

        err = schedule_erase(ubi, e, vol_id, lnum, torture, false);
        if (err) {
                spin_lock(&ubi->wl_lock);
                wl_tree_add(e, &ubi->used);
                spin_unlock(&ubi->wl_lock);
        }

        up_read(&ubi->fm_protect);
        return err;
}

/**
 * ubi_wl_scrub_peb - schedule a physical eraseblock for scrubbing.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock to schedule
 *
 * If a bit-flip in a physical eraseblock is detected, this physical eraseblock
 * needs scrubbing. This function schedules a physical eraseblock for
 * scrubbing which is done in the background. This function returns zero in
 * case of success and a negative error code in case of failure.
 */
int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum)
{
        struct ubi_wl_entry *e;

        ubi_msg(ubi, "schedule PEB %d for scrubbing", pnum);

retry:
        spin_lock(&ubi->wl_lock);
        e = ubi->lookuptbl[pnum];
        if (e == ubi->move_from || in_wl_tree(e, &ubi->scrub) ||
                                   in_wl_tree(e, &ubi->erroneous)) {
                spin_unlock(&ubi->wl_lock);
                return 0;
        }

        if (e == ubi->move_to) {
                /*
                 * This physical eraseblock was used to move data to. The data
                 * was moved but the PEB was not yet inserted to the proper
                 * tree. We should just wait a little and let the WL worker
                 * proceed.
                 */
                spin_unlock(&ubi->wl_lock);
                dbg_wl("the PEB %d is not in proper tree, retry", pnum);
                yield();
                goto retry;
        }

        if (in_wl_tree(e, &ubi->used)) {
                self_check_in_wl_tree(ubi, e, &ubi->used);
                rb_erase(&e->u.rb, &ubi->used);
        } else {
                int err;

                err = prot_queue_del(ubi, e->pnum);
                if (err) {
                        ubi_err(ubi, "PEB %d not found", pnum);
                        ubi_ro_mode(ubi);
                        spin_unlock(&ubi->wl_lock);
                        return err;
                }
        }

        wl_tree_add(e, &ubi->scrub);
        spin_unlock(&ubi->wl_lock);

        /*
         * Technically scrubbing is the same as wear-leveling, so it is done
         * by the WL worker.
         */
        return ensure_wear_leveling(ubi, 0);
}
1379
1380 /**
1381  * ubi_wl_flush - flush all pending works.
1382  * @ubi: UBI device description object
1383  * @vol_id: the volume id to flush for
1384  * @lnum: the logical eraseblock number to flush for
1385  *
1386  * This function executes all pending works for a particular volume id /
1387  * logical eraseblock number pair. If either value is set to %UBI_ALL, then it
1388  * acts as a wildcard for all of the corresponding volume numbers or logical
1389  * eraseblock numbers. It returns zero in case of success and a negative error
1390  * code in case of failure.
1391  */
1392 int ubi_wl_flush(struct ubi_device *ubi, int vol_id, int lnum)
1393 {
1394         int err = 0;
1395         int found = 1;
1396
1397         /*
1398          * Erase while the pending works queue is not empty, but not more than
1399          * the number of currently pending works.
1400          */
1401         dbg_wl("flush pending work for LEB %d:%d (%d pending works)",
1402                vol_id, lnum, ubi->works_count);

        while (found) {
                struct ubi_work *wrk, *tmp;
                found = 0;

                down_read(&ubi->work_sem);
                spin_lock(&ubi->wl_lock);
                list_for_each_entry_safe(wrk, tmp, &ubi->works, list) {
                        if ((vol_id == UBI_ALL || wrk->vol_id == vol_id) &&
                            (lnum == UBI_ALL || wrk->lnum == lnum)) {
                                list_del(&wrk->list);
                                ubi->works_count -= 1;
                                ubi_assert(ubi->works_count >= 0);
                                spin_unlock(&ubi->wl_lock);

                                err = wrk->func(ubi, wrk, 0);
                                if (err) {
                                        up_read(&ubi->work_sem);
                                        return err;
                                }

                                spin_lock(&ubi->wl_lock);
                                found = 1;
                                break;
                        }
                }
                spin_unlock(&ubi->wl_lock);
                up_read(&ubi->work_sem);
        }

        /*
         * Make sure all the works which have been done in parallel are
         * finished.
         */
        down_write(&ubi->work_sem);
        up_write(&ubi->work_sem);

        return err;
}

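/*
 * Example: because %UBI_ALL acts as a wildcard, a caller can flush the
 * pending works of one LEB or of everything at once. A hedged sketch of
 * both forms ("vol" stands for some struct ubi_volume pointer and is not
 * an identifier from this file):
 *
 *	err = ubi_wl_flush(ubi, vol->vol_id, lnum);	// one LEB only
 *	err = ubi_wl_flush(ubi, UBI_ALL, UBI_ALL);	// all pending works
 */

/**
 * scrub_possible - check whether an eraseblock can be scrubbed now.
 * @ubi: UBI device description object
 * @e: the wear-leveling entry to check
 *
 * This function returns %false if @e is already scheduled for scrubbing,
 * sits in the erroneous tree, or currently takes part in a wear-leveling
 * move, and %true otherwise.
 */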
static bool scrub_possible(struct ubi_device *ubi, struct ubi_wl_entry *e)
{
        if (in_wl_tree(e, &ubi->scrub))
                return false;
        else if (in_wl_tree(e, &ubi->erroneous))
                return false;
        else if (ubi->move_from == e)
                return false;
        else if (ubi->move_to == e)
                return false;

        return true;
}

/**
 * ubi_bitflip_check - Check an eraseblock for bitflips and scrub it if needed.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock to schedule
 * @force: don't read the block, assume bitflips happened and take action.
 *
 * This function reads the given eraseblock and checks if bitflips occurred.
 * In case of bitflips, the eraseblock is scheduled for scrubbing.
 * If scrubbing is forced with @force, the eraseblock is not read,
 * but scheduled for scrubbing right away.
 *
 * Returns:
 * %-EINVAL, PEB is out of range
 * %-ENOENT, PEB is no longer used by UBI
 * %-EBUSY, PEB cannot be checked now or a check is currently running on it
 * %-EAGAIN, bit flips happened but scrubbing is currently not possible
 * %-EUCLEAN, bit flips happened and PEB is scheduled for scrubbing
 * %0, no bit flips detected
 */
int ubi_bitflip_check(struct ubi_device *ubi, int pnum, int force)
{
        int err;
        struct ubi_wl_entry *e;

        if (pnum < 0 || pnum >= ubi->peb_count) {
                err = -EINVAL;
                goto out;
        }

        /*
         * Pause all parallel work; otherwise the erase worker could free a
         * wl entry under us.
         */
        down_write(&ubi->work_sem);

        /*
         * Make sure that the wl entry does not change state while we
         * inspect it.
         */
        spin_lock(&ubi->wl_lock);
        e = ubi->lookuptbl[pnum];
        if (!e) {
                spin_unlock(&ubi->wl_lock);
                err = -ENOENT;
                goto out_resume;
        }

        /*
         * Does it make sense to check this PEB?
         */
        if (!scrub_possible(ubi, e)) {
                spin_unlock(&ubi->wl_lock);
                err = -EBUSY;
                goto out_resume;
        }
        spin_unlock(&ubi->wl_lock);

        if (!force) {
                mutex_lock(&ubi->buf_mutex);
                err = ubi_io_read(ubi, ubi->peb_buf, pnum, 0, ubi->peb_size);
                mutex_unlock(&ubi->buf_mutex);
        }

        if (force || err == UBI_IO_BITFLIPS) {
                /*
                 * Okay, a bit flip happened; let's figure out what we can do.
                 */
                spin_lock(&ubi->wl_lock);

                /*
                 * Recheck: we released wl_lock, so UBI might have freed the
                 * wl entry under us.
                 */
                e = ubi->lookuptbl[pnum];
                if (!e) {
                        spin_unlock(&ubi->wl_lock);
                        err = -ENOENT;
                        goto out_resume;
                }

                /*
                 * Need to re-check the state.
                 */
                if (!scrub_possible(ubi, e)) {
                        spin_unlock(&ubi->wl_lock);
                        err = -EBUSY;
                        goto out_resume;
                }

                if (in_pq(ubi, e)) {
                        prot_queue_del(ubi, e->pnum);
                        wl_tree_add(e, &ubi->scrub);
                        spin_unlock(&ubi->wl_lock);

                        err = ensure_wear_leveling(ubi, 1);
                } else if (in_wl_tree(e, &ubi->used)) {
                        rb_erase(&e->u.rb, &ubi->used);
                        wl_tree_add(e, &ubi->scrub);
                        spin_unlock(&ubi->wl_lock);

                        err = ensure_wear_leveling(ubi, 1);
                } else if (in_wl_tree(e, &ubi->free)) {
                        rb_erase(&e->u.rb, &ubi->free);
                        ubi->free_count--;
                        spin_unlock(&ubi->wl_lock);

                        /*
                         * This PEB is empty, so we can schedule it for
                         * erasure right away. No wear leveling is needed.
                         */
                        err = schedule_erase(ubi, e, UBI_UNKNOWN, UBI_UNKNOWN,
                                             force ? 0 : 1, true);
                } else {
                        spin_unlock(&ubi->wl_lock);
                        err = -EAGAIN;
                }

                if (!err && !force)
                        err = -EUCLEAN;
        } else {
                err = 0;
        }

out_resume:
        up_write(&ubi->work_sem);
out:
        return err;
}
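
/*
 * Example: the return codes of ubi_bitflip_check() separate "nothing to do"
 * from "action taken". A hedged sketch of a caller consuming them (the cases
 * mirror the kernel-doc above; "pnum" is a placeholder):
 *
 *	err = ubi_bitflip_check(ubi, pnum, 0);
 *	switch (err) {
 *	case 0:		// no bit flips detected, PEB left alone
 *		break;
 *	case -EUCLEAN:	// bit flips found, PEB scheduled for scrubbing
 *		break;
 *	case -EAGAIN:	// bit flips found, but retry the check later
 *		break;
 *	default:	// -EINVAL, -ENOENT or -EBUSY: nothing was scheduled
 *		break;
 *	}
 */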

/**
 * tree_destroy - destroy an RB-tree.
 * @ubi: UBI device description object
 * @root: the root of the tree to destroy
 */
static void tree_destroy(struct ubi_device *ubi, struct rb_root *root)
{
        struct rb_node *rb;
        struct ubi_wl_entry *e;

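        /*
         * Free the tree bottom-up without recursion: descend to a leaf,
         * unlink it from its parent, destroy it, and continue from the
         * parent.
         */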
        rb = root->rb_node;
        while (rb) {
                if (rb->rb_left)
                        rb = rb->rb_left;
                else if (rb->rb_right)
                        rb = rb->rb_right;
                else {
                        e = rb_entry(rb, struct ubi_wl_entry, u.rb);

                        rb = rb_parent(rb);
                        if (rb) {
                                if (rb->rb_left == &e->u.rb)
                                        rb->rb_left = NULL;
                                else
                                        rb->rb_right = NULL;
                        }

                        wl_entry_destroy(ubi, e);
                }
        }
}

/**
 * ubi_thread - UBI background thread.
 * @u: the UBI device description object pointer
 */
int ubi_thread(void *u)
{
        int failures = 0;
        struct ubi_device *ubi = u;

        ubi_msg(ubi, "background thread \"%s\" started, PID %d",
                ubi->bgt_name, task_pid_nr(current));

        set_freezable();
        for (;;) {
                int err;

                if (kthread_should_stop())
                        break;

                if (try_to_freeze())
                        continue;

                spin_lock(&ubi->wl_lock);
                if (list_empty(&ubi->works) || ubi->ro_mode ||
                    !ubi->thread_enabled || ubi_dbg_is_bgt_disabled(ubi)) {
                        set_current_state(TASK_INTERRUPTIBLE);
                        spin_unlock(&ubi->wl_lock);
                        schedule();
                        continue;
                }
                spin_unlock(&ubi->wl_lock);

                err = do_work(ubi);
                if (err) {
                        ubi_err(ubi, "%s: work failed with error code %d",
                                ubi->bgt_name, err);
                        if (failures++ > WL_MAX_FAILURES) {
                                /*
                                 * Too many failures; disable the thread and
                                 * switch to read-only mode.
                                 */
                                ubi_msg(ubi, "%s: %d consecutive failures",
                                        ubi->bgt_name, WL_MAX_FAILURES);
                                ubi_ro_mode(ubi);
                                ubi->thread_enabled = 0;
                                continue;
                        }
                } else
                        failures = 0;

                cond_resched();
        }

        dbg_wl("background thread \"%s\" is killed", ubi->bgt_name);
        ubi->thread_enabled = 0;
        return 0;
}

/**
 * shutdown_work - shut down all pending works.
 * @ubi: UBI device description object
 */
static void shutdown_work(struct ubi_device *ubi)
{
        while (!list_empty(&ubi->works)) {
                struct ubi_work *wrk;

                wrk = list_entry(ubi->works.next, struct ubi_work, list);
                list_del(&wrk->list);
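                /*
                 * The third argument, %1, asks the work function to only
                 * release its resources instead of doing the real job.
                 */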
                wrk->func(ubi, wrk, 1);
                ubi->works_count -= 1;
                ubi_assert(ubi->works_count >= 0);
        }
}

/**
 * erase_aeb - erase a PEB described by a UBI attach info PEB.
 * @ubi: UBI device description object
 * @aeb: UBI attach info PEB
 * @sync: if %true, erase synchronously; otherwise schedule the PEB for
 *        erasure
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int erase_aeb(struct ubi_device *ubi, struct ubi_ainf_peb *aeb, bool sync)
{
        struct ubi_wl_entry *e;
        int err;

        e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
        if (!e)
                return -ENOMEM;

        e->pnum = aeb->pnum;
        e->ec = aeb->ec;
        ubi->lookuptbl[e->pnum] = e;

        if (sync) {
                err = sync_erase(ubi, e, false);
                if (err)
                        goto out_free;

                wl_tree_add(e, &ubi->free);
                ubi->free_count++;
        } else {
                err = schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0, false);
                if (err)
                        goto out_free;
        }

        return 0;

out_free:
        wl_entry_destroy(ubi, e);

        return err;
}

/**
 * ubi_wl_init - initialize the WL sub-system using attaching information.
 * @ubi: UBI device description object
 * @ai: attaching information
 *
 * This function returns zero in case of success, and a negative error code in
 * case of failure.
 */
int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
{
        int err, i, reserved_pebs, found_pebs = 0;
        struct rb_node *rb1, *rb2;
        struct ubi_ainf_volume *av;
        struct ubi_ainf_peb *aeb, *tmp;
        struct ubi_wl_entry *e;

        ubi->used = ubi->erroneous = ubi->free = ubi->scrub = RB_ROOT;
        spin_lock_init(&ubi->wl_lock);
        mutex_init(&ubi->move_mutex);
        init_rwsem(&ubi->work_sem);
        ubi->max_ec = ai->max_ec;
        INIT_LIST_HEAD(&ubi->works);

        sprintf(ubi->bgt_name, UBI_BGT_NAME_PATTERN, ubi->ubi_num);

        err = -ENOMEM;
        ubi->lookuptbl = kcalloc(ubi->peb_count, sizeof(void *), GFP_KERNEL);
        if (!ubi->lookuptbl)
                return err;

        for (i = 0; i < UBI_PROT_QUEUE_LEN; i++)
                INIT_LIST_HEAD(&ubi->pq[i]);
        ubi->pq_head = 0;

        ubi->free_count = 0;
        list_for_each_entry_safe(aeb, tmp, &ai->erase, u.list) {
                cond_resched();

                err = erase_aeb(ubi, aeb, false);
                if (err)
                        goto out_free;

                found_pebs++;
        }

        list_for_each_entry(aeb, &ai->free, u.list) {
                cond_resched();

                e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
                if (!e) {
                        err = -ENOMEM;
                        goto out_free;
                }

                e->pnum = aeb->pnum;
                e->ec = aeb->ec;
                ubi_assert(e->ec >= 0);

                wl_tree_add(e, &ubi->free);
                ubi->free_count++;

                ubi->lookuptbl[e->pnum] = e;

                found_pebs++;
        }

        ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) {
                ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) {
                        cond_resched();

                        e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
                        if (!e) {
                                err = -ENOMEM;
                                goto out_free;
                        }

                        e->pnum = aeb->pnum;
                        e->ec = aeb->ec;
                        ubi->lookuptbl[e->pnum] = e;

                        if (!aeb->scrub) {
                                dbg_wl("add PEB %d EC %d to the used tree",
                                       e->pnum, e->ec);
                                wl_tree_add(e, &ubi->used);
                        } else {
                                dbg_wl("add PEB %d EC %d to the scrub tree",
                                       e->pnum, e->ec);
                                wl_tree_add(e, &ubi->scrub);
                        }

                        found_pebs++;
                }
        }

        list_for_each_entry(aeb, &ai->fastmap, u.list) {
                cond_resched();

                e = ubi_find_fm_block(ubi, aeb->pnum);

                if (e) {
                        ubi_assert(!ubi->lookuptbl[e->pnum]);
                        ubi->lookuptbl[e->pnum] = e;
                } else {
                        bool sync = false;

                        /*
                         * Usually old Fastmap PEBs are scheduled for erasure
                         * and we don't have to care about them, but if we
                         * face a power cut before scheduling them we need to
                         * take care of them here.
                         */
                        if (ubi->lookuptbl[aeb->pnum])
                                continue;

                        /*
                         * The fastmap update code might not find a free PEB
                         * for writing the fastmap anchor to and then reuses
                         * the current fastmap anchor PEB. When this PEB gets
                         * erased and a power cut happens before it is written
                         * again, we must make sure that the fastmap attach
                         * code doesn't find any outdated fastmap anchors,
                         * hence we erase the outdated fastmap anchor PEBs
                         * synchronously here.
                         */
                        if (aeb->vol_id == UBI_FM_SB_VOLUME_ID)
                                sync = true;

                        err = erase_aeb(ubi, aeb, sync);
                        if (err)
                                goto out_free;
                }

                found_pebs++;
        }

        dbg_wl("found %i PEBs", found_pebs);

        ubi_assert(ubi->good_peb_count == found_pebs);

        reserved_pebs = WL_RESERVED_PEBS;
        ubi_fastmap_init(ubi, &reserved_pebs);

        if (ubi->avail_pebs < reserved_pebs) {
                ubi_err(ubi, "not enough physical eraseblocks (%d, need %d)",
                        ubi->avail_pebs, reserved_pebs);
                if (ubi->corr_peb_count)
                        ubi_err(ubi, "%d PEBs are corrupted and not used",
                                ubi->corr_peb_count);
                err = -ENOSPC;
                goto out_free;
        }
        ubi->avail_pebs -= reserved_pebs;
        ubi->rsvd_pebs += reserved_pebs;

        /* Schedule wear-leveling if needed */
        err = ensure_wear_leveling(ubi, 0);
        if (err)
                goto out_free;

        return 0;

out_free:
        shutdown_work(ubi);
        tree_destroy(ubi, &ubi->used);
        tree_destroy(ubi, &ubi->free);
        tree_destroy(ubi, &ubi->scrub);
        kfree(ubi->lookuptbl);
        return err;
}

/**
 * protection_queue_destroy - destroy the protection queue.
 * @ubi: UBI device description object
 */
static void protection_queue_destroy(struct ubi_device *ubi)
{
        int i;
        struct ubi_wl_entry *e, *tmp;

        for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i) {
                list_for_each_entry_safe(e, tmp, &ubi->pq[i], u.list) {
                        list_del(&e->u.list);
                        wl_entry_destroy(ubi, e);
                }
        }
}

/**
 * ubi_wl_close - close the wear-leveling sub-system.
 * @ubi: UBI device description object
 */
void ubi_wl_close(struct ubi_device *ubi)
{
        dbg_wl("close the WL sub-system");
        ubi_fastmap_close(ubi);
        shutdown_work(ubi);
        protection_queue_destroy(ubi);
        tree_destroy(ubi, &ubi->used);
        tree_destroy(ubi, &ubi->erroneous);
        tree_destroy(ubi, &ubi->free);
        tree_destroy(ubi, &ubi->scrub);
        kfree(ubi->lookuptbl);
}

/**
 * self_check_ec - make sure that the erase counter of a PEB is correct.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock number to check
 * @ec: the erase counter to check
 *
 * This function returns zero if the erase counter of physical eraseblock
 * @pnum matches @ec, and a non-zero value if not or if an error occurred.
 */
static int self_check_ec(struct ubi_device *ubi, int pnum, int ec)
{
        int err;
        long long read_ec;
        struct ubi_ec_hdr *ec_hdr;

        if (!ubi_dbg_chk_gen(ubi))
                return 0;

        ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
        if (!ec_hdr)
                return -ENOMEM;

        err = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
        if (err && err != UBI_IO_BITFLIPS) {
                /* The header does not have to exist */
                err = 0;
                goto out_free;
        }

        read_ec = be64_to_cpu(ec_hdr->ec);
        if (ec != read_ec && read_ec - ec > 1) {
                ubi_err(ubi, "self-check failed for PEB %d", pnum);
                ubi_err(ubi, "read EC is %lld, should be %d", read_ec, ec);
                dump_stack();
                err = 1;
        } else
                err = 0;

out_free:
        kfree(ec_hdr);
        return err;
}

/**
 * self_check_in_wl_tree - check that wear-leveling entry is in WL RB-tree.
 * @ubi: UBI device description object
 * @e: the wear-leveling entry to check
 * @root: the root of the tree
 *
 * This function returns zero if @e is in the @root RB-tree and %-EINVAL if it
 * is not.
 */
static int self_check_in_wl_tree(const struct ubi_device *ubi,
                                 struct ubi_wl_entry *e, struct rb_root *root)
{
        if (!ubi_dbg_chk_gen(ubi))
                return 0;

        if (in_wl_tree(e, root))
                return 0;

        ubi_err(ubi, "self-check failed for PEB %d, EC %d, RB-tree %p",
                e->pnum, e->ec, root);
        dump_stack();
        return -EINVAL;
}

/**
 * self_check_in_pq - check if wear-leveling entry is in the protection
 *                    queue.
 * @ubi: UBI device description object
 * @e: the wear-leveling entry to check
 *
 * This function returns zero if @e is in @ubi->pq and %-EINVAL if it is not.
 */
static int self_check_in_pq(const struct ubi_device *ubi,
                            struct ubi_wl_entry *e)
{
        if (!ubi_dbg_chk_gen(ubi))
                return 0;

        if (in_pq(ubi, e))
                return 0;

        ubi_err(ubi, "self-check failed for PEB %d, EC %d, Protect queue",
                e->pnum, e->ec);
        dump_stack();
        return -EINVAL;
}
#ifndef CONFIG_MTD_UBI_FASTMAP
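/**
 * get_peb_for_wl - pick a free PEB for wear-leveling and remove it from
 *                  the free tree.
 * @ubi: UBI device description object
 *
 * This function is called with @ubi->wl_lock held. It picks a suitable
 * entry from the @ubi->free RB-tree, unlinks it, and updates the free PEB
 * count.
 */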
static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
{
        struct ubi_wl_entry *e;

        e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
        self_check_in_wl_tree(ubi, e, &ubi->free);
        ubi->free_count--;
        ubi_assert(ubi->free_count >= 0);
        rb_erase(&e->u.rb, &ubi->free);

        return e;
}

/**
 * produce_free_peb - produce a free physical eraseblock.
 * @ubi: UBI device description object
 *
 * This function tries to make a free PEB by means of synchronous execution of
 * pending works. This may be needed if, for example, the background thread is
 * disabled. It is called with @ubi->wl_lock held; the lock is dropped while a
 * work is executed and re-taken afterwards. Returns zero in case of success
 * and a negative error code in case of failure.
 */
static int produce_free_peb(struct ubi_device *ubi)
{
        int err;

        while (!ubi->free.rb_node && ubi->works_count) {
                spin_unlock(&ubi->wl_lock);

                dbg_wl("do one work synchronously");
                err = do_work(ubi);

                spin_lock(&ubi->wl_lock);
                if (err)
                        return err;
        }

        return 0;
}

/**
 * ubi_wl_get_peb - get a physical eraseblock.
 * @ubi: UBI device description object
 *
 * This function returns a physical eraseblock in case of success and a
 * negative error code in case of failure.
 * Returns with ubi->fm_eba_sem held in read mode!
 */
int ubi_wl_get_peb(struct ubi_device *ubi)
{
        int err;
        struct ubi_wl_entry *e;

retry:
        down_read(&ubi->fm_eba_sem);
        spin_lock(&ubi->wl_lock);
        if (!ubi->free.rb_node) {
                if (ubi->works_count == 0) {
                        ubi_err(ubi, "no free eraseblocks");
                        ubi_assert(list_empty(&ubi->works));
                        spin_unlock(&ubi->wl_lock);
                        return -ENOSPC;
                }

                err = produce_free_peb(ubi);
                if (err < 0) {
                        spin_unlock(&ubi->wl_lock);
                        return err;
                }
                spin_unlock(&ubi->wl_lock);
                up_read(&ubi->fm_eba_sem);
                goto retry;
        }
        e = wl_get_wle(ubi);
        prot_queue_add(ubi, e);
        spin_unlock(&ubi->wl_lock);

        err = ubi_self_check_all_ff(ubi, e->pnum, ubi->vid_hdr_aloffset,
                                    ubi->peb_size - ubi->vid_hdr_aloffset);
        if (err) {
                ubi_err(ubi, "new PEB %d does not contain all 0xFF bytes", e->pnum);
                return err;
        }

        return e->pnum;
}
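
/*
 * Example: ubi_wl_get_peb() returns with @ubi->fm_eba_sem held in read mode
 * on both success and failure, so the caller must release it in either case.
 * A hedged sketch of the pattern (error handling shortened):
 *
 *	pnum = ubi_wl_get_peb(ubi);
 *	if (pnum < 0) {
 *		up_read(&ubi->fm_eba_sem);
 *		return pnum;
 *	}
 *	// ... write the VID header and data to the new PEB ...
 *	up_read(&ubi->fm_eba_sem);
 */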
#else
#include "fastmap-wl.c"
#endif