// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) International Business Machines Corp., 2006
 *
 * Authors: Artem Bityutskiy (Битюцкий Артём), Thomas Gleixner
 */

/*
 * UBI wear-leveling sub-system.
 *
 * This sub-system is responsible for wear-leveling. It works in terms of
 * physical eraseblocks and erase counters and knows nothing about logical
 * eraseblocks, volumes, etc. From this sub-system's perspective all physical
 * eraseblocks are of two types - used and free. Used physical eraseblocks are
 * those that were "get" by the 'ubi_wl_get_peb()' function, and free physical
 * eraseblocks are those that were put by the 'ubi_wl_put_peb()' function.
 *
 * Physical eraseblocks returned by 'ubi_wl_get_peb()' have only an erase
 * counter header. The rest of the physical eraseblock contains only %0xFF
 * bytes.
 *
 * When physical eraseblocks are returned to the WL sub-system by means of the
 * 'ubi_wl_put_peb()' function, they are scheduled for erasure. The erasure is
 * done asynchronously in context of the per-UBI device background thread,
 * which is also managed by the WL sub-system.
 *
 * The wear-leveling is ensured by means of moving the contents of used
 * physical eraseblocks with low erase counter to free physical eraseblocks
 * with high erase counter.
 *
 * If the WL sub-system fails to erase a physical eraseblock, it marks it as
 * bad.
 *
 * This sub-system is also responsible for scrubbing. If a bit-flip is detected
 * in a physical eraseblock, it has to be moved. Technically this is the same
 * as moving it for wear-leveling reasons.
 *
 * As it was said, for the UBI sub-system all physical eraseblocks are either
 * "free" or "used". Free eraseblocks are kept in the @wl->free RB-tree, while
 * used eraseblocks are kept in @wl->used, @wl->erroneous, or @wl->scrub
 * RB-trees, as well as (temporarily) in the @wl->pq queue.
 *
 * When the WL sub-system returns a physical eraseblock, the physical
 * eraseblock is protected from being moved for some "time". For this reason,
 * the physical eraseblock is not directly moved from the @wl->free tree to the
 * @wl->used tree. There is a protection queue in between where this
 * physical eraseblock is temporarily stored (@wl->pq).
 *
 * All this protection stuff is needed because:
 *  o we don't want to move physical eraseblocks just after we have given them
 *    to the user; instead, we first want to let users fill them up with data;
 *
 *  o there is a chance that the user will put the physical eraseblock very
 *    soon, so it makes sense not to move it for some time, but wait.
 *
 * Physical eraseblocks stay protected only for limited time. But the "time" is
 * measured in erase cycles in this case. This is implemented with help of the
 * protection queue. Eraseblocks are put to the tail of this queue when they
 * are returned by the 'ubi_wl_get_peb()', and eraseblocks are removed from the
 * head of the queue on each erase operation (for any eraseblock). So the
 * length of the queue defines how many (global) erase cycles PEBs are protected.
 *
 * To put it differently, each physical eraseblock has 2 main states: free and
 * used. The former state corresponds to the @wl->free tree. The latter state
 * is split into several sub-states:
 * o the WL movement is allowed (@wl->used tree);
 * o the WL movement is disallowed (@wl->erroneous) because the PEB is
 *   erroneous - e.g., there was a read error;
 * o the WL movement is temporarily prohibited (@wl->pq queue);
 * o scrubbing is needed (@wl->scrub tree).
 *
 * Depending on the sub-state, wear-leveling entries of the used physical
 * eraseblocks may be kept in one of those structures.
 *
 * Note, in this implementation, we keep a small in-RAM object for each physical
 * eraseblock. This is surely not a scalable solution. But it appears to be good
 * enough for moderately large flashes and it is simple. In future, one may
 * re-work this sub-system and make it more scalable.
 *
 * At the moment this sub-system does not utilize the sequence number, which
 * was introduced relatively recently. But it would be wise to do this because
 * the sequence number of a logical eraseblock characterizes how old it is. For
 * example, when we move a PEB with low erase counter, and we need to pick the
 * target PEB, we pick a PEB with the highest EC if our PEB is "old" and we
 * pick a target PEB with an average EC if our PEB is not very "old". This
 * leaves room for future re-work of the WL sub-system.
 */
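
/*
 * Illustrative summary of the PEB lifecycle described above (informal
 * sketch, not authoritative; transitions are named after the functions
 * implemented below):
 *
 *   free --ubi_wl_get_peb()--> protection queue (@wl->pq)
 *   @wl->pq --~UBI_PROT_QUEUE_LEN erase cycles--> used
 *   used --bit-flip detected--> scrub
 *   used/scrub --wear_leveling_worker()--> contents moved, PEB erased, free
 *   used --read error while being moved--> erroneous
 */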

#include <linux/slab.h>
#include <linux/crc32.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include "ubi.h"
#include "wl.h"

/* Number of physical eraseblocks reserved for wear-leveling purposes */
#define WL_RESERVED_PEBS 1

/*
 * Maximum difference between two erase counters. If this threshold is
 * exceeded, the WL sub-system starts moving data from used physical
 * eraseblocks with low erase counter to free physical eraseblocks with high
 * erase counter.
 */
#define UBI_WL_THRESHOLD CONFIG_MTD_UBI_WL_THRESHOLD

/*
 * When a physical eraseblock is moved, the WL sub-system has to pick the target
 * physical eraseblock to move to. The simplest way would be just to pick the
 * one with the highest erase counter. But in certain workloads this could lead
 * to unlimited wear of one or a few physical eraseblocks. Indeed, imagine a
 * situation when the picked physical eraseblock is constantly erased after the
 * data is written to it. So, we have a constant which limits the highest erase
 * counter of the free physical eraseblock to pick. Namely, the WL sub-system
 * does not pick eraseblocks with erase counter greater than the lowest erase
 * counter plus %WL_FREE_MAX_DIFF.
 */
#define WL_FREE_MAX_DIFF (2*UBI_WL_THRESHOLD)
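
/*
 * Illustrative helper, not used by the driver: the acceptance test that
 * find_wl_entry() below effectively applies when picking a free PEB as a
 * wear-leveling target. Assuming the default CONFIG_MTD_UBI_WL_THRESHOLD
 * of 4096 (a Kconfig-dependent assumption), a free PEB qualifies only
 * while its erase counter stays below min_free_ec + 2 * 4096.
 */
static inline bool __maybe_unused wl_pick_ok(int ec, int min_free_ec)
{
	return ec < min_free_ec + WL_FREE_MAX_DIFF;
}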

/*
 * Maximum number of consecutive background thread failures which is enough to
 * switch to read-only mode.
 */
#define WL_MAX_FAILURES 32

static int self_check_ec(struct ubi_device *ubi, int pnum, int ec);
static int self_check_in_wl_tree(const struct ubi_device *ubi,
				 struct ubi_wl_entry *e, struct rb_root *root);
static int self_check_in_pq(const struct ubi_device *ubi,
			    struct ubi_wl_entry *e);

/**
 * wl_tree_add - add a wear-leveling entry to a WL RB-tree.
 * @e: the wear-leveling entry to add
 * @root: the root of the tree
 *
 * Note, we use (erase counter, physical eraseblock number) pairs as keys in
 * the @ubi->used and @ubi->free RB-trees.
 */
static void wl_tree_add(struct ubi_wl_entry *e, struct rb_root *root)
{
	struct rb_node **p, *parent = NULL;

	p = &root->rb_node;
	while (*p) {
		struct ubi_wl_entry *e1;

		parent = *p;
		e1 = rb_entry(parent, struct ubi_wl_entry, u.rb);

		if (e->ec < e1->ec)
			p = &(*p)->rb_left;
		else if (e->ec > e1->ec)
			p = &(*p)->rb_right;
		else {
			ubi_assert(e->pnum != e1->pnum);
			if (e->pnum < e1->pnum)
				p = &(*p)->rb_left;
			else
				p = &(*p)->rb_right;
		}
	}

	rb_link_node(&e->u.rb, parent, p);
	rb_insert_color(&e->u.rb, root);
}

/**
 * wl_entry_destroy - destroy a wear-leveling entry.
 * @ubi: UBI device description object
 * @e: the wear-leveling entry to destroy
 *
 * This function destroys a wear leveling entry and removes
 * the reference from the lookup table.
 */
static void wl_entry_destroy(struct ubi_device *ubi, struct ubi_wl_entry *e)
{
	ubi->lookuptbl[e->pnum] = NULL;
	kmem_cache_free(ubi_wl_entry_slab, e);
}

/**
 * do_work - do one pending work.
 * @ubi: UBI device description object
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int do_work(struct ubi_device *ubi)
{
	int err;
	struct ubi_work *wrk;

	cond_resched();

	/*
	 * @ubi->work_sem is used to synchronize with the workers. Workers take
	 * it in read mode, so many of them may be doing work at a time. But
	 * the queue flush code has to be sure the whole queue of works is
	 * done, and it takes @ubi->work_sem in write mode.
	 */
	down_read(&ubi->work_sem);
	spin_lock(&ubi->wl_lock);
	if (list_empty(&ubi->works)) {
		spin_unlock(&ubi->wl_lock);
		up_read(&ubi->work_sem);
		return 0;
	}

	wrk = list_entry(ubi->works.next, struct ubi_work, list);
	list_del(&wrk->list);
	ubi->works_count -= 1;
	ubi_assert(ubi->works_count >= 0);
	spin_unlock(&ubi->wl_lock);

	/*
	 * Call the worker function. Do not touch the work structure
	 * after this call as it will have been freed or reused by that
	 * time by the worker function.
	 */
	err = wrk->func(ubi, wrk, 0);
	if (err)
		ubi_err(ubi, "work failed with error code %d", err);
	up_read(&ubi->work_sem);

	return err;
}

/**
 * in_wl_tree - check if wear-leveling entry is present in a WL RB-tree.
 * @e: the wear-leveling entry to check
 * @root: the root of the tree
 *
 * This function returns non-zero if @e is in the @root RB-tree and zero if it
 * is not.
 */
static int in_wl_tree(struct ubi_wl_entry *e, struct rb_root *root)
{
	struct rb_node *p;

	p = root->rb_node;
	while (p) {
		struct ubi_wl_entry *e1;

		e1 = rb_entry(p, struct ubi_wl_entry, u.rb);

		if (e->pnum == e1->pnum) {
			ubi_assert(e == e1);
			return 1;
		}

		if (e->ec < e1->ec)
			p = p->rb_left;
		else if (e->ec > e1->ec)
			p = p->rb_right;
		else {
			ubi_assert(e->pnum != e1->pnum);
			if (e->pnum < e1->pnum)
				p = p->rb_left;
			else
				p = p->rb_right;
		}
	}

	return 0;
}

/**
 * in_pq - check if a wear-leveling entry is present in the protection queue.
 * @ubi: UBI device description object
 * @e: the wear-leveling entry to check
 *
 * This function returns non-zero if @e is in the protection queue and zero
 * if it is not.
 */
static inline int in_pq(const struct ubi_device *ubi, struct ubi_wl_entry *e)
{
	struct ubi_wl_entry *p;
	int i;

	for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i)
		list_for_each_entry(p, &ubi->pq[i], u.list)
			if (p == e)
				return 1;

	return 0;
}

/**
 * prot_queue_add - add physical eraseblock to the protection queue.
 * @ubi: UBI device description object
 * @e: the physical eraseblock to add
 *
 * This function adds @e to the tail of the protection queue @ubi->pq, where
 * @e will stay for %UBI_PROT_QUEUE_LEN erase operations and will be
 * temporarily protected from the wear-leveling worker. Note, @ubi->wl_lock
 * has to be locked.
 */
static void prot_queue_add(struct ubi_device *ubi, struct ubi_wl_entry *e)
{
	int pq_tail = ubi->pq_head - 1;

	if (pq_tail < 0)
		pq_tail = UBI_PROT_QUEUE_LEN - 1;
	ubi_assert(pq_tail >= 0 && pq_tail < UBI_PROT_QUEUE_LEN);
	list_add_tail(&e->u.list, &ubi->pq[pq_tail]);
	dbg_wl("added PEB %d EC %d to the protection queue", e->pnum, e->ec);
}

/**
 * find_wl_entry - find wear-leveling entry closest to certain erase counter.
 * @ubi: UBI device description object
 * @root: the RB-tree where to look for
 * @diff: maximum possible difference from the smallest erase counter
 *
 * This function looks for a wear leveling entry with erase counter closest to
 * min + @diff, where min is the smallest erase counter.
 */
static struct ubi_wl_entry *find_wl_entry(struct ubi_device *ubi,
					  struct rb_root *root, int diff)
{
	struct rb_node *p;
	struct ubi_wl_entry *e;
	int max;

	e = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb);
	max = e->ec + diff;

	p = root->rb_node;
	while (p) {
		struct ubi_wl_entry *e1;

		e1 = rb_entry(p, struct ubi_wl_entry, u.rb);
		if (e1->ec >= max)
			p = p->rb_left;
		else {
			p = p->rb_right;
			e = e1;
		}
	}

	return e;
}
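
/*
 * Worked example for find_wl_entry() (values assumed for illustration):
 * with free PEBs at erase counters {100, 150, 210, 400} and @diff == 200,
 * max == 100 + 200 == 300, so the rightmost entry with ec < 300 is
 * returned - the PEB with EC 210.
 */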

/**
 * find_mean_wl_entry - find wear-leveling entry with medium erase counter.
 * @ubi: UBI device description object
 * @root: the RB-tree where to look for
 *
 * This function looks for a wear leveling entry with a medium erase counter,
 * but not greater than or equal to the lowest erase counter plus
 * %WL_FREE_MAX_DIFF/2.
 */
static struct ubi_wl_entry *find_mean_wl_entry(struct ubi_device *ubi,
					       struct rb_root *root)
{
	struct ubi_wl_entry *e, *first, *last;

	first = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb);
	last = rb_entry(rb_last(root), struct ubi_wl_entry, u.rb);

	if (last->ec - first->ec < WL_FREE_MAX_DIFF) {
		e = rb_entry(root->rb_node, struct ubi_wl_entry, u.rb);

		/* If no fastmap has been written and this WL entry can be used
		 * as anchor PEB, hold it back and return the second best
		 * WL entry such that fastmap can use the anchor PEB later. */
		e = may_reserve_for_fm(ubi, e, root);
	} else
		e = find_wl_entry(ubi, root, WL_FREE_MAX_DIFF/2);

	return e;
}

/**
 * wl_get_wle - get a mean wl entry to be used by ubi_wl_get_peb() or
 * refill_wl_user_pool().
 * @ubi: UBI device description object
 *
 * This function returns a wear leveling entry in case of success and
 * NULL in case of failure.
 */
static struct ubi_wl_entry *wl_get_wle(struct ubi_device *ubi)
{
	struct ubi_wl_entry *e;

	e = find_mean_wl_entry(ubi, &ubi->free);
	if (!e) {
		ubi_err(ubi, "no free eraseblocks");
		return NULL;
	}

	self_check_in_wl_tree(ubi, e, &ubi->free);

	/*
	 * Move the physical eraseblock to the protection queue where it will
	 * be protected from being moved for some time.
	 */
	rb_erase(&e->u.rb, &ubi->free);
	ubi->free_count--;
	dbg_wl("PEB %d EC %d", e->pnum, e->ec);

	return e;
}

/**
 * prot_queue_del - remove a physical eraseblock from the protection queue.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock to remove
 *
 * This function deletes PEB @pnum from the protection queue and returns zero
 * in case of success and %-ENODEV if the PEB was not found.
 */
static int prot_queue_del(struct ubi_device *ubi, int pnum)
{
	struct ubi_wl_entry *e;

	e = ubi->lookuptbl[pnum];
	if (!e)
		return -ENODEV;

	if (self_check_in_pq(ubi, e))
		return -ENODEV;

	list_del(&e->u.list);
	dbg_wl("deleted PEB %d from the protection queue", e->pnum);
	return 0;
}

/**
 * sync_erase - synchronously erase a physical eraseblock.
 * @ubi: UBI device description object
 * @e: the physical eraseblock to erase
 * @torture: if the physical eraseblock has to be tortured
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
		      int torture)
{
	int err;
	struct ubi_ec_hdr *ec_hdr;
	unsigned long long ec = e->ec;

	dbg_wl("erase PEB %d, old EC %llu", e->pnum, ec);

	err = self_check_ec(ubi, e->pnum, e->ec);
	if (err)
		return -EINVAL;

	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
	if (!ec_hdr)
		return -ENOMEM;

	err = ubi_io_sync_erase(ubi, e->pnum, torture);
	if (err < 0)
		goto out_free;

	ec += err;
	if (ec > UBI_MAX_ERASECOUNTER) {
		/*
		 * Erase counter overflow. Upgrade UBI and use 64-bit
		 * erase counters internally.
		 */
		ubi_err(ubi, "erase counter overflow at PEB %d, EC %llu",
			e->pnum, ec);
		err = -EINVAL;
		goto out_free;
	}

	dbg_wl("erased PEB %d, new EC %llu", e->pnum, ec);

	ec_hdr->ec = cpu_to_be64(ec);

	err = ubi_io_write_ec_hdr(ubi, e->pnum, ec_hdr);
	if (err)
		goto out_free;

	e->ec = ec;
	spin_lock(&ubi->wl_lock);
	if (e->ec > ubi->max_ec)
		ubi->max_ec = e->ec;
	spin_unlock(&ubi->wl_lock);

out_free:
	kfree(ec_hdr);
	return err;
}

/**
 * serve_prot_queue - check if it is time to stop protecting PEBs.
 * @ubi: UBI device description object
 *
 * This function is called after each erase operation and removes PEBs from the
 * tail of the protection queue. These PEBs have been protected for long enough
 * and should be moved to the used tree.
 */
static void serve_prot_queue(struct ubi_device *ubi)
{
	struct ubi_wl_entry *e, *tmp;
	int count;

	/*
	 * There may be several protected physical eraseblocks to remove,
	 * process them all.
	 */
repeat:
	count = 0;
	spin_lock(&ubi->wl_lock);
	list_for_each_entry_safe(e, tmp, &ubi->pq[ubi->pq_head], u.list) {
		dbg_wl("PEB %d EC %d protection over, move to used tree",
			e->pnum, e->ec);

		list_del(&e->u.list);
		wl_tree_add(e, &ubi->used);
		if (count++ > 32) {
			/*
			 * Let's be nice and avoid holding the spinlock for
			 * too long.
			 */
			spin_unlock(&ubi->wl_lock);
			cond_resched();
			goto repeat;
		}
	}

	ubi->pq_head += 1;
	if (ubi->pq_head == UBI_PROT_QUEUE_LEN)
		ubi->pq_head = 0;
	ubi_assert(ubi->pq_head >= 0 && ubi->pq_head < UBI_PROT_QUEUE_LEN);
	spin_unlock(&ubi->wl_lock);
}
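
/*
 * Minimal user-space model (a sketch under assumed values, not driver
 * code) of how prot_queue_add() and serve_prot_queue() bound the
 * protection time: an entry enqueued at the tail slot (pq_head - 1) is
 * reached after the head has advanced PQ_LEN - 1 more times, i.e. after
 * roughly one full sweep of global erase operations.
 */
#if 0
#include <stdio.h>

#define PQ_LEN 10			/* stand-in for UBI_PROT_QUEUE_LEN */

int main(void)
{
	int head = 0, cycles = 0;
	int tail = head ? head - 1 : PQ_LEN - 1; /* prot_queue_add() arithmetic */

	while (head != tail) {			/* one iteration per served erase */
		head = (head + 1) % PQ_LEN;	/* serve_prot_queue() advances head */
		cycles++;
	}
	printf("tail slot reached after %d erase cycles\n", cycles); /* PQ_LEN - 1 */
	return 0;
}
#endif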

/**
 * __schedule_ubi_work - schedule a work.
 * @ubi: UBI device description object
 * @wrk: the work to schedule
 *
 * This function adds a work defined by @wrk to the tail of the pending works
 * list. Can only be used if ubi->work_sem is already held in read mode!
 */
static void __schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
{
	spin_lock(&ubi->wl_lock);
	list_add_tail(&wrk->list, &ubi->works);
	ubi_assert(ubi->works_count >= 0);
	ubi->works_count += 1;
	if (ubi->thread_enabled && !ubi_dbg_is_bgt_disabled(ubi))
		wake_up_process(ubi->bgt_thread);
	spin_unlock(&ubi->wl_lock);
}

/**
 * schedule_ubi_work - schedule a work.
 * @ubi: UBI device description object
 * @wrk: the work to schedule
 *
 * This function adds a work defined by @wrk to the tail of the pending works
 * list.
 */
static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
{
	down_read(&ubi->work_sem);
	__schedule_ubi_work(ubi, wrk);
	up_read(&ubi->work_sem);
}

static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
			int shutdown);

/**
 * schedule_erase - schedule an erase work.
 * @ubi: UBI device description object
 * @e: the WL entry of the physical eraseblock to erase
 * @vol_id: the volume ID that last used this PEB
 * @lnum: the last used logical eraseblock number for the PEB
 * @torture: if the physical eraseblock has to be tortured
 * @nested: set to non-zero if this work is scheduled from within another
 *          work, with @ubi->work_sem already held in read mode
 *
 * This function returns zero in case of success and %-ENOMEM in case of
 * failure.
 */
static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
			  int vol_id, int lnum, int torture, bool nested)
{
	struct ubi_work *wl_wrk;

	ubi_assert(e);

	dbg_wl("schedule erasure of PEB %d, EC %d, torture %d",
	       e->pnum, e->ec, torture);

	wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
	if (!wl_wrk)
		return -ENOMEM;

	wl_wrk->func = &erase_worker;
	wl_wrk->e = e;
	wl_wrk->vol_id = vol_id;
	wl_wrk->lnum = lnum;
	wl_wrk->torture = torture;

	if (nested)
		__schedule_ubi_work(ubi, wl_wrk);
	else
		schedule_ubi_work(ubi, wl_wrk);
	return 0;
}

static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk);
/**
 * do_sync_erase - run the erase worker synchronously.
 * @ubi: UBI device description object
 * @e: the WL entry of the physical eraseblock to erase
 * @vol_id: the volume ID that last used this PEB
 * @lnum: the last used logical eraseblock number for the PEB
 * @torture: if the physical eraseblock has to be tortured
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int do_sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
			 int vol_id, int lnum, int torture)
{
	struct ubi_work wl_wrk;

	dbg_wl("sync erase of PEB %i", e->pnum);

	wl_wrk.e = e;
	wl_wrk.vol_id = vol_id;
	wl_wrk.lnum = lnum;
	wl_wrk.torture = torture;

	return __erase_worker(ubi, &wl_wrk);
}

static int ensure_wear_leveling(struct ubi_device *ubi, int nested);
/**
 * wear_leveling_worker - wear-leveling worker function.
 * @ubi: UBI device description object
 * @wrk: the work object
 * @shutdown: non-zero if the worker has to free memory and exit
 * because the WL-subsystem is shutting down
 *
 * This function copies a more worn out physical eraseblock to a less worn out
 * one. Returns zero in case of success and a negative error code in case of
 * failure.
 */
static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
				int shutdown)
{
	int err, scrubbing = 0, torture = 0, protect = 0, erroneous = 0;
	int erase = 0, keep = 0, vol_id = -1, lnum = -1;
	struct ubi_wl_entry *e1, *e2;
	struct ubi_vid_io_buf *vidb;
	struct ubi_vid_hdr *vid_hdr;
	int dst_leb_clean = 0;

	kfree(wrk);
	if (shutdown)
		return 0;

	vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
	if (!vidb)
		return -ENOMEM;

	vid_hdr = ubi_get_vid_hdr(vidb);

	down_read(&ubi->fm_eba_sem);
	mutex_lock(&ubi->move_mutex);
	spin_lock(&ubi->wl_lock);
	ubi_assert(!ubi->move_from && !ubi->move_to);
	ubi_assert(!ubi->move_to_put);

	if (!ubi->free.rb_node ||
	    (!ubi->used.rb_node && !ubi->scrub.rb_node)) {
		/*
		 * No free physical eraseblocks? Well, they must be waiting in
		 * the queue to be erased. Cancel movement - it will be
		 * triggered again when a free physical eraseblock appears.
		 *
		 * No used physical eraseblocks? They must be temporarily
		 * protected from being moved. They will be moved to the
		 * @ubi->used tree later and the wear-leveling will be
		 * triggered again.
		 */
		dbg_wl("cancel WL, a list is empty: free %d, used %d",
		       !ubi->free.rb_node, !ubi->used.rb_node);
		goto out_cancel;
	}

#ifdef CONFIG_MTD_UBI_FASTMAP
	e1 = find_anchor_wl_entry(&ubi->used);
	if (e1 && ubi->fm_anchor &&
	    (ubi->fm_anchor->ec - e1->ec >= UBI_WL_THRESHOLD)) {
		ubi->fm_do_produce_anchor = 1;
		/*
		 * fm_anchor is no longer considered a good anchor.
		 * NULL assignment also prevents multiple wear level checks
		 * of this PEB.
		 */
		wl_tree_add(ubi->fm_anchor, &ubi->free);
		ubi->fm_anchor = NULL;
		ubi->free_count++;
	}

	if (ubi->fm_do_produce_anchor) {
		if (!e1)
			goto out_cancel;
		e2 = get_peb_for_wl(ubi);
		if (!e2)
			goto out_cancel;

		self_check_in_wl_tree(ubi, e1, &ubi->used);
		rb_erase(&e1->u.rb, &ubi->used);
		dbg_wl("anchor-move PEB %d to PEB %d", e1->pnum, e2->pnum);
		ubi->fm_do_produce_anchor = 0;
	} else if (!ubi->scrub.rb_node) {
#else
	if (!ubi->scrub.rb_node) {
#endif
		/*
		 * Now pick the least worn-out used physical eraseblock and a
		 * highly worn-out free physical eraseblock. If the erase
		 * counters differ enough, start wear-leveling.
		 */
		e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
		e2 = get_peb_for_wl(ubi);
		if (!e2)
			goto out_cancel;

		if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) {
			dbg_wl("no WL needed: min used EC %d, max free EC %d",
			       e1->ec, e2->ec);

			/* Give the unused PEB back */
			wl_tree_add(e2, &ubi->free);
			ubi->free_count++;
			goto out_cancel;
		}
		self_check_in_wl_tree(ubi, e1, &ubi->used);
		rb_erase(&e1->u.rb, &ubi->used);
		dbg_wl("move PEB %d EC %d to PEB %d EC %d",
		       e1->pnum, e1->ec, e2->pnum, e2->ec);
	} else {
		/* Perform scrubbing */
		scrubbing = 1;
		e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, u.rb);
		e2 = get_peb_for_wl(ubi);
		if (!e2)
			goto out_cancel;

		self_check_in_wl_tree(ubi, e1, &ubi->scrub);
		rb_erase(&e1->u.rb, &ubi->scrub);
		dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum);
	}

	ubi->move_from = e1;
	ubi->move_to = e2;
	spin_unlock(&ubi->wl_lock);

	/*
	 * Now we are going to copy physical eraseblock @e1->pnum to @e2->pnum.
	 * We so far do not know which logical eraseblock our physical
	 * eraseblock (@e1) belongs to. We have to read the volume identifier
	 * header first.
	 *
	 * Note, we are protected from this PEB being unmapped and erased. The
	 * 'ubi_wl_put_peb()' would wait for moving to be finished if the PEB
	 * which is being moved was unmapped.
	 */

	err = ubi_io_read_vid_hdr(ubi, e1->pnum, vidb, 0);
	if (err && err != UBI_IO_BITFLIPS) {
		dst_leb_clean = 1;
		if (err == UBI_IO_FF) {
			/*
			 * We are trying to move PEB without a VID header. UBI
			 * always writes VID headers shortly after the PEB was
			 * given, so we have a situation when it has not yet
			 * had a chance to write it, because it was preempted.
			 * So add this PEB to the protection queue for now,
			 * because presumably more data will be written there
			 * (including the missing VID header), and then we'll
			 * move it.
			 */
			dbg_wl("PEB %d has no VID header", e1->pnum);
			protect = 1;
			goto out_not_moved;
		} else if (err == UBI_IO_FF_BITFLIPS) {
			/*
			 * The same situation as %UBI_IO_FF, but bit-flips were
			 * detected. It is better to schedule this PEB for
			 * scrubbing.
			 */
			dbg_wl("PEB %d has no VID header but has bit-flips",
			       e1->pnum);
			scrubbing = 1;
			goto out_not_moved;
		} else if (ubi->fast_attach && err == UBI_IO_BAD_HDR_EBADMSG) {
			/*
			 * While a full scan would detect interrupted erasures
			 * at attach time, we can face them here when attaching
			 * from Fastmap.
			 */
			dbg_wl("PEB %d has ECC errors, maybe from an interrupted erasure",
			       e1->pnum);
			erase = 1;
			goto out_not_moved;
		}

		ubi_err(ubi, "error %d while reading VID header from PEB %d",
			err, e1->pnum);
		goto out_error;
	}

	vol_id = be32_to_cpu(vid_hdr->vol_id);
	lnum = be32_to_cpu(vid_hdr->lnum);

	err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vidb);
	if (err) {
		if (err == MOVE_CANCEL_RACE) {
			/*
			 * The LEB has not been moved because the volume is
			 * being deleted or the PEB has been put meanwhile. We
			 * should prevent this PEB from being selected for
			 * wear-leveling movement again, so put it to the
			 * protection queue.
			 */
			protect = 1;
			dst_leb_clean = 1;
			goto out_not_moved;
		}
		if (err == MOVE_RETRY) {
			scrubbing = 1;
			dst_leb_clean = 1;
			goto out_not_moved;
		}
		if (err == MOVE_TARGET_BITFLIPS || err == MOVE_TARGET_WR_ERR ||
		    err == MOVE_TARGET_RD_ERR) {
			/*
			 * Target PEB had bit-flips or write error - torture it.
			 */
			torture = 1;
			keep = 1;
			goto out_not_moved;
		}

		if (err == MOVE_SOURCE_RD_ERR) {
			/*
			 * An error happened while reading the source PEB. Do
			 * not switch to R/O mode in this case, and give the
			 * upper layers a possibility to recover from this,
			 * e.g. by unmapping corresponding LEB. Instead, just
			 * put this PEB to the @ubi->erroneous list to prevent
			 * UBI from trying to move it over and over again.
			 */
			if (ubi->erroneous_peb_count > ubi->max_erroneous) {
				ubi_err(ubi, "too many erroneous eraseblocks (%d)",
					ubi->erroneous_peb_count);
				goto out_error;
			}
			dst_leb_clean = 1;
			erroneous = 1;
			goto out_not_moved;
		}

		if (err < 0)
			goto out_error;

		ubi_assert(0);
	}

	/* The PEB has been successfully moved */
	if (scrubbing)
		ubi_msg(ubi, "scrubbed PEB %d (LEB %d:%d), data moved to PEB %d",
			e1->pnum, vol_id, lnum, e2->pnum);
	ubi_free_vid_buf(vidb);

	spin_lock(&ubi->wl_lock);
	if (!ubi->move_to_put) {
		wl_tree_add(e2, &ubi->used);
		e2 = NULL;
	}
	ubi->move_from = ubi->move_to = NULL;
	ubi->move_to_put = ubi->wl_scheduled = 0;
	spin_unlock(&ubi->wl_lock);

	err = do_sync_erase(ubi, e1, vol_id, lnum, 0);
	if (err) {
		if (e2)
			wl_entry_destroy(ubi, e2);
		goto out_ro;
	}

	if (e2) {
		/*
		 * Well, the target PEB was put meanwhile, schedule it for
		 * erasure.
		 */
		dbg_wl("PEB %d (LEB %d:%d) was put meanwhile, erase",
		       e2->pnum, vol_id, lnum);
		err = do_sync_erase(ubi, e2, vol_id, lnum, 0);
		if (err)
			goto out_ro;
	}

	dbg_wl("done");
	mutex_unlock(&ubi->move_mutex);
	up_read(&ubi->fm_eba_sem);
	return 0;

	/*
	 * For some reason the LEB was not moved, might be an error, might be
	 * something else. @e1 was not changed, so return it back. @e2 might
	 * have been changed, schedule it for erasure.
	 */
out_not_moved:
	if (vol_id != -1)
		dbg_wl("cancel moving PEB %d (LEB %d:%d) to PEB %d (%d)",
		       e1->pnum, vol_id, lnum, e2->pnum, err);
	else
		dbg_wl("cancel moving PEB %d to PEB %d (%d)",
		       e1->pnum, e2->pnum, err);
	spin_lock(&ubi->wl_lock);
	if (protect)
		prot_queue_add(ubi, e1);
	else if (erroneous) {
		wl_tree_add(e1, &ubi->erroneous);
		ubi->erroneous_peb_count += 1;
	} else if (scrubbing)
		wl_tree_add(e1, &ubi->scrub);
	else if (keep)
		wl_tree_add(e1, &ubi->used);
	if (dst_leb_clean) {
		wl_tree_add(e2, &ubi->free);
		ubi->free_count++;
	}

	ubi_assert(!ubi->move_to_put);
	ubi->move_from = ubi->move_to = NULL;
	ubi->wl_scheduled = 0;
	spin_unlock(&ubi->wl_lock);

	ubi_free_vid_buf(vidb);
	if (dst_leb_clean) {
		ensure_wear_leveling(ubi, 1);
	} else {
		err = do_sync_erase(ubi, e2, vol_id, lnum, torture);
		if (err)
			goto out_ro;
	}

	if (erase) {
		err = do_sync_erase(ubi, e1, vol_id, lnum, 1);
		if (err)
			goto out_ro;
	}

	mutex_unlock(&ubi->move_mutex);
	up_read(&ubi->fm_eba_sem);
	return 0;

out_error:
	if (vol_id != -1)
		ubi_err(ubi, "error %d while moving PEB %d (LEB %d:%d) to PEB %d",
			err, e1->pnum, vol_id, lnum, e2->pnum);
	else
		ubi_err(ubi, "error %d while moving PEB %d to PEB %d",
			err, e1->pnum, e2->pnum);
	spin_lock(&ubi->wl_lock);
	ubi->move_from = ubi->move_to = NULL;
	ubi->move_to_put = ubi->wl_scheduled = 0;
	spin_unlock(&ubi->wl_lock);

	ubi_free_vid_buf(vidb);
	wl_entry_destroy(ubi, e1);
	wl_entry_destroy(ubi, e2);

out_ro:
	ubi_ro_mode(ubi);
	mutex_unlock(&ubi->move_mutex);
	up_read(&ubi->fm_eba_sem);
	ubi_assert(err != 0);
	return err < 0 ? err : -EIO;

out_cancel:
	ubi->wl_scheduled = 0;
	spin_unlock(&ubi->wl_lock);
	mutex_unlock(&ubi->move_mutex);
	up_read(&ubi->fm_eba_sem);
	ubi_free_vid_buf(vidb);
	return 0;
}

/**
 * ensure_wear_leveling - schedule wear-leveling if it is needed.
 * @ubi: UBI device description object
 * @nested: set to non-zero if this function is called from UBI worker
 *
 * This function checks if it is time to start wear-leveling and schedules it
 * if yes. This function returns zero in case of success and a negative error
 * code in case of failure.
 */
static int ensure_wear_leveling(struct ubi_device *ubi, int nested)
{
	int err = 0;
	struct ubi_wl_entry *e1;
	struct ubi_wl_entry *e2;
	struct ubi_work *wrk;

	spin_lock(&ubi->wl_lock);
	if (ubi->wl_scheduled)
		/* Wear-leveling is already in the work queue */
		goto out_unlock;

	/*
	 * If the ubi->scrub tree is not empty, scrubbing is needed, and the
	 * WL worker has to be scheduled anyway.
	 */
	if (!ubi->scrub.rb_node) {
		if (!ubi->used.rb_node || !ubi->free.rb_node)
			/* No physical eraseblocks - no deal */
			goto out_unlock;

		/*
		 * We schedule wear-leveling only if the difference between the
		 * lowest erase counter of used physical eraseblocks and a high
		 * erase counter of free physical eraseblocks is greater than
		 * %UBI_WL_THRESHOLD.
		 */
		e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
		e2 = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);

		if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD))
			goto out_unlock;
		dbg_wl("schedule wear-leveling");
	} else
		dbg_wl("schedule scrubbing");

	ubi->wl_scheduled = 1;
	spin_unlock(&ubi->wl_lock);

	wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
	if (!wrk) {
		err = -ENOMEM;
		goto out_cancel;
	}

	wrk->func = &wear_leveling_worker;
	if (nested)
		__schedule_ubi_work(ubi, wrk);
	else
		schedule_ubi_work(ubi, wrk);
	return err;

out_cancel:
	spin_lock(&ubi->wl_lock);
	ubi->wl_scheduled = 0;
out_unlock:
	spin_unlock(&ubi->wl_lock);
	return err;
}

/**
 * __erase_worker - physical eraseblock erase worker function.
 * @ubi: UBI device description object
 * @wl_wrk: the work object
 *
 * This function erases a physical eraseblock and performs torture testing if
 * needed. It also takes care of marking the physical eraseblock bad if
 * needed. Returns zero in case of success and a negative error code in case of
 * failure.
 */
static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk)
{
	struct ubi_wl_entry *e = wl_wrk->e;
	int pnum = e->pnum;
	int vol_id = wl_wrk->vol_id;
	int lnum = wl_wrk->lnum;
	int err, available_consumed = 0;

	dbg_wl("erase PEB %d EC %d LEB %d:%d",
	       pnum, e->ec, wl_wrk->vol_id, wl_wrk->lnum);

	err = sync_erase(ubi, e, wl_wrk->torture);
	if (!err) {
		spin_lock(&ubi->wl_lock);

		if (!ubi->fm_disabled && !ubi->fm_anchor &&
		    e->pnum < UBI_FM_MAX_START) {
			/*
			 * Abort anchor production, if needed it will be
			 * enabled again in the wear leveling started below.
			 */
			ubi->fm_anchor = e;
			ubi->fm_do_produce_anchor = 0;
		} else {
			wl_tree_add(e, &ubi->free);
			ubi->free_count++;
		}

		spin_unlock(&ubi->wl_lock);

		/*
		 * One more erase operation has happened, take care of
		 * protected physical eraseblocks.
		 */
		serve_prot_queue(ubi);

		/* And take care of wear-leveling */
		err = ensure_wear_leveling(ubi, 1);
		return err;
	}

	ubi_err(ubi, "failed to erase PEB %d, error %d", pnum, err);

	if (err == -EINTR || err == -ENOMEM || err == -EAGAIN ||
	    err == -EBUSY) {
		int err1;

		/* Re-schedule the PEB for erasure */
		err1 = schedule_erase(ubi, e, vol_id, lnum, 0, false);
		if (err1) {
			wl_entry_destroy(ubi, e);
			err = err1;
			goto out_ro;
		}
		return err;
	}

	wl_entry_destroy(ubi, e);
	if (err != -EIO)
		/*
		 * If this is not %-EIO, we have no idea what to do. Scheduling
		 * this physical eraseblock for erasure again would cause
		 * errors again and again. Well, let's switch to R/O mode.
		 */
		goto out_ro;

	/* It is %-EIO, the PEB went bad */

	if (!ubi->bad_allowed) {
		ubi_err(ubi, "bad physical eraseblock %d detected", pnum);
		goto out_ro;
	}

	spin_lock(&ubi->volumes_lock);
	if (ubi->beb_rsvd_pebs == 0) {
		if (ubi->avail_pebs == 0) {
			spin_unlock(&ubi->volumes_lock);
			ubi_err(ubi, "no reserved/available physical eraseblocks");
			goto out_ro;
		}
		ubi->avail_pebs -= 1;
		available_consumed = 1;
	}
	spin_unlock(&ubi->volumes_lock);

	ubi_msg(ubi, "mark PEB %d as bad", pnum);
	err = ubi_io_mark_bad(ubi, pnum);
	if (err)
		goto out_ro;

	spin_lock(&ubi->volumes_lock);
	if (ubi->beb_rsvd_pebs > 0) {
		if (available_consumed) {
			/*
			 * The number of reserved PEBs increased since we last
			 * checked.
			 */
			ubi->avail_pebs += 1;
			available_consumed = 0;
		}
		ubi->beb_rsvd_pebs -= 1;
	}
	ubi->bad_peb_count += 1;
	ubi->good_peb_count -= 1;
	ubi_calculate_reserved(ubi);
	if (available_consumed)
		ubi_warn(ubi, "no PEBs in the reserved pool, used an available PEB");
	else if (ubi->beb_rsvd_pebs)
		ubi_msg(ubi, "%d PEBs left in the reserve",
			ubi->beb_rsvd_pebs);
	else
		ubi_warn(ubi, "last PEB from the reserve was used");
	spin_unlock(&ubi->volumes_lock);

	return err;

out_ro:
	if (available_consumed) {
		spin_lock(&ubi->volumes_lock);
		ubi->avail_pebs += 1;
		spin_unlock(&ubi->volumes_lock);
	}
	ubi_ro_mode(ubi);
	return err;
}

static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
			  int shutdown)
{
	int ret;

	if (shutdown) {
		struct ubi_wl_entry *e = wl_wrk->e;

		dbg_wl("cancel erasure of PEB %d EC %d", e->pnum, e->ec);
		kfree(wl_wrk);
		wl_entry_destroy(ubi, e);
		return 0;
	}

	ret = __erase_worker(ubi, wl_wrk);
	kfree(wl_wrk);
	return ret;
}

/**
 * ubi_wl_put_peb - return a PEB to the wear-leveling sub-system.
 * @ubi: UBI device description object
 * @vol_id: the volume ID that last used this PEB
 * @lnum: the last used logical eraseblock number for the PEB
 * @pnum: physical eraseblock to return
 * @torture: if this physical eraseblock has to be tortured
 *
 * This function is called to return physical eraseblock @pnum to the pool of
 * free physical eraseblocks. The @torture flag has to be set if an I/O error
 * occurred to this @pnum and it has to be tested. This function returns zero
 * in case of success, and a negative error code in case of failure.
 */
int ubi_wl_put_peb(struct ubi_device *ubi, int vol_id, int lnum,
		   int pnum, int torture)
{
	int err;
	struct ubi_wl_entry *e;

	dbg_wl("PEB %d", pnum);
	ubi_assert(pnum >= 0);
	ubi_assert(pnum < ubi->peb_count);

	down_read(&ubi->fm_protect);

retry:
	spin_lock(&ubi->wl_lock);
	e = ubi->lookuptbl[pnum];
	if (e == ubi->move_from) {
		/*
		 * User is putting the physical eraseblock which was selected to
		 * be moved. It will be scheduled for erasure in the
		 * wear-leveling worker.
		 */
		dbg_wl("PEB %d is being moved, wait", pnum);
		spin_unlock(&ubi->wl_lock);

		/* Wait for the WL worker by taking the @ubi->move_mutex */
		mutex_lock(&ubi->move_mutex);
		mutex_unlock(&ubi->move_mutex);
		goto retry;
	} else if (e == ubi->move_to) {
		/*
		 * User is putting the physical eraseblock which was selected
		 * as the target the data is moved to. It may happen if the EBA
		 * sub-system already re-mapped the LEB in 'ubi_eba_copy_leb()'
		 * but the WL sub-system has not put the PEB to the "used" tree
		 * yet, but it is about to do this. So we just set a flag which
		 * will tell the WL worker that the PEB is not needed anymore
		 * and should be scheduled for erasure.
		 */
		dbg_wl("PEB %d is the target of data moving", pnum);
		ubi_assert(!ubi->move_to_put);
		ubi->move_to_put = 1;
		spin_unlock(&ubi->wl_lock);
		up_read(&ubi->fm_protect);
		return 0;
	} else {
		if (in_wl_tree(e, &ubi->used)) {
			self_check_in_wl_tree(ubi, e, &ubi->used);
			rb_erase(&e->u.rb, &ubi->used);
		} else if (in_wl_tree(e, &ubi->scrub)) {
			self_check_in_wl_tree(ubi, e, &ubi->scrub);
			rb_erase(&e->u.rb, &ubi->scrub);
		} else if (in_wl_tree(e, &ubi->erroneous)) {
			self_check_in_wl_tree(ubi, e, &ubi->erroneous);
			rb_erase(&e->u.rb, &ubi->erroneous);
			ubi->erroneous_peb_count -= 1;
			ubi_assert(ubi->erroneous_peb_count >= 0);
			/* Erroneous PEBs should be tortured */
			torture = 1;
		} else {
			err = prot_queue_del(ubi, e->pnum);
			if (err) {
				ubi_err(ubi, "PEB %d not found", pnum);
				ubi_ro_mode(ubi);
				spin_unlock(&ubi->wl_lock);
				up_read(&ubi->fm_protect);
				return err;
			}
		}
	}
	spin_unlock(&ubi->wl_lock);

	err = schedule_erase(ubi, e, vol_id, lnum, torture, false);
	if (err) {
		spin_lock(&ubi->wl_lock);
		wl_tree_add(e, &ubi->used);
		spin_unlock(&ubi->wl_lock);
	}

	up_read(&ubi->fm_protect);
	return err;
}

/**
 * ubi_wl_scrub_peb - schedule a physical eraseblock for scrubbing.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock to schedule
 *
 * If a bit-flip in a physical eraseblock is detected, this physical eraseblock
 * needs scrubbing. This function schedules a physical eraseblock for
 * scrubbing, which is done in the background. This function returns zero in case of
1322  * success and a negative error code in case of failure.
1323  */
1324 int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum)
1325 {
1326         struct ubi_wl_entry *e;
1327
1328         ubi_msg(ubi, "schedule PEB %d for scrubbing", pnum);
1329
1330 retry:
1331         spin_lock(&ubi->wl_lock);
1332         e = ubi->lookuptbl[pnum];
1333         if (e == ubi->move_from || in_wl_tree(e, &ubi->scrub) ||
1334                                    in_wl_tree(e, &ubi->erroneous)) {
1335                 spin_unlock(&ubi->wl_lock);
1336                 return 0;
1337         }
1338
1339         if (e == ubi->move_to) {
1340                 /*
1341                  * This physical eraseblock was used to move data to. The data
1342                  * was moved but the PEB was not yet inserted to the proper
1343                  * tree. We should just wait a little and let the WL worker
1344                  * proceed.
1345                  */
1346                 spin_unlock(&ubi->wl_lock);
1347                 dbg_wl("the PEB %d is not in proper tree, retry", pnum);
1348                 yield();
1349                 goto retry;
1350         }
1351
1352         if (in_wl_tree(e, &ubi->used)) {
1353                 self_check_in_wl_tree(ubi, e, &ubi->used);
1354                 rb_erase(&e->u.rb, &ubi->used);
1355         } else {
1356                 int err;
1357
1358                 err = prot_queue_del(ubi, e->pnum);
1359                 if (err) {
1360                         ubi_err(ubi, "PEB %d not found", pnum);
1361                         ubi_ro_mode(ubi);
1362                         spin_unlock(&ubi->wl_lock);
1363                         return err;
1364                 }
1365         }
1366
1367         wl_tree_add(e, &ubi->scrub);
1368         spin_unlock(&ubi->wl_lock);
1369
1370         /*
1371          * Technically scrubbing is the same as wear-leveling, so it is done
1372          * by the WL worker.
1373          */
1374         return ensure_wear_leveling(ubi, 0);
1375 }
1376
1377 /**
1378  * ubi_wl_flush - flush all pending works.
1379  * @ubi: UBI device description object
1380  * @vol_id: the volume id to flush for
1381  * @lnum: the logical eraseblock number to flush for
1382  *
1383  * This function executes all pending works for a particular volume id /
1384  * logical eraseblock number pair. If either value is set to %UBI_ALL, then it
1385  * acts as a wildcard for all of the corresponding volume numbers or logical
1386  * eraseblock numbers. It returns zero in case of success and a negative error
1387  * code in case of failure.
1388  */
1389 int ubi_wl_flush(struct ubi_device *ubi, int vol_id, int lnum)
1390 {
1391         int err = 0;
1392         int found = 1;
1393
1394         /*
1395          * Erase while the pending works queue is not empty, but not more than
1396          * the number of currently pending works.
1397          */
1398         dbg_wl("flush pending work for LEB %d:%d (%d pending works)",
1399                vol_id, lnum, ubi->works_count);
1400
1401         while (found) {
1402                 struct ubi_work *wrk, *tmp;
1403                 found = 0;
1404
1405                 down_read(&ubi->work_sem);
1406                 spin_lock(&ubi->wl_lock);
1407                 list_for_each_entry_safe(wrk, tmp, &ubi->works, list) {
1408                         if ((vol_id == UBI_ALL || wrk->vol_id == vol_id) &&
1409                             (lnum == UBI_ALL || wrk->lnum == lnum)) {
1410                                 list_del(&wrk->list);
1411                                 ubi->works_count -= 1;
1412                                 ubi_assert(ubi->works_count >= 0);
1413                                 spin_unlock(&ubi->wl_lock);
1414
1415                                 err = wrk->func(ubi, wrk, 0);
1416                                 if (err) {
1417                                         up_read(&ubi->work_sem);
1418                                         return err;
1419                                 }
1420
1421                                 spin_lock(&ubi->wl_lock);
1422                                 found = 1;
1423                                 break;
1424                         }
1425                 }
1426                 spin_unlock(&ubi->wl_lock);
1427                 up_read(&ubi->work_sem);
1428         }
1429
1430         /*
1431          * Make sure all works that other threads picked up in parallel
1432          * (under ubi->work_sem) have finished.
1433          */
1434         down_write(&ubi->work_sem);
1435         up_write(&ubi->work_sem);
1436
1437         return err;
1438 }
1439
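/*
 * Usage sketch (illustrative only): %UBI_ALL can be passed for either
 * parameter to widen the match, e.g. when a volume is removed or the whole
 * device is detached:
 *
 *	err = ubi_wl_flush(ubi, vol_id, UBI_ALL);   all LEBs of one volume
 *	err = ubi_wl_flush(ubi, UBI_ALL, UBI_ALL);  every pending work
 */
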
1440 static bool scrub_possible(struct ubi_device *ubi, struct ubi_wl_entry *e)
1441 {
1442         if (in_wl_tree(e, &ubi->scrub))
1443                 return false;
1444         else if (in_wl_tree(e, &ubi->erroneous))
1445                 return false;
1446         else if (ubi->move_from == e)
1447                 return false;
1448         else if (ubi->move_to == e)
1449                 return false;
1450
1451         return true;
1452 }
1453
1454 /**
1455  * ubi_bitflip_check - Check an eraseblock for bitflips and scrub it if needed.
1456  * @ubi: UBI device description object
1457  * @pnum: the physical eraseblock to schedule
1458  * @force: don't read the block, assume bitflips happened and take action.
1459  *
1460  * This function reads the given eraseblock and checks whether bitflips
1461  * occurred. In case of bitflips, the eraseblock is scheduled for scrubbing.
1462  * If scrubbing is forced with @force, the eraseblock is not read, but
1463  * scheduled for scrubbing right away.
1464  *
1465  * Returns:
1466  * %-EINVAL, PEB is out of range
1467  * %-ENOENT, PEB is no longer used by UBI
1468  * %-EBUSY, PEB cannot be checked now or a check is currently running on it
1469  * %-EAGAIN, bit flips happened but scrubbing is currently not possible
1470  * %-EUCLEAN, bit flips happened and PEB is scheduled for scrubbing
1471  * %0, no bit flips detected
1472  */
1473 int ubi_bitflip_check(struct ubi_device *ubi, int pnum, int force)
1474 {
1475         int err = 0;
1476         struct ubi_wl_entry *e;
1477
1478         if (pnum < 0 || pnum >= ubi->peb_count) {
1479                 err = -EINVAL;
1480                 goto out;
1481         }
1482
1483         /*
1484          * Pause all parallel work, otherwise it can happen that the
1485          * erase worker frees a wl entry under us.
1486          */
1487         down_write(&ubi->work_sem);
1488
1489         /*
1490          * Make sure that the wl entry does not change state while
1491          * inspecting it.
1492          */
1493         spin_lock(&ubi->wl_lock);
1494         e = ubi->lookuptbl[pnum];
1495         if (!e) {
1496                 spin_unlock(&ubi->wl_lock);
1497                 err = -ENOENT;
1498                 goto out_resume;
1499         }
1500
1501         /*
1502          * Does it make sense to check this PEB?
1503          */
1504         if (!scrub_possible(ubi, e)) {
1505                 spin_unlock(&ubi->wl_lock);
1506                 err = -EBUSY;
1507                 goto out_resume;
1508         }
1509         spin_unlock(&ubi->wl_lock);
1510
1511         if (!force) {
1512                 mutex_lock(&ubi->buf_mutex);
1513                 err = ubi_io_read(ubi, ubi->peb_buf, pnum, 0, ubi->peb_size);
1514                 mutex_unlock(&ubi->buf_mutex);
1515         }
1516
1517         if (force || err == UBI_IO_BITFLIPS) {
1518                 /*
1519                  * Okay, bit flip happened, let's figure out what we can do.
1520                  */
1521                 spin_lock(&ubi->wl_lock);
1522
1523                 /*
1524                  * Recheck. We released wl_lock, UBI might have killed the
1525                  * wl entry under us.
1526                  */
1527                 e = ubi->lookuptbl[pnum];
1528                 if (!e) {
1529                         spin_unlock(&ubi->wl_lock);
1530                         err = -ENOENT;
1531                         goto out_resume;
1532                 }
1533
1534                 /*
1535                  * Need to re-check the state, it may have changed meanwhile.
1536                  */
1537                 if (!scrub_possible(ubi, e)) {
1538                         spin_unlock(&ubi->wl_lock);
1539                         err = -EBUSY;
1540                         goto out_resume;
1541                 }
1542
1543                 if (in_pq(ubi, e)) {
1544                         prot_queue_del(ubi, e->pnum);
1545                         wl_tree_add(e, &ubi->scrub);
1546                         spin_unlock(&ubi->wl_lock);
1547
1548                         err = ensure_wear_leveling(ubi, 1);
1549                 } else if (in_wl_tree(e, &ubi->used)) {
1550                         rb_erase(&e->u.rb, &ubi->used);
1551                         wl_tree_add(e, &ubi->scrub);
1552                         spin_unlock(&ubi->wl_lock);
1553
1554                         err = ensure_wear_leveling(ubi, 1);
1555                 } else if (in_wl_tree(e, &ubi->free)) {
1556                         rb_erase(&e->u.rb, &ubi->free);
1557                         ubi->free_count--;
1558                         spin_unlock(&ubi->wl_lock);
1559
1560                         /*
1561                          * This PEB is empty, so we can schedule it for
1562                          * erasure right away. No wear-leveling is needed.
1563                          */
1564                         err = schedule_erase(ubi, e, UBI_UNKNOWN, UBI_UNKNOWN,
1565                                              force ? 0 : 1, true);
1566                 } else {
1567                         spin_unlock(&ubi->wl_lock);
1568                         err = -EAGAIN;
1569                 }
1570
1571                 if (!err && !force)
1572                         err = -EUCLEAN;
1573         } else {
1574                 err = 0;
1575         }
1576
1577 out_resume:
1578         up_write(&ubi->work_sem);
1579 out:
1581         return err;
1582 }
1583
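/*
 * Caller-side sketch (illustrative only): this is how the return codes
 * documented above would typically be consumed, e.g. by an ioctl handler.
 * The surrounding code is hypothetical.
 *
 *	ret = ubi_bitflip_check(ubi, pnum, 0);
 *	if (ret == -EUCLEAN)
 *		... bitflips found, scrubbing is already scheduled ...
 *	else if (ret == -EAGAIN || ret == -EBUSY)
 *		... transient condition, try again later ...
 *	else if (ret)
 *		... hard error: -EINVAL, -ENOENT or a read failure ...
 */
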
1584 /**
1585  * tree_destroy - destroy an RB-tree.
1586  * @ubi: UBI device description object
1587  * @root: the root of the tree to destroy
1588  */
1589 static void tree_destroy(struct ubi_device *ubi, struct rb_root *root)
1590 {
1591         struct rb_node *rb;
1592         struct ubi_wl_entry *e;
1593
1594         rb = root->rb_node;
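	/*
	 * Iterative post-order traversal, no recursion and no extra memory:
	 * descend to a leaf, free it, and detach it from its parent by
	 * clearing the corresponding child pointer, so that the parent
	 * itself becomes a leaf on a later iteration.
	 */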
1595         while (rb) {
1596                 if (rb->rb_left)
1597                         rb = rb->rb_left;
1598                 else if (rb->rb_right)
1599                         rb = rb->rb_right;
1600                 else {
1601                         e = rb_entry(rb, struct ubi_wl_entry, u.rb);
1602
1603                         rb = rb_parent(rb);
1604                         if (rb) {
1605                                 if (rb->rb_left == &e->u.rb)
1606                                         rb->rb_left = NULL;
1607                                 else
1608                                         rb->rb_right = NULL;
1609                         }
1610
1611                         wl_entry_destroy(ubi, e);
1612                 }
1613         }
1614 }
1615
1616 /**
1617  * ubi_thread - UBI background thread.
1618  * @u: the UBI device description object pointer
1619  */
1620 int ubi_thread(void *u)
1621 {
1622         int failures = 0;
1623         struct ubi_device *ubi = u;
1624
1625         ubi_msg(ubi, "background thread \"%s\" started, PID %d",
1626                 ubi->bgt_name, task_pid_nr(current));
1627
1628         set_freezable();
1629         for (;;) {
1630                 int err;
1631
1632                 if (kthread_should_stop())
1633                         break;
1634
1635                 if (try_to_freeze())
1636                         continue;
1637
1638                 spin_lock(&ubi->wl_lock);
1639                 if (list_empty(&ubi->works) || ubi->ro_mode ||
1640                     !ubi->thread_enabled || ubi_dbg_is_bgt_disabled(ubi)) {
1641                         set_current_state(TASK_INTERRUPTIBLE);
1642                         spin_unlock(&ubi->wl_lock);
1643
1644                         /*
1645                          * Check kthread_should_stop() after we set the task
1646                          * state to guarantee that we either see the stop bit
1647                          * and exit or the task state is reset to runnable such
1648                          * that it's not scheduled out indefinitely and detects
1649                          * the stop bit at kthread_should_stop().
1650                          */
1651                         if (kthread_should_stop()) {
1652                                 set_current_state(TASK_RUNNING);
1653                                 break;
1654                         }
1655
1656                         schedule();
1657                         continue;
1658                 }
1659                 spin_unlock(&ubi->wl_lock);
1660
1661                 err = do_work(ubi);
1662                 if (err) {
1663                         ubi_err(ubi, "%s: work failed with error code %d",
1664                                 ubi->bgt_name, err);
1665                         if (failures++ > WL_MAX_FAILURES) {
1666                                 /*
1667                                  * Too many failures, disable the thread and
1668                                  * switch to read-only mode.
1669                                  */
1670                                 ubi_msg(ubi, "%s: %d consecutive failures",
1671                                         ubi->bgt_name, WL_MAX_FAILURES);
1672                                 ubi_ro_mode(ubi);
1673                                 ubi->thread_enabled = 0;
1674                                 continue;
1675                         }
1676                 } else
1677                         failures = 0;
1678
1679                 cond_resched();
1680         }
1681
1682         dbg_wl("background thread \"%s\" is killed", ubi->bgt_name);
1683         ubi->thread_enabled = 0;
1684         return 0;
1685 }
1686
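/*
 * Producer-side sketch (illustrative only): work is queued under wl_lock and
 * the background thread is woken up so that the loop above can pick it up.
 * This is a simplified, hypothetical version of the scheduling helper used
 * by this sub-system:
 *
 *	spin_lock(&ubi->wl_lock);
 *	list_add_tail(&wrk->list, &ubi->works);
 *	ubi->works_count += 1;
 *	if (ubi->thread_enabled && !ubi_dbg_is_bgt_disabled(ubi))
 *		wake_up_process(ubi->bgt_thread);
 *	spin_unlock(&ubi->wl_lock);
 */
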
1687 /**
1688  * shutdown_work - shut down all pending works.
1689  * @ubi: UBI device description object
1690  */
1691 static void shutdown_work(struct ubi_device *ubi)
1692 {
1693         while (!list_empty(&ubi->works)) {
1694                 struct ubi_work *wrk;
1695
1696                 wrk = list_entry(ubi->works.next, struct ubi_work, list);
1697                 list_del(&wrk->list);
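		/*
		 * Call the work function with the cancel flag set: it only
		 * releases the resources associated with the work instead of
		 * performing it.
		 */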
1698                 wrk->func(ubi, wrk, 1);
1699                 ubi->works_count -= 1;
1700                 ubi_assert(ubi->works_count >= 0);
1701         }
1702 }
1703
1704 /**
1705  * erase_aeb - erase a PEB described by a UBI attach info entry
1706  * @ubi: UBI device description object
1707  * @aeb: the UBI attach info entry describing the PEB
1708  * @sync: if true, erase synchronously; otherwise schedule the PEB for erasure
1709  */
1710 static int erase_aeb(struct ubi_device *ubi, struct ubi_ainf_peb *aeb, bool sync)
1711 {
1712         struct ubi_wl_entry *e;
1713         int err;
1714
1715         e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1716         if (!e)
1717                 return -ENOMEM;
1718
1719         e->pnum = aeb->pnum;
1720         e->ec = aeb->ec;
1721         ubi->lookuptbl[e->pnum] = e;
1722
1723         if (sync) {
1724                 err = sync_erase(ubi, e, false);
1725                 if (err)
1726                         goto out_free;
1727
1728                 wl_tree_add(e, &ubi->free);
1729                 ubi->free_count++;
1730         } else {
1731                 err = schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0, false);
1732                 if (err)
1733                         goto out_free;
1734         }
1735
1736         return 0;
1737
1738 out_free:
1739         wl_entry_destroy(ubi, e);
1740
1741         return err;
1742 }
1743
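/*
 * Usage note (illustrative only): both modes are exercised by ubi_wl_init()
 * below, asynchronously for the PEBs queued on @ai->erase and synchronously
 * for outdated fastmap anchor PEBs:
 *
 *	err = erase_aeb(ubi, aeb, false);   queue for the background thread
 *	err = erase_aeb(ubi, aeb, true);    erase now, PEB joins the free tree
 */
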
1744 /**
1745  * ubi_wl_init - initialize the WL sub-system using attaching information.
1746  * @ubi: UBI device description object
1747  * @ai: attaching information
1748  *
1749  * This function returns zero in case of success, and a negative error code in
1750  * case of failure.
1751  */
1752 int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
1753 {
1754         int err, i, reserved_pebs, found_pebs = 0;
1755         struct rb_node *rb1, *rb2;
1756         struct ubi_ainf_volume *av;
1757         struct ubi_ainf_peb *aeb, *tmp;
1758         struct ubi_wl_entry *e;
1759
1760         ubi->used = ubi->erroneous = ubi->free = ubi->scrub = RB_ROOT;
1761         spin_lock_init(&ubi->wl_lock);
1762         mutex_init(&ubi->move_mutex);
1763         init_rwsem(&ubi->work_sem);
1764         ubi->max_ec = ai->max_ec;
1765         INIT_LIST_HEAD(&ubi->works);
1766
1767         sprintf(ubi->bgt_name, UBI_BGT_NAME_PATTERN, ubi->ubi_num);
1768
1769         err = -ENOMEM;
1770         ubi->lookuptbl = kcalloc(ubi->peb_count, sizeof(void *), GFP_KERNEL);
1771         if (!ubi->lookuptbl)
1772                 return err;
1773
1774         for (i = 0; i < UBI_PROT_QUEUE_LEN; i++)
1775                 INIT_LIST_HEAD(&ubi->pq[i]);
1776         ubi->pq_head = 0;
1777
1778         ubi->free_count = 0;
1779         list_for_each_entry_safe(aeb, tmp, &ai->erase, u.list) {
1780                 cond_resched();
1781
1782                 err = erase_aeb(ubi, aeb, false);
1783                 if (err)
1784                         goto out_free;
1785
1786                 found_pebs++;
1787         }
1788
1789         list_for_each_entry(aeb, &ai->free, u.list) {
1790                 cond_resched();
1791
1792                 e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1793                 if (!e) {
1794                         err = -ENOMEM;
1795                         goto out_free;
1796                 }
1797
1798                 e->pnum = aeb->pnum;
1799                 e->ec = aeb->ec;
1800                 ubi_assert(e->ec >= 0);
1801
1802                 wl_tree_add(e, &ubi->free);
1803                 ubi->free_count++;
1804
1805                 ubi->lookuptbl[e->pnum] = e;
1806
1807                 found_pebs++;
1808         }
1809
1810         ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) {
1811                 ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) {
1812                         cond_resched();
1813
1814                         e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
1815                         if (!e) {
1816                                 err = -ENOMEM;
1817                                 goto out_free;
1818                         }
1819
1820                         e->pnum = aeb->pnum;
1821                         e->ec = aeb->ec;
1822                         ubi->lookuptbl[e->pnum] = e;
1823
1824                         if (!aeb->scrub) {
1825                                 dbg_wl("add PEB %d EC %d to the used tree",
1826                                        e->pnum, e->ec);
1827                                 wl_tree_add(e, &ubi->used);
1828                         } else {
1829                                 dbg_wl("add PEB %d EC %d to the scrub tree",
1830                                        e->pnum, e->ec);
1831                                 wl_tree_add(e, &ubi->scrub);
1832                         }
1833
1834                         found_pebs++;
1835                 }
1836         }
1837
1838         list_for_each_entry(aeb, &ai->fastmap, u.list) {
1839                 cond_resched();
1840
1841                 e = ubi_find_fm_block(ubi, aeb->pnum);
1842
1843                 if (e) {
1844                         ubi_assert(!ubi->lookuptbl[e->pnum]);
1845                         ubi->lookuptbl[e->pnum] = e;
1846                 } else {
1847                         bool sync = false;
1848
1849                         /*
1850                          * Usually old Fastmap PEBs are scheduled for erasure
1851                          * and we don't have to care about them, but if a
1852                          * power cut happened before they could be scheduled,
1853                          * we need to take care of them here.
1854                          */
1855                         if (ubi->lookuptbl[aeb->pnum])
1856                                 continue;
1857
1858                         /*
1859                          * The fastmap update code might not find a free PEB for
1860                          * writing the fastmap anchor to and then reuses the
1861                          * current fastmap anchor PEB. When this PEB gets erased
1862                          * and a power cut happens before it is written again, we
1863                          * must make sure that the fastmap attach code doesn't
1864                          * find any outdated fastmap anchors, hence we erase the
1865                          * outdated fastmap anchor PEBs synchronously here.
1866                          */
1867                         if (aeb->vol_id == UBI_FM_SB_VOLUME_ID)
1868                                 sync = true;
1869
1870                         err = erase_aeb(ubi, aeb, sync);
1871                         if (err)
1872                                 goto out_free;
1873                 }
1874
1875                 found_pebs++;
1876         }
1877
1878         dbg_wl("found %i PEBs", found_pebs);
1879
1880         ubi_assert(ubi->good_peb_count == found_pebs);
1881
1882         reserved_pebs = WL_RESERVED_PEBS;
1883         ubi_fastmap_init(ubi, &reserved_pebs);
1884
1885         if (ubi->avail_pebs < reserved_pebs) {
1886                 ubi_err(ubi, "not enough physical eraseblocks (%d, need %d)",
1887                         ubi->avail_pebs, reserved_pebs);
1888                 if (ubi->corr_peb_count)
1889                         ubi_err(ubi, "%d PEBs are corrupted and not used",
1890                                 ubi->corr_peb_count);
1891                 err = -ENOSPC;
1892                 goto out_free;
1893         }
1894         ubi->avail_pebs -= reserved_pebs;
1895         ubi->rsvd_pebs += reserved_pebs;
1896
1897         /* Schedule wear-leveling if needed */
1898         err = ensure_wear_leveling(ubi, 0);
1899         if (err)
1900                 goto out_free;
1901
1902 #ifdef CONFIG_MTD_UBI_FASTMAP
1903         if (!ubi->ro_mode && !ubi->fm_disabled)
1904                 ubi_ensure_anchor_pebs(ubi);
1905 #endif
1906         return 0;
1907
1908 out_free:
1909         shutdown_work(ubi);
1910         tree_destroy(ubi, &ubi->used);
1911         tree_destroy(ubi, &ubi->free);
1912         tree_destroy(ubi, &ubi->scrub);
1913         kfree(ubi->lookuptbl);
1914         return err;
1915 }
1916
1917 /**
1918  * protection_queue_destroy - destroy the protection queue.
1919  * @ubi: UBI device description object
1920  */
1921 static void protection_queue_destroy(struct ubi_device *ubi)
1922 {
1923         int i;
1924         struct ubi_wl_entry *e, *tmp;
1925
1926         for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i) {
1927                 list_for_each_entry_safe(e, tmp, &ubi->pq[i], u.list) {
1928                         list_del(&e->u.list);
1929                         wl_entry_destroy(ubi, e);
1930                 }
1931         }
1932 }
1933
1934 /**
1935  * ubi_wl_close - close the wear-leveling sub-system.
1936  * @ubi: UBI device description object
1937  */
1938 void ubi_wl_close(struct ubi_device *ubi)
1939 {
1940         dbg_wl("close the WL sub-system");
1941         ubi_fastmap_close(ubi);
1942         shutdown_work(ubi);
1943         protection_queue_destroy(ubi);
1944         tree_destroy(ubi, &ubi->used);
1945         tree_destroy(ubi, &ubi->erroneous);
1946         tree_destroy(ubi, &ubi->free);
1947         tree_destroy(ubi, &ubi->scrub);
1948         kfree(ubi->lookuptbl);
1949 }
1950
1951 /**
1952  * self_check_ec - make sure that the erase counter of a PEB is correct.
1953  * @ubi: UBI device description object
1954  * @pnum: the physical eraseblock number to check
1955  * @ec: the erase counter to check
1956  *
1957  * This function returns zero if the erase counter of physical eraseblock @pnum
1958  * is equivalent to @ec, and a negative error code if not or if an error
1959  * occurred.
1960  */
1961 static int self_check_ec(struct ubi_device *ubi, int pnum, int ec)
1962 {
1963         int err;
1964         long long read_ec;
1965         struct ubi_ec_hdr *ec_hdr;
1966
1967         if (!ubi_dbg_chk_gen(ubi))
1968                 return 0;
1969
1970         ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
1971         if (!ec_hdr)
1972                 return -ENOMEM;
1973
1974         err = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
1975         if (err && err != UBI_IO_BITFLIPS) {
1976                 /* The header does not have to exist */
1977                 err = 0;
1978                 goto out_free;
1979         }
1980
1981         read_ec = be64_to_cpu(ec_hdr->ec);
1982         if (ec != read_ec && read_ec - ec > 1) {
1983                 ubi_err(ubi, "self-check failed for PEB %d", pnum);
1984                 ubi_err(ubi, "read EC is %lld, should be %d", read_ec, ec);
1985                 dump_stack();
1986                 err = 1;
1987         } else
1988                 err = 0;
1989
1990 out_free:
1991         kfree(ec_hdr);
1992         return err;
1993 }
1994
1995 /**
1996  * self_check_in_wl_tree - check that wear-leveling entry is in WL RB-tree.
1997  * @ubi: UBI device description object
1998  * @e: the wear-leveling entry to check
1999  * @root: the root of the tree
2000  *
2001  * This function returns zero if @e is in the @root RB-tree and %-EINVAL if it
2002  * is not.
2003  */
2004 static int self_check_in_wl_tree(const struct ubi_device *ubi,
2005                                  struct ubi_wl_entry *e, struct rb_root *root)
2006 {
2007         if (!ubi_dbg_chk_gen(ubi))
2008                 return 0;
2009
2010         if (in_wl_tree(e, root))
2011                 return 0;
2012
2013         ubi_err(ubi, "self-check failed for PEB %d, EC %d, RB-tree %p",
2014                 e->pnum, e->ec, root);
2015         dump_stack();
2016         return -EINVAL;
2017 }
2018
2019 /**
2020  * self_check_in_pq - check if wear-leveling entry is in the protection
2021  *                    queue.
2022  * @ubi: UBI device description object
2023  * @e: the wear-leveling entry to check
2024  *
2025  * This function returns zero if @e is in @ubi->pq and %-EINVAL if it is not.
2026  */
2027 static int self_check_in_pq(const struct ubi_device *ubi,
2028                             struct ubi_wl_entry *e)
2029 {
2030         if (!ubi_dbg_chk_gen(ubi))
2031                 return 0;
2032
2033         if (in_pq(ubi, e))
2034                 return 0;
2035
2036         ubi_err(ubi, "self-check failed for PEB %d, EC %d, Protect queue",
2037                 e->pnum, e->ec);
2038         dump_stack();
2039         return -EINVAL;
2040 }
2041 #ifndef CONFIG_MTD_UBI_FASTMAP
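/**
 * get_peb_for_wl - pick a free PEB for wear-leveling (non-fastmap version).
 * @ubi: UBI device description object
 *
 * Picks a free PEB whose erase counter is within %WL_FREE_MAX_DIFF of the
 * lowest one and removes it from the @ubi->free tree. Assumed to be called
 * with @ubi->wl_lock held.
 */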
2042 static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
2043 {
2044         struct ubi_wl_entry *e;
2045
2046         e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
2047         self_check_in_wl_tree(ubi, e, &ubi->free);
2048         ubi->free_count--;
2049         ubi_assert(ubi->free_count >= 0);
2050         rb_erase(&e->u.rb, &ubi->free);
2051
2052         return e;
2053 }
2054
2055 /**
2056  * produce_free_peb - produce a free physical eraseblock.
2057  * @ubi: UBI device description object
2058  *
2059  * This function tries to make a free PEB by means of synchronous execution of
2060  * pending works. This may be needed if, for example, the background thread is
2061  * disabled. Returns zero in case of success and a negative error code in case
2062  * of failure.
2063  */
2064 static int produce_free_peb(struct ubi_device *ubi)
2065 {
2066         int err;
2067
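	/*
	 * The caller holds ubi->wl_lock; drop it around each synchronously
	 * executed work and take it back before checking the free tree and
	 * the works count again.
	 */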
2068         while (!ubi->free.rb_node && ubi->works_count) {
2069                 spin_unlock(&ubi->wl_lock);
2070
2071                 dbg_wl("do one work synchronously");
2072                 err = do_work(ubi);
2073
2074                 spin_lock(&ubi->wl_lock);
2075                 if (err)
2076                         return err;
2077         }
2078
2079         return 0;
2080 }
2081
2082 /**
2083  * ubi_wl_get_peb - get a physical eraseblock.
2084  * @ubi: UBI device description object
2085  *
2086  * This function returns a physical eraseblock number in case of success and a
2087  * negative error code in case of failure.
2088  * Returns with ubi->fm_eba_sem held in read mode!
2089  */
2090 int ubi_wl_get_peb(struct ubi_device *ubi)
2091 {
2092         int err;
2093         struct ubi_wl_entry *e;
2094
2095 retry:
2096         down_read(&ubi->fm_eba_sem);
2097         spin_lock(&ubi->wl_lock);
2098         if (!ubi->free.rb_node) {
2099                 if (ubi->works_count == 0) {
2100                         ubi_err(ubi, "no free eraseblocks");
2101                         ubi_assert(list_empty(&ubi->works));
2102                         spin_unlock(&ubi->wl_lock);
2103                         return -ENOSPC;
2104                 }
2105
2106                 err = produce_free_peb(ubi);
2107                 if (err < 0) {
2108                         spin_unlock(&ubi->wl_lock);
2109                         return err;
2110                 }
2111                 spin_unlock(&ubi->wl_lock);
2112                 up_read(&ubi->fm_eba_sem);
2113                 goto retry;
2115         }
2116         e = wl_get_wle(ubi);
2117         prot_queue_add(ubi, e);
2118         spin_unlock(&ubi->wl_lock);
2119
2120         err = ubi_self_check_all_ff(ubi, e->pnum, ubi->vid_hdr_aloffset,
2121                                     ubi->peb_size - ubi->vid_hdr_aloffset);
2122         if (err) {
2123                 ubi_err(ubi, "new PEB %d does not contain all 0xFF bytes", e->pnum);
2124                 return err;
2125         }
2126
2127         return e->pnum;
2128 }
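
/*
 * Caller-side sketch (illustrative only): ubi_wl_get_peb() returns with
 * ubi->fm_eba_sem held in read mode even on failure, so the caller is
 * responsible for releasing it in all cases:
 *
 *	pnum = ubi_wl_get_peb(ubi);
 *	if (pnum < 0) {
 *		up_read(&ubi->fm_eba_sem);
 *		return pnum;
 *	}
 *	... use PEB pnum ...
 *	up_read(&ubi->fm_eba_sem);
 */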
2129 #else
2130 #include "fastmap-wl.c"
2131 #endif