drivers/mtd/ubi/fastmap-wl.c (GNU Linux-libre 6.8.7-gnu)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2012 Linutronix GmbH
 * Copyright (c) 2014 sigma star gmbh
 * Author: Richard Weinberger <richard@nod.at>
 */

/**
 * update_fastmap_work_fn - calls ubi_update_fastmap from a work queue
 * @wrk: the work description object
 */
static void update_fastmap_work_fn(struct work_struct *wrk)
{
        struct ubi_device *ubi = container_of(wrk, struct ubi_device, fm_work);

        ubi_update_fastmap(ubi);
        spin_lock(&ubi->wl_lock);
        ubi->fm_work_scheduled = 0;
        spin_unlock(&ubi->wl_lock);
}

/**
 * find_anchor_wl_entry - find a wear-leveling entry to be used as anchor PEB.
 * @root: the RB-tree to search
 */
static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root)
{
        struct rb_node *p;
        struct ubi_wl_entry *e, *victim = NULL;
        int max_ec = UBI_MAX_ERASECOUNTER;

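        /*
         * Only PEBs below UBI_FM_MAX_START can serve as the fastmap anchor;
         * among those, pick the entry with the lowest erase count.
         */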
        ubi_rb_for_each_entry(p, e, root, u.rb) {
                if (e->pnum < UBI_FM_MAX_START && e->ec < max_ec) {
                        victim = e;
                        max_ec = e->ec;
                }
        }

        return victim;
}

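/**
 * return_unused_peb - return a PEB back to the free tree.
 * @ubi: UBI device description object
 * @e: wear-leveling entry to return
 */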
static inline void return_unused_peb(struct ubi_device *ubi,
                                     struct ubi_wl_entry *e)
{
        wl_tree_add(e, &ubi->free);
        ubi->free_count++;
}

/**
 * return_unused_pool_pebs - return unused PEBs of a pool to the free tree.
 * @ubi: UBI device description object
 * @pool: fastmap pool description object
 */
static void return_unused_pool_pebs(struct ubi_device *ubi,
                                    struct ubi_fm_pool *pool)
{
        int i;
        struct ubi_wl_entry *e;

        for (i = pool->used; i < pool->size; i++) {
                e = ubi->lookuptbl[pool->pebs[i]];
                return_unused_peb(ubi, e);
        }
}

/**
 * ubi_wl_get_fm_peb - find a free physical eraseblock for fastmap usage.
 * @ubi: UBI device description object
 * @anchor: if non-zero, the PEB will be used as the fastmap anchor
 *
 * The function returns a suitable free physical eraseblock (an anchor
 * candidate or a medium-worn one) and removes it from the WL sub-system.
 * Must be called with wl_lock held!
 */
struct ubi_wl_entry *ubi_wl_get_fm_peb(struct ubi_device *ubi, int anchor)
{
        struct ubi_wl_entry *e = NULL;

        if (!ubi->free.rb_node)
                goto out;

        if (anchor)
                e = find_anchor_wl_entry(&ubi->free);
        else
                e = find_mean_wl_entry(ubi, &ubi->free);

        if (!e)
                goto out;

        self_check_in_wl_tree(ubi, e, &ubi->free);

        /*
         * Remove it from the free list; the WL sub-system no longer knows
         * about this eraseblock.
         */
        rb_erase(&e->u.rb, &ubi->free);
        ubi->free_count--;
out:
        return e;
}

/*
 * wait_free_pebs_for_pool - wait until there are enough free PEBs
 * @ubi: UBI device description object
 *
 * Wait and execute do_work until there are enough free PEBs, so the pools
 * can be filled as much as possible. This reduces the pool refilling
 * frequency and thereby the fastmap updating frequency.
 */
static void wait_free_pebs_for_pool(struct ubi_device *ubi)
{
        struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;
        struct ubi_fm_pool *pool = &ubi->fm_pool;
        int free, expect_free, executed;
        /*
         * UBI keeps at least the following free PEBs reserved:
         * 1. WL_RESERVED_PEBS[1]
         * 2. EBA_RESERVED_PEBS[1]
         * 3. fm pebs - 1: twice the fastmap size, minus the current fastmap
         *    and the fm_anchor
         * 4. beb_rsvd_pebs: this value must be read under ubi->wl_lock
         */
        int reserved = WL_RESERVED_PEBS + EBA_RESERVED_PEBS +
                       ubi->fm_size / ubi->leb_size - 1 + ubi->fm_pool_rsv_cnt;

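        /*
         * Run pending works synchronously, one at a time, until the expected
         * number of free PEBs is available or no executable work is left.
         */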
        do {
                spin_lock(&ubi->wl_lock);
                free = ubi->free_count;
                free += pool->size - pool->used + wl_pool->size - wl_pool->used;
                expect_free = reserved + ubi->beb_rsvd_pebs;
                spin_unlock(&ubi->wl_lock);

                /*
                 * Break out if there is no work left or a work execution
                 * failed, given that erase_worker reschedules itself when the
                 * MTD layer returns -EBUSY, e.g. during system shutdown.
                 */
                if (do_work(ubi, &executed) || !executed)
                        break;
        } while (free < expect_free);
}

/*
 * left_free_count - returns the number of free PEBs available to fill fm pools
 * @ubi: UBI device description object
 *
 * This helper function returns the number of free PEBs, minus the PEBs
 * reserved for the fastmap, that can be used to fill fm_pool and fm_wl_pool.
 */
static int left_free_count(struct ubi_device *ubi)
{
        int fm_used = 0;        // fastmap non anchor pebs.

        if (!ubi->free.rb_node)
                return 0;

        if (!ubi->ro_mode && !ubi->fm_disabled)
                fm_used = ubi->fm_size / ubi->leb_size - 1;

        return ubi->free_count - fm_used;
}

/*
 * can_fill_pools - whether free PEBs will be left after filling pools
 * @ubi: UBI device description object
 * @free: current number of free PEBs
 *
 * Return %1 if free PEBs will still be left after filling the pools,
 * otherwise %0 is returned.
 */
static int can_fill_pools(struct ubi_device *ubi, int free)
{
        struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;
        struct ubi_fm_pool *pool = &ubi->fm_pool;
        int pool_need = pool->max_size - pool->size +
                        wl_pool->max_size - wl_pool->size;

        if (free - pool_need < 1)
                return 0;

        return 1;
}

/**
 * ubi_refill_pools_and_lock - refills all fastmap PEB pools and takes fm locks.
 * @ubi: UBI device description object
 */
void ubi_refill_pools_and_lock(struct ubi_device *ubi)
{
        struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;
        struct ubi_fm_pool *pool = &ubi->fm_pool;
        struct ubi_wl_entry *e;
        int enough;

        if (!ubi->ro_mode && !ubi->fm_disabled)
                wait_free_pebs_for_pool(ubi);

        down_write(&ubi->fm_protect);
        down_write(&ubi->work_sem);
        down_write(&ubi->fm_eba_sem);

        spin_lock(&ubi->wl_lock);

        return_unused_pool_pebs(ubi, wl_pool);
        return_unused_pool_pebs(ubi, pool);

        wl_pool->size = 0;
        pool->size = 0;

        if (ubi->fm_anchor) {
                wl_tree_add(ubi->fm_anchor, &ubi->free);
                ubi->free_count++;
                ubi->fm_anchor = NULL;
        }

        if (!ubi->fm_disabled)
                /*
                 * All available PEBs are in ubi->free, now is the time to get
                 * the best anchor PEBs.
                 */
                ubi->fm_anchor = ubi_wl_get_fm_peb(ubi, 1);

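        /*
         * Fill both pools in lock step: take one PEB for the user pool and
         * one for the WL pool per iteration, and stop once both pools are
         * full ("enough" reaches 2) or no spare free PEBs are left.
         */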
        for (;;) {
                enough = 0;
                if (pool->size < pool->max_size) {
                        if (left_free_count(ubi) <= 0)
                                break;

                        e = wl_get_wle(ubi);
                        if (!e)
                                break;

                        pool->pebs[pool->size] = e->pnum;
                        pool->size++;
                } else
                        enough++;

                if (wl_pool->size < wl_pool->max_size) {
                        int left_free = left_free_count(ubi);

                        if (left_free <= 0)
                                break;

                        e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF,
                                          !can_fill_pools(ubi, left_free));
                        self_check_in_wl_tree(ubi, e, &ubi->free);
                        rb_erase(&e->u.rb, &ubi->free);
                        ubi->free_count--;

                        wl_pool->pebs[wl_pool->size] = e->pnum;
                        wl_pool->size++;
                } else
                        enough++;

                if (enough == 2)
                        break;
        }

        wl_pool->used = 0;
        pool->used = 0;

        spin_unlock(&ubi->wl_lock);
}

/**
 * produce_free_peb - produce a free physical eraseblock.
 * @ubi: UBI device description object
 *
 * This function tries to make a free PEB by means of synchronous execution of
 * pending works. This may be needed if, for example, the background thread is
 * disabled. Returns zero in case of success and a negative error code in case
 * of failure.
 */
static int produce_free_peb(struct ubi_device *ubi)
{
        int err;

        while (!ubi->free.rb_node && ubi->works_count) {
                dbg_wl("do one work synchronously");
                err = do_work(ubi, NULL);

                if (err)
                        return err;
        }

        return 0;
}

/**
 * ubi_wl_get_peb - get a physical eraseblock.
 * @ubi: UBI device description object
 *
 * This function returns a physical eraseblock in case of success and a
 * negative error code in case of failure.
 * Returns with ubi->fm_eba_sem held in read mode!
 */
int ubi_wl_get_peb(struct ubi_device *ubi)
{
        int ret, attempts = 0;
        struct ubi_fm_pool *pool = &ubi->fm_pool;
        struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;

again:
        down_read(&ubi->fm_eba_sem);
        spin_lock(&ubi->wl_lock);

        /*
         * We check here also for the WL pool because at this point we can
         * refill the WL pool synchronously.
         */
        if (pool->used == pool->size || wl_pool->used == wl_pool->size) {
                spin_unlock(&ubi->wl_lock);
                up_read(&ubi->fm_eba_sem);
                ret = ubi_update_fastmap(ubi);
                if (ret) {
                        ubi_msg(ubi, "Unable to write a new fastmap: %i", ret);
                        down_read(&ubi->fm_eba_sem);
                        return -ENOSPC;
                }
                down_read(&ubi->fm_eba_sem);
                spin_lock(&ubi->wl_lock);
        }

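        /*
         * If the user pool is still empty after the fastmap update, try to
         * produce a free PEB by running pending work synchronously and
         * retry, giving up after ten attempts.
         */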
        if (pool->used == pool->size) {
                spin_unlock(&ubi->wl_lock);
                attempts++;
                if (attempts == 10) {
                        ubi_err(ubi, "Unable to get a free PEB from user WL pool");
                        ret = -ENOSPC;
                        goto out;
                }
                up_read(&ubi->fm_eba_sem);
                ret = produce_free_peb(ubi);
                if (ret < 0) {
                        down_read(&ubi->fm_eba_sem);
                        goto out;
                }
                goto again;
        }

        ubi_assert(pool->used < pool->size);
        ret = pool->pebs[pool->used++];
        prot_queue_add(ubi, ubi->lookuptbl[ret]);
        spin_unlock(&ubi->wl_lock);
out:
        return ret;
}

/**
 * next_peb_for_wl - returns next PEB to be used internally by the
 * WL sub-system.
 *
 * @ubi: UBI device description object
 */
static struct ubi_wl_entry *next_peb_for_wl(struct ubi_device *ubi)
{
        struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
        int pnum;

        if (pool->used == pool->size)
                return NULL;

        pnum = pool->pebs[pool->used];
        return ubi->lookuptbl[pnum];
}

/**
 * need_wear_leveling - checks whether to trigger a wear leveling work.
 * UBI fetches free PEBs from wl_pool, so we check free PEBs from both
 * 'wl_pool' and 'ubi->free', because a free PEB in the 'ubi->free' tree
 * may be moved into 'wl_pool' by ubi_refill_pools().
 *
 * @ubi: UBI device description object
 */
static bool need_wear_leveling(struct ubi_device *ubi)
{
        int ec;
        struct ubi_wl_entry *e;

        if (!ubi->used.rb_node)
                return false;

        e = next_peb_for_wl(ubi);
        if (!e) {
                if (!ubi->free.rb_node)
                        return false;
                e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF, 0);
                ec = e->ec;
        } else {
                ec = e->ec;
                if (ubi->free.rb_node) {
                        e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF, 0);
                        ec = max(ec, e->ec);
                }
        }
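        /*
         * Compare the highest erase count candidate against the least worn
         * PEB in the used tree; trigger wear leveling if the gap reaches
         * UBI_WL_THRESHOLD.
         */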
        e = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);

        return ec - e->ec >= UBI_WL_THRESHOLD;
}

/* get_peb_for_wl - returns a PEB to be used internally by the WL sub-system.
 *
 * @ubi: UBI device description object
 */
static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
{
        struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
        int pnum;

        ubi_assert(rwsem_is_locked(&ubi->fm_eba_sem));

        if (pool->used == pool->size) {
                /*
                 * We cannot update the fastmap here because this
                 * function is called in atomic context.
                 * Let's fail here and refill/update it as soon as possible.
                 */
                if (!ubi->fm_work_scheduled) {
                        ubi->fm_work_scheduled = 1;
                        schedule_work(&ubi->fm_work);
                }
                return NULL;
        }

        pnum = pool->pebs[pool->used++];
        return ubi->lookuptbl[pnum];
}

/**
 * ubi_ensure_anchor_pebs - schedule wear-leveling to produce an anchor PEB.
 * @ubi: UBI device description object
 */
int ubi_ensure_anchor_pebs(struct ubi_device *ubi)
{
        struct ubi_work *wrk;
        struct ubi_wl_entry *anchor;

        spin_lock(&ubi->wl_lock);

        /* Do we already have an anchor? */
        if (ubi->fm_anchor) {
                spin_unlock(&ubi->wl_lock);
                return 0;
        }

        /* See if we can find an anchor PEB on the list of free PEBs */
        anchor = ubi_wl_get_fm_peb(ubi, 1);
        if (anchor) {
                ubi->fm_anchor = anchor;
                spin_unlock(&ubi->wl_lock);
                return 0;
        }

        ubi->fm_do_produce_anchor = 1;
        /* No luck, trigger wear leveling to produce a new anchor PEB. */
        if (ubi->wl_scheduled) {
                spin_unlock(&ubi->wl_lock);
                return 0;
        }
        ubi->wl_scheduled = 1;
        spin_unlock(&ubi->wl_lock);

        wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
        if (!wrk) {
                spin_lock(&ubi->wl_lock);
                ubi->wl_scheduled = 0;
                spin_unlock(&ubi->wl_lock);
                return -ENOMEM;
        }

        wrk->func = &wear_leveling_worker;
        __schedule_ubi_work(ubi, wrk);
        return 0;
}

/**
 * ubi_wl_put_fm_peb - returns a PEB used in a fastmap to the wear-leveling
 * sub-system.
 * see: ubi_wl_put_peb()
 *
 * @ubi: UBI device description object
 * @fm_e: physical eraseblock to return
 * @lnum: the last used logical eraseblock number for the PEB
 * @torture: if this physical eraseblock has to be tortured
 */
int ubi_wl_put_fm_peb(struct ubi_device *ubi, struct ubi_wl_entry *fm_e,
                      int lnum, int torture)
{
        struct ubi_wl_entry *e;
        int vol_id, pnum = fm_e->pnum;

        dbg_wl("PEB %d", pnum);

        ubi_assert(pnum >= 0);
        ubi_assert(pnum < ubi->peb_count);

        spin_lock(&ubi->wl_lock);
        e = ubi->lookuptbl[pnum];

        /*
         * This can happen if we recovered from a fastmap for the very first
         * time and are now writing a new one. In this case the WL sub-system
         * has never seen any PEB used by the original fastmap.
         */
        if (!e) {
                e = fm_e;
                ubi_assert(e->ec >= 0);
                ubi->lookuptbl[pnum] = e;
        }

        spin_unlock(&ubi->wl_lock);

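        /*
         * Erasures of the fastmap super block (lnum == 0) are accounted to
         * the fastmap SB volume, fastmap data PEBs to the fastmap data
         * volume.
         */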
        vol_id = lnum ? UBI_FM_DATA_VOLUME_ID : UBI_FM_SB_VOLUME_ID;
        return schedule_erase(ubi, e, vol_id, lnum, torture, true);
}

/**
 * ubi_is_erase_work - checks whether a work is erase work.
 * @wrk: The work object to be checked
 */
int ubi_is_erase_work(struct ubi_work *wrk)
{
        return wrk->func == erase_worker;
}

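/**
 * ubi_fastmap_close - release fastmap resources when detaching the device.
 * @ubi: UBI device description object
 *
 * Returns the PEBs held in the fastmap pools and the anchor PEB to the free
 * tree and frees the in-memory fastmap structure.
 */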
static void ubi_fastmap_close(struct ubi_device *ubi)
{
        int i;

        return_unused_pool_pebs(ubi, &ubi->fm_pool);
        return_unused_pool_pebs(ubi, &ubi->fm_wl_pool);

        if (ubi->fm_anchor) {
                return_unused_peb(ubi, ubi->fm_anchor);
                ubi->fm_anchor = NULL;
        }

        if (ubi->fm) {
                for (i = 0; i < ubi->fm->used_blocks; i++)
                        kfree(ubi->fm->e[i]);
        }
        kfree(ubi->fm);
}

/**
 * may_reserve_for_fm - tests whether a PEB shall be reserved for fastmap.
 * See find_mean_wl_entry()
 *
 * @ubi: UBI device description object
 * @e: physical eraseblock candidate
 * @root: RB tree to test against.
 */
static struct ubi_wl_entry *may_reserve_for_fm(struct ubi_device *ubi,
                                               struct ubi_wl_entry *e,
                                               struct rb_root *root)
{
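        /*
         * If the candidate lies in the range that could hold the fastmap
         * anchor (pnum < UBI_FM_MAX_START) and no anchor is reserved yet,
         * hand out another entry from the tree so the low-numbered PEB stays
         * available as an anchor.
         */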
        if (e && !ubi->fm_disabled && !ubi->fm && !ubi->fm_anchor &&
            e->pnum < UBI_FM_MAX_START)
                e = rb_entry(rb_next(root->rb_node),
                             struct ubi_wl_entry, u.rb);

        return e;
}