// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) International Business Machines Corp., 2006
 *
 * Author: Artem Bityutskiy (Битюцкий Артём)
 */

/*
 * The UBI Eraseblock Association (EBA) sub-system.
 *
 * This sub-system is responsible for I/O to/from logical eraseblocks.
 *
 * Although in this implementation the EBA table is fully kept and managed in
 * RAM, which implies poor scalability, it might be (partially) maintained on
 * flash in future implementations.
 *
 * The EBA sub-system implements per-logical eraseblock locking. Before
 * accessing a logical eraseblock it is locked for reading or writing. The
 * per-logical eraseblock locking is implemented by means of the lock tree. The
 * lock tree is an RB-tree which refers to all the currently locked logical
 * eraseblocks. The lock tree elements are &struct ubi_ltree_entry objects.
 * They are indexed by (@vol_id, @lnum) pairs.
 *
 * EBA also maintains the global sequence counter which is incremented each
 * time a logical eraseblock is mapped to a physical eraseblock, and it is
 * stored in the volume identifier header. This means that each VID header has
 * a unique sequence number. The sequence number is only ever increased, and we
 * assume 64 bits is enough for it to never overflow.
 */
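
/*
 * A back-of-the-envelope check of the overflow assumption above
 * (illustrative, not part of the driver): even at one million LEB mappings
 * per second, a 64-bit counter lasts
 *
 *        2^64 / (10^6 * 3600 * 24 * 365) ~= 585000 years
 *
 * so treating the sequence number as never overflowing is safe in practice.
 */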

#include <linux/slab.h>
#include <linux/crc32.h>
#include <linux/err.h>
#include "ubi.h"

/* Number of physical eraseblocks reserved for atomic LEB change operation */
#define EBA_RESERVED_PEBS 1

/**
 * struct ubi_eba_entry - structure encoding a single LEB -> PEB association
 * @pnum: the physical eraseblock number attached to the LEB
 *
 * This structure encodes a LEB -> PEB association. Note that the LEB
 * number is not stored here, because it is the index used to access the
 * entries table.
 */
struct ubi_eba_entry {
        int pnum;
};

/**
 * struct ubi_eba_table - LEB -> PEB association information
 * @entries: the LEB to PEB mapping (one entry per LEB)
 *
 * This structure is private to the EBA logic and should be kept here.
 * It encodes the LEB to PEB association table, and is subject to change.
 */
struct ubi_eba_table {
        struct ubi_eba_entry *entries;
};

/**
 * ubi_next_sqnum - get the next sequence number.
 * @ubi: UBI device description object
 *
 * This function returns the next sequence number to use, which is just the
 * current global sequence counter value. It also increases the global
 * sequence counter.
 */
unsigned long long ubi_next_sqnum(struct ubi_device *ubi)
{
        unsigned long long sqnum;

        spin_lock(&ubi->ltree_lock);
        sqnum = ubi->global_sqnum++;
        spin_unlock(&ubi->ltree_lock);

        return sqnum;
}
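
/*
 * Typical use (illustrative sketch): every path in this file that maps a
 * LEB stamps the VID header with a fresh sequence number right before the
 * header is written, e.g.:
 *
 *        vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
 *        err = ubi_io_write_vid_hdr(ubi, pnum, vidb);
 */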

/**
 * ubi_get_compat - get compatibility flags of a volume.
 * @ubi: UBI device description object
 * @vol_id: volume ID
 *
 * This function returns compatibility flags for an internal volume. User
 * volumes have no compatibility flags, so %0 is returned.
 */
static int ubi_get_compat(const struct ubi_device *ubi, int vol_id)
{
        if (vol_id == UBI_LAYOUT_VOLUME_ID)
                return UBI_LAYOUT_VOLUME_COMPAT;

        return 0;
}

/**
 * ubi_eba_get_ldesc - get information about a LEB
 * @vol: volume description object
 * @lnum: logical eraseblock number
 * @ldesc: the LEB descriptor to fill
 *
 * Used to query information about a specific LEB.
 * It currently only returns the physical position of the LEB, but may be
 * extended to provide more information.
 */
void ubi_eba_get_ldesc(struct ubi_volume *vol, int lnum,
                       struct ubi_eba_leb_desc *ldesc)
{
        ldesc->lnum = lnum;
        ldesc->pnum = vol->eba_tbl->entries[lnum].pnum;
}

/**
 * ubi_eba_create_table - allocate a new EBA table and initialize it with all
 *                        LEBs unmapped
 * @vol: volume the table is created for
 * @nentries: number of entries in the table
 *
 * Allocate a new EBA table and initialize it with all LEBs unmapped.
 * Returns a valid pointer if it succeeds, an ERR_PTR() otherwise.
 */
struct ubi_eba_table *ubi_eba_create_table(struct ubi_volume *vol,
                                           int nentries)
{
        struct ubi_eba_table *tbl;
        int i;

        tbl = kzalloc(sizeof(*tbl), GFP_KERNEL);
        if (!tbl)
                return ERR_PTR(-ENOMEM);

        tbl->entries = kmalloc_array(nentries, sizeof(*tbl->entries),
                                     GFP_KERNEL);
        if (!tbl->entries) {
                kfree(tbl);
                return ERR_PTR(-ENOMEM);
        }

        for (i = 0; i < nentries; i++)
                tbl->entries[i].pnum = UBI_LEB_UNMAPPED;

        return tbl;
}
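
/*
 * Usage sketch (illustrative): failures are reported via ERR_PTR(), so
 * callers must test with IS_ERR() rather than comparing against NULL:
 *
 *        struct ubi_eba_table *tbl;
 *
 *        tbl = ubi_eba_create_table(vol, vol->reserved_pebs);
 *        if (IS_ERR(tbl))
 *                return PTR_ERR(tbl);
 *        ubi_eba_replace_table(vol, tbl);
 */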

/**
 * ubi_eba_destroy_table - destroy an EBA table
 * @tbl: the table to destroy
 *
 * Destroy an EBA table.
 */
void ubi_eba_destroy_table(struct ubi_eba_table *tbl)
{
        if (!tbl)
                return;

        kfree(tbl->entries);
        kfree(tbl);
}

/**
 * ubi_eba_copy_table - copy the EBA table attached to vol into another table
 * @vol: volume containing the EBA table to copy
 * @dst: destination table
 * @nentries: number of entries to copy
 *
 * Copy the EBA table stored in vol into the one pointed by dst.
 */
void ubi_eba_copy_table(struct ubi_volume *vol, struct ubi_eba_table *dst,
                        int nentries)
{
        struct ubi_eba_table *src;
        int i;

        ubi_assert(dst && vol && vol->eba_tbl);

        src = vol->eba_tbl;

        for (i = 0; i < nentries; i++)
                dst->entries[i].pnum = src->entries[i].pnum;
}
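
/*
 * Illustrative sketch of the intended use (assumed from the API, e.g. when
 * a volume is re-sized): build a table for the new geometry, carry the old
 * associations over, then swap the tables:
 *
 *        new_tbl = ubi_eba_create_table(vol, new_nentries);
 *        if (IS_ERR(new_tbl))
 *                return PTR_ERR(new_tbl);
 *        ubi_eba_copy_table(vol, new_tbl, min(old_nentries, new_nentries));
 *        ubi_eba_replace_table(vol, new_tbl);
 */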

/**
 * ubi_eba_replace_table - assign a new EBA table to a volume
 * @vol: volume to assign the table to
 * @tbl: new EBA table
 *
 * Assign a new EBA table to the volume and release the old one.
 */
void ubi_eba_replace_table(struct ubi_volume *vol, struct ubi_eba_table *tbl)
{
        ubi_eba_destroy_table(vol->eba_tbl);
        vol->eba_tbl = tbl;
}

/**
 * ltree_lookup - look up the lock tree.
 * @ubi: UBI device description object
 * @vol_id: volume ID
 * @lnum: logical eraseblock number
 *
 * This function returns a pointer to the corresponding &struct ubi_ltree_entry
 * object if the logical eraseblock is locked and %NULL if it is not.
 * @ubi->ltree_lock has to be locked.
 */
static struct ubi_ltree_entry *ltree_lookup(struct ubi_device *ubi, int vol_id,
                                            int lnum)
{
        struct rb_node *p;

        p = ubi->ltree.rb_node;
        while (p) {
                struct ubi_ltree_entry *le;

                le = rb_entry(p, struct ubi_ltree_entry, rb);

                if (vol_id < le->vol_id)
                        p = p->rb_left;
                else if (vol_id > le->vol_id)
                        p = p->rb_right;
                else {
                        if (lnum < le->lnum)
                                p = p->rb_left;
                        else if (lnum > le->lnum)
                                p = p->rb_right;
                        else
                                return le;
                }
        }

        return NULL;
}

/**
 * ltree_add_entry - add new entry to the lock tree.
 * @ubi: UBI device description object
 * @vol_id: volume ID
 * @lnum: logical eraseblock number
 *
 * This function adds a new entry for logical eraseblock (@vol_id, @lnum) to
 * the lock tree. If such an entry is already there, its usage counter is
 * increased. Returns a pointer to the lock tree entry or %-ENOMEM if memory
 * allocation failed.
 */
static struct ubi_ltree_entry *ltree_add_entry(struct ubi_device *ubi,
                                               int vol_id, int lnum)
{
        struct ubi_ltree_entry *le, *le1, *le_free;

        le = kmalloc(sizeof(struct ubi_ltree_entry), GFP_NOFS);
        if (!le)
                return ERR_PTR(-ENOMEM);

        le->users = 0;
        init_rwsem(&le->mutex);
        le->vol_id = vol_id;
        le->lnum = lnum;

        spin_lock(&ubi->ltree_lock);
        le1 = ltree_lookup(ubi, vol_id, lnum);

        if (le1) {
                /*
                 * This logical eraseblock is already locked. The newly
                 * allocated lock entry is not needed.
                 */
                le_free = le;
                le = le1;
        } else {
                struct rb_node **p, *parent = NULL;

                /*
                 * No lock entry, add the newly allocated one to the
                 * @ubi->ltree RB-tree.
                 */
                le_free = NULL;

                p = &ubi->ltree.rb_node;
                while (*p) {
                        parent = *p;
                        le1 = rb_entry(parent, struct ubi_ltree_entry, rb);

                        if (vol_id < le1->vol_id)
                                p = &(*p)->rb_left;
                        else if (vol_id > le1->vol_id)
                                p = &(*p)->rb_right;
                        else {
                                ubi_assert(lnum != le1->lnum);
                                if (lnum < le1->lnum)
                                        p = &(*p)->rb_left;
                                else
                                        p = &(*p)->rb_right;
                        }
                }

                rb_link_node(&le->rb, parent, p);
                rb_insert_color(&le->rb, &ubi->ltree);
        }
        le->users += 1;
        spin_unlock(&ubi->ltree_lock);

        kfree(le_free);
        return le;
}

/**
 * leb_read_lock - lock logical eraseblock for reading.
 * @ubi: UBI device description object
 * @vol_id: volume ID
 * @lnum: logical eraseblock number
 *
 * This function locks a logical eraseblock for reading. Returns zero in case
 * of success and a negative error code in case of failure.
 */
static int leb_read_lock(struct ubi_device *ubi, int vol_id, int lnum)
{
        struct ubi_ltree_entry *le;

        le = ltree_add_entry(ubi, vol_id, lnum);
        if (IS_ERR(le))
                return PTR_ERR(le);
        down_read(&le->mutex);
        return 0;
}

/**
 * leb_read_unlock - unlock logical eraseblock.
 * @ubi: UBI device description object
 * @vol_id: volume ID
 * @lnum: logical eraseblock number
 */
static void leb_read_unlock(struct ubi_device *ubi, int vol_id, int lnum)
{
        struct ubi_ltree_entry *le;

        spin_lock(&ubi->ltree_lock);
        le = ltree_lookup(ubi, vol_id, lnum);
        le->users -= 1;
        ubi_assert(le->users >= 0);
        up_read(&le->mutex);
        if (le->users == 0) {
                rb_erase(&le->rb, &ubi->ltree);
                kfree(le);
        }
        spin_unlock(&ubi->ltree_lock);
}
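
/*
 * Illustrative pairing (sketch): the read paths in this file bracket the
 * actual I/O with these helpers, keyed by (@vol_id, @lnum):
 *
 *        err = leb_read_lock(ubi, vol_id, lnum);
 *        if (err)
 *                return err;
 *        ... read from the mapped PEB ...
 *        leb_read_unlock(ubi, vol_id, lnum);
 */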

/**
 * leb_write_lock - lock logical eraseblock for writing.
 * @ubi: UBI device description object
 * @vol_id: volume ID
 * @lnum: logical eraseblock number
 *
 * This function locks a logical eraseblock for writing. Returns zero in case
 * of success and a negative error code in case of failure.
 */
static int leb_write_lock(struct ubi_device *ubi, int vol_id, int lnum)
{
        struct ubi_ltree_entry *le;

        le = ltree_add_entry(ubi, vol_id, lnum);
        if (IS_ERR(le))
                return PTR_ERR(le);
        down_write(&le->mutex);
        return 0;
}

/**
 * leb_write_trylock - try to lock logical eraseblock for writing.
 * @ubi: UBI device description object
 * @vol_id: volume ID
 * @lnum: logical eraseblock number
 *
 * This function locks a logical eraseblock for writing if there is no
 * contention and does nothing if there is contention. Returns %0 in case of
 * success, %1 in case of contention, and a negative error code in case of
 * failure.
 */
static int leb_write_trylock(struct ubi_device *ubi, int vol_id, int lnum)
{
        struct ubi_ltree_entry *le;

        le = ltree_add_entry(ubi, vol_id, lnum);
        if (IS_ERR(le))
                return PTR_ERR(le);
        if (down_write_trylock(&le->mutex))
                return 0;

        /* Contention, cancel */
        spin_lock(&ubi->ltree_lock);
        le->users -= 1;
        ubi_assert(le->users >= 0);
        if (le->users == 0) {
                rb_erase(&le->rb, &ubi->ltree);
                kfree(le);
        }
        spin_unlock(&ubi->ltree_lock);

        return 1;
}
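
/*
 * Caller pattern (sketch, mirroring ubi_eba_copy_leb() below): a non-zero
 * return (contention, or an allocation failure) is mapped to a retry rather
 * than a hard error:
 *
 *        err = leb_write_trylock(ubi, vol_id, lnum);
 *        if (err) {
 *                dbg_wl("contention on LEB %d:%d, cancel", vol_id, lnum);
 *                return MOVE_RETRY;
 *        }
 */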

/**
 * leb_write_unlock - unlock logical eraseblock.
 * @ubi: UBI device description object
 * @vol_id: volume ID
 * @lnum: logical eraseblock number
 */
static void leb_write_unlock(struct ubi_device *ubi, int vol_id, int lnum)
{
        struct ubi_ltree_entry *le;

        spin_lock(&ubi->ltree_lock);
        le = ltree_lookup(ubi, vol_id, lnum);
        le->users -= 1;
        ubi_assert(le->users >= 0);
        up_write(&le->mutex);
        if (le->users == 0) {
                rb_erase(&le->rb, &ubi->ltree);
                kfree(le);
        }
        spin_unlock(&ubi->ltree_lock);
}

/**
 * ubi_eba_is_mapped - check if a LEB is mapped.
 * @vol: volume description object
 * @lnum: logical eraseblock number
 *
 * This function returns true if the LEB is mapped, false otherwise.
 */
bool ubi_eba_is_mapped(struct ubi_volume *vol, int lnum)
{
        return vol->eba_tbl->entries[lnum].pnum >= 0;
}

/**
 * ubi_eba_unmap_leb - un-map logical eraseblock.
 * @ubi: UBI device description object
 * @vol: volume description object
 * @lnum: logical eraseblock number
 *
 * This function un-maps logical eraseblock @lnum and schedules the
 * corresponding physical eraseblock for erasure. Returns zero in case of
 * success and a negative error code in case of failure.
 */
int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol,
                      int lnum)
{
        int err, pnum, vol_id = vol->vol_id;

        if (ubi->ro_mode)
                return -EROFS;

        err = leb_write_lock(ubi, vol_id, lnum);
        if (err)
                return err;

        pnum = vol->eba_tbl->entries[lnum].pnum;
        if (pnum < 0)
                /* This logical eraseblock is already unmapped */
                goto out_unlock;

        dbg_eba("erase LEB %d:%d, PEB %d", vol_id, lnum, pnum);

        down_read(&ubi->fm_eba_sem);
        vol->eba_tbl->entries[lnum].pnum = UBI_LEB_UNMAPPED;
        up_read(&ubi->fm_eba_sem);
        err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 0);

out_unlock:
        leb_write_unlock(ubi, vol_id, lnum);
        return err;
}
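
/*
 * Usage sketch (illustrative): after a successful un-map, a subsequent
 * ubi_eba_read_leb() of this LEB returns 0xFF bytes, and the old PEB is
 * erased asynchronously by the wear-leveling sub-system:
 *
 *        err = ubi_eba_unmap_leb(ubi, vol, lnum);
 *        if (err)
 *                return err;
 */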

#ifdef CONFIG_MTD_UBI_FASTMAP
/**
 * check_mapping - check and fixup a mapping
 * @ubi: UBI device description object
 * @vol: volume description object
 * @lnum: logical eraseblock number
 * @pnum: physical eraseblock number
 *
 * Checks whether a given mapping is valid. Fastmap cannot track LEB unmap
 * operations, so if such an operation is interrupted the mapping still looks
 * good, but upon first read an ECC error is reported to the upper layer.
 * Normally this is fixed during the full scan at attach time; with Fastmap
 * we have to deal with it while reading.
 * If the PEB behind a LEB shows this symptom we change the mapping to
 * %UBI_LEB_UNMAPPED and schedule the PEB for erasure.
 *
 * Returns 0 on success, negative error code in case of failure.
 */
static int check_mapping(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
                         int *pnum)
{
        int err;
        struct ubi_vid_io_buf *vidb;
        struct ubi_vid_hdr *vid_hdr;

        if (!ubi->fast_attach)
                return 0;

        if (!vol->checkmap || test_bit(lnum, vol->checkmap))
                return 0;

        vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
        if (!vidb)
                return -ENOMEM;

        err = ubi_io_read_vid_hdr(ubi, *pnum, vidb, 0);
        if (err > 0 && err != UBI_IO_BITFLIPS) {
                int torture = 0;

                switch (err) {
                case UBI_IO_FF:
                case UBI_IO_FF_BITFLIPS:
                case UBI_IO_BAD_HDR:
                case UBI_IO_BAD_HDR_EBADMSG:
                        break;
                default:
                        ubi_assert(0);
                }

                if (err == UBI_IO_BAD_HDR_EBADMSG || err == UBI_IO_FF_BITFLIPS)
                        torture = 1;

                down_read(&ubi->fm_eba_sem);
                vol->eba_tbl->entries[lnum].pnum = UBI_LEB_UNMAPPED;
                up_read(&ubi->fm_eba_sem);
                ubi_wl_put_peb(ubi, vol->vol_id, lnum, *pnum, torture);

                *pnum = UBI_LEB_UNMAPPED;
        } else if (err < 0) {
                ubi_err(ubi, "unable to read VID header back from PEB %i: %i",
                        *pnum, err);

                goto out_free;
        } else {
                int found_vol_id, found_lnum;

                ubi_assert(err == 0 || err == UBI_IO_BITFLIPS);

                vid_hdr = ubi_get_vid_hdr(vidb);
                found_vol_id = be32_to_cpu(vid_hdr->vol_id);
                found_lnum = be32_to_cpu(vid_hdr->lnum);

                if (found_lnum != lnum || found_vol_id != vol->vol_id) {
                        ubi_err(ubi, "EBA mismatch! PEB %i is LEB %i:%i instead of LEB %i:%i",
                                *pnum, found_vol_id, found_lnum, vol->vol_id, lnum);
                        ubi_ro_mode(ubi);
                        err = -EINVAL;
                        goto out_free;
                }
        }

        set_bit(lnum, vol->checkmap);
        err = 0;

out_free:
        ubi_free_vid_buf(vidb);

        return err;
}
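
/*
 * Caller sketch (illustrative, mirroring ubi_eba_read_leb() and
 * ubi_eba_write_leb() below): the mapping is verified at most once per LEB
 * after a fast attach, and @pnum may come back as %UBI_LEB_UNMAPPED:
 *
 *        pnum = vol->eba_tbl->entries[lnum].pnum;
 *        if (pnum >= 0) {
 *                err = check_mapping(ubi, vol, lnum, &pnum);
 *                if (err < 0)
 *                        goto out_unlock;
 *        }
 */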

#else
static int check_mapping(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
                         int *pnum)
{
        return 0;
}
#endif

/**
 * ubi_eba_read_leb - read data.
 * @ubi: UBI device description object
 * @vol: volume description object
 * @lnum: logical eraseblock number
 * @buf: buffer to store the read data
 * @offset: offset from where to read
 * @len: how many bytes to read
 * @check: data CRC check flag
 *
 * If the logical eraseblock @lnum is unmapped, @buf is filled with 0xFF
 * bytes. The @check flag only makes sense for static volumes and forces
 * eraseblock data CRC checking.
 *
 * In case of success this function returns zero. In case of a static volume,
 * if the data CRC mismatches - %-EBADMSG is returned. %-EBADMSG may also be
 * returned for any volume type if an ECC error was detected by the MTD device
 * driver. Other negative error codes may be returned in case of other errors.
 */
int ubi_eba_read_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
                     void *buf, int offset, int len, int check)
{
        int err, pnum, scrub = 0, vol_id = vol->vol_id;
        struct ubi_vid_io_buf *vidb;
        struct ubi_vid_hdr *vid_hdr;
        uint32_t crc;

        err = leb_read_lock(ubi, vol_id, lnum);
        if (err)
                return err;

        pnum = vol->eba_tbl->entries[lnum].pnum;
        if (pnum >= 0) {
                err = check_mapping(ubi, vol, lnum, &pnum);
                if (err < 0)
                        goto out_unlock;
        }

        if (pnum == UBI_LEB_UNMAPPED) {
                /*
                 * The logical eraseblock is not mapped, fill the whole buffer
                 * with 0xFF bytes. The exception is static volumes for which
                 * it is an error to read unmapped logical eraseblocks.
                 */
                dbg_eba("read %d bytes from offset %d of LEB %d:%d (unmapped)",
                        len, offset, vol_id, lnum);
                leb_read_unlock(ubi, vol_id, lnum);
                ubi_assert(vol->vol_type != UBI_STATIC_VOLUME);
                memset(buf, 0xFF, len);
                return 0;
        }

        dbg_eba("read %d bytes from offset %d of LEB %d:%d, PEB %d",
                len, offset, vol_id, lnum, pnum);

        if (vol->vol_type == UBI_DYNAMIC_VOLUME)
                check = 0;

retry:
        if (check) {
                vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
                if (!vidb) {
                        err = -ENOMEM;
                        goto out_unlock;
                }

                vid_hdr = ubi_get_vid_hdr(vidb);

                err = ubi_io_read_vid_hdr(ubi, pnum, vidb, 1);
                if (err && err != UBI_IO_BITFLIPS) {
                        if (err > 0) {
                                /*
                                 * The header is either absent or corrupted.
                                 * The former case means there is a bug -
                                 * switch to read-only mode just in case.
                                 * The latter case means a real corruption - we
                                 * may try to recover data. FIXME: but this is
                                 * not implemented.
                                 */
                                if (err == UBI_IO_BAD_HDR_EBADMSG ||
                                    err == UBI_IO_BAD_HDR) {
                                        ubi_warn(ubi, "corrupted VID header at PEB %d, LEB %d:%d",
                                                 pnum, vol_id, lnum);
                                        err = -EBADMSG;
                                } else {
                                        /*
                                         * Ending up here in the non-Fastmap case
                                         * is a clear bug as the VID header had to
                                         * be present at scan time to have it referenced.
                                         * With fastmap the story is more complicated.
                                         * Fastmap has the mapping info without the need
                                         * of a full scan. So the LEB could have been
                                         * unmapped, Fastmap cannot know this and keeps
                                         * the LEB referenced.
                                         * This is valid and works as the layer above UBI
                                         * has to do bookkeeping about used/referenced
                                         * LEBs in any case.
                                         */
                                        if (ubi->fast_attach) {
                                                err = -EBADMSG;
                                        } else {
                                                err = -EINVAL;
                                                ubi_ro_mode(ubi);
                                        }
                                }
                        }
                        goto out_free;
                } else if (err == UBI_IO_BITFLIPS)
                        scrub = 1;

                ubi_assert(lnum < be32_to_cpu(vid_hdr->used_ebs));
                ubi_assert(len == be32_to_cpu(vid_hdr->data_size));

                crc = be32_to_cpu(vid_hdr->data_crc);
                ubi_free_vid_buf(vidb);
        }

        err = ubi_io_read_data(ubi, buf, pnum, offset, len);
        if (err) {
                if (err == UBI_IO_BITFLIPS)
                        scrub = 1;
                else if (mtd_is_eccerr(err)) {
                        if (vol->vol_type == UBI_DYNAMIC_VOLUME)
                                goto out_unlock;
                        scrub = 1;
                        if (!check) {
                                ubi_msg(ubi, "force data checking");
                                check = 1;
                                goto retry;
                        }
                } else
                        goto out_unlock;
        }

        if (check) {
                uint32_t crc1 = crc32(UBI_CRC32_INIT, buf, len);

                if (crc1 != crc) {
                        ubi_warn(ubi, "CRC error: calculated %#08x, must be %#08x",
                                 crc1, crc);
                        err = -EBADMSG;
                        goto out_unlock;
                }
        }

        if (scrub)
                err = ubi_wl_scrub_peb(ubi, pnum);

        leb_read_unlock(ubi, vol_id, lnum);
        return err;

out_free:
        ubi_free_vid_buf(vidb);
out_unlock:
        leb_read_unlock(ubi, vol_id, lnum);
        return err;
}
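
/*
 * Caller sketch (illustrative): reading @len bytes from offset 0 of a LEB
 * of a dynamic volume, without forcing the CRC check:
 *
 *        err = ubi_eba_read_leb(ubi, vol, lnum, buf, 0, len, 0);
 *        if (err == -EBADMSG)
 *                ... data is corrupted; handle or propagate ...
 *        else if (err)
 *                return err;
 */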

/**
 * ubi_eba_read_leb_sg - read data into a scatter gather list.
 * @ubi: UBI device description object
 * @vol: volume description object
 * @lnum: logical eraseblock number
 * @sgl: UBI scatter gather list to store the read data
 * @offset: offset from where to read
 * @len: how many bytes to read
 * @check: data CRC check flag
 *
 * This function works exactly like ubi_eba_read_leb(). But instead of
 * storing the read data into a buffer it writes it to a UBI scatter gather
 * list.
 */
int ubi_eba_read_leb_sg(struct ubi_device *ubi, struct ubi_volume *vol,
                        struct ubi_sgl *sgl, int lnum, int offset, int len,
                        int check)
{
        int to_read;
        int ret;
        struct scatterlist *sg;

        for (;;) {
                ubi_assert(sgl->list_pos < UBI_MAX_SG_COUNT);
                sg = &sgl->sg[sgl->list_pos];
                if (len < sg->length - sgl->page_pos)
                        to_read = len;
                else
                        to_read = sg->length - sgl->page_pos;

                ret = ubi_eba_read_leb(ubi, vol, lnum,
                                       sg_virt(sg) + sgl->page_pos, offset,
                                       to_read, check);
                if (ret < 0)
                        return ret;

                offset += to_read;
                len -= to_read;
                if (!len) {
                        sgl->page_pos += to_read;
                        if (sgl->page_pos == sg->length) {
                                sgl->list_pos++;
                                sgl->page_pos = 0;
                        }

                        break;
                }

                sgl->list_pos++;
                sgl->page_pos = 0;
        }

        return ret;
}

/**
 * try_recover_peb - try to recover from write failure.
 * @vol: volume description object
 * @pnum: the physical eraseblock to recover
 * @lnum: logical eraseblock number
 * @buf: data which was not written because of the write failure
 * @offset: offset of the failed write
 * @len: how many bytes should have been written
 * @vidb: VID buffer
 * @retry: whether the caller should retry in case of failure
 *
 * This function is called in case of a write failure and moves all good data
 * from the potentially bad physical eraseblock to a good physical eraseblock.
 * This function also writes the data which was not written due to the failure.
 * Returns 0 in case of success, and a negative error code in case of failure.
 * In case of failure, the @retry parameter is set to false if this is a fatal
 * error (retrying won't help), and true otherwise.
 */
static int try_recover_peb(struct ubi_volume *vol, int pnum, int lnum,
                           const void *buf, int offset, int len,
                           struct ubi_vid_io_buf *vidb, bool *retry)
{
        struct ubi_device *ubi = vol->ubi;
        struct ubi_vid_hdr *vid_hdr;
        int new_pnum, err, vol_id = vol->vol_id, data_size;
        uint32_t crc;

        *retry = false;

        new_pnum = ubi_wl_get_peb(ubi);
        if (new_pnum < 0) {
                err = new_pnum;
                goto out_put;
        }

        ubi_msg(ubi, "recover PEB %d, move data to PEB %d",
                pnum, new_pnum);

        err = ubi_io_read_vid_hdr(ubi, pnum, vidb, 1);
        if (err && err != UBI_IO_BITFLIPS) {
                if (err > 0)
                        err = -EIO;
                goto out_put;
        }

        vid_hdr = ubi_get_vid_hdr(vidb);
        ubi_assert(vid_hdr->vol_type == UBI_VID_DYNAMIC);

        mutex_lock(&ubi->buf_mutex);
        memset(ubi->peb_buf + offset, 0xFF, len);

        /* Read everything before the area where the write failure happened */
        if (offset > 0) {
                err = ubi_io_read_data(ubi, ubi->peb_buf, pnum, 0, offset);
                if (err && err != UBI_IO_BITFLIPS)
                        goto out_unlock;
        }

        *retry = true;

        memcpy(ubi->peb_buf + offset, buf, len);

        data_size = offset + len;
        crc = crc32(UBI_CRC32_INIT, ubi->peb_buf, data_size);
        vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
        vid_hdr->copy_flag = 1;
        vid_hdr->data_size = cpu_to_be32(data_size);
        vid_hdr->data_crc = cpu_to_be32(crc);
        err = ubi_io_write_vid_hdr(ubi, new_pnum, vidb);
        if (err)
                goto out_unlock;

        err = ubi_io_write_data(ubi, ubi->peb_buf, new_pnum, 0, data_size);

out_unlock:
        mutex_unlock(&ubi->buf_mutex);

        if (!err)
                vol->eba_tbl->entries[lnum].pnum = new_pnum;

out_put:
        up_read(&ubi->fm_eba_sem);

        if (!err) {
                ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
                ubi_msg(ubi, "data was successfully recovered");
        } else if (new_pnum >= 0) {
                /*
                 * Bad luck? This physical eraseblock is bad too? Crud. Let's
                 * try to get another one.
                 */
                ubi_wl_put_peb(ubi, vol_id, lnum, new_pnum, 1);
                ubi_warn(ubi, "failed to write to PEB %d", new_pnum);
        }

        return err;
}

/**
 * recover_peb - recover from write failure.
 * @ubi: UBI device description object
 * @pnum: the physical eraseblock to recover
 * @vol_id: volume ID
 * @lnum: logical eraseblock number
 * @buf: data which was not written because of the write failure
 * @offset: offset of the failed write
 * @len: how many bytes should have been written
 *
 * This function is called in case of a write failure and moves all good data
 * from the potentially bad physical eraseblock to a good physical eraseblock.
 * This function also writes the data which was not written due to the failure.
 * Returns 0 in case of success, and a negative error code in case of failure.
 * This function tries %UBI_IO_RETRIES times before giving up.
 */
static int recover_peb(struct ubi_device *ubi, int pnum, int vol_id, int lnum,
                       const void *buf, int offset, int len)
{
        int err, idx = vol_id2idx(ubi, vol_id), tries;
        struct ubi_volume *vol = ubi->volumes[idx];
        struct ubi_vid_io_buf *vidb;

        vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
        if (!vidb)
                return -ENOMEM;

        for (tries = 0; tries <= UBI_IO_RETRIES; tries++) {
                bool retry;

                err = try_recover_peb(vol, pnum, lnum, buf, offset, len, vidb,
                                      &retry);
                if (!err || !retry)
                        break;

                ubi_msg(ubi, "try again");
        }

        ubi_free_vid_buf(vidb);

        return err;
}

/**
 * try_write_vid_and_data - try to write VID header and data to a new PEB.
 * @vol: volume description object
 * @lnum: logical eraseblock number
 * @vidb: the VID buffer to write
 * @buf: buffer containing the data
 * @offset: where to start writing data
 * @len: how many bytes should be written
 *
 * This function tries to write VID header and data belonging to logical
 * eraseblock @lnum of volume @vol to a new physical eraseblock. Returns zero
 * in case of success and a negative error code in case of failure.
 * In case of error, it is possible that something was still written to the
 * flash media, but it may be garbage.
 */
static int try_write_vid_and_data(struct ubi_volume *vol, int lnum,
                                  struct ubi_vid_io_buf *vidb, const void *buf,
                                  int offset, int len)
{
        struct ubi_device *ubi = vol->ubi;
        int pnum, opnum, err, vol_id = vol->vol_id;

        pnum = ubi_wl_get_peb(ubi);
        if (pnum < 0) {
                err = pnum;
                goto out_put;
        }

        opnum = vol->eba_tbl->entries[lnum].pnum;

        dbg_eba("write VID hdr and %d bytes at offset %d of LEB %d:%d, PEB %d",
                len, offset, vol_id, lnum, pnum);

        err = ubi_io_write_vid_hdr(ubi, pnum, vidb);
        if (err) {
                ubi_warn(ubi, "failed to write VID header to LEB %d:%d, PEB %d",
                         vol_id, lnum, pnum);
                goto out_put;
        }

        if (len) {
                err = ubi_io_write_data(ubi, buf, pnum, offset, len);
                if (err) {
                        ubi_warn(ubi,
                                 "failed to write %d bytes at offset %d of LEB %d:%d, PEB %d",
                                 len, offset, vol_id, lnum, pnum);
                        goto out_put;
                }
        }

        vol->eba_tbl->entries[lnum].pnum = pnum;

out_put:
        up_read(&ubi->fm_eba_sem);

        if (err && pnum >= 0)
                err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
        else if (!err && opnum >= 0)
                err = ubi_wl_put_peb(ubi, vol_id, lnum, opnum, 0);

        return err;
}
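
/*
 * Caller pattern (illustrative; this is the loop used by the write paths
 * below): on -EIO with bad-PEB handling enabled, refresh the sequence
 * number and retry with another PEB:
 *
 *        for (tries = 0; tries <= UBI_IO_RETRIES; tries++) {
 *                err = try_write_vid_and_data(vol, lnum, vidb, buf, offset, len);
 *                if (err != -EIO || !ubi->bad_allowed)
 *                        break;
 *
 *                vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
 *                ubi_msg(ubi, "try another PEB");
 *        }
 */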

/**
 * ubi_eba_write_leb - write data to dynamic volume.
 * @ubi: UBI device description object
 * @vol: volume description object
 * @lnum: logical eraseblock number
 * @buf: the data to write
 * @offset: offset within the logical eraseblock where to write
 * @len: how many bytes to write
 *
 * This function writes data to logical eraseblock @lnum of a dynamic volume
 * @vol. Returns zero in case of success and a negative error code in case
 * of failure. In case of error, it is possible that something was still
 * written to the flash media, but it may be garbage.
 * This function retries %UBI_IO_RETRIES times before giving up.
 */
int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
                      const void *buf, int offset, int len)
{
        int err, pnum, tries, vol_id = vol->vol_id;
        struct ubi_vid_io_buf *vidb;
        struct ubi_vid_hdr *vid_hdr;

        if (ubi->ro_mode)
                return -EROFS;

        err = leb_write_lock(ubi, vol_id, lnum);
        if (err)
                return err;

        pnum = vol->eba_tbl->entries[lnum].pnum;
        if (pnum >= 0) {
                err = check_mapping(ubi, vol, lnum, &pnum);
                if (err < 0)
                        goto out;
        }

        if (pnum >= 0) {
                dbg_eba("write %d bytes at offset %d of LEB %d:%d, PEB %d",
                        len, offset, vol_id, lnum, pnum);

                err = ubi_io_write_data(ubi, buf, pnum, offset, len);
                if (err) {
                        ubi_warn(ubi, "failed to write data to PEB %d", pnum);
                        if (err == -EIO && ubi->bad_allowed)
                                err = recover_peb(ubi, pnum, vol_id, lnum, buf,
                                                  offset, len);
                }

                goto out;
        }

        /*
         * The logical eraseblock is not mapped. We have to get a free physical
         * eraseblock and write the volume identifier header there first.
         */
        vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
        if (!vidb) {
                leb_write_unlock(ubi, vol_id, lnum);
                return -ENOMEM;
        }

        vid_hdr = ubi_get_vid_hdr(vidb);

        vid_hdr->vol_type = UBI_VID_DYNAMIC;
        vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
        vid_hdr->vol_id = cpu_to_be32(vol_id);
        vid_hdr->lnum = cpu_to_be32(lnum);
        vid_hdr->compat = ubi_get_compat(ubi, vol_id);
        vid_hdr->data_pad = cpu_to_be32(vol->data_pad);

        for (tries = 0; tries <= UBI_IO_RETRIES; tries++) {
                err = try_write_vid_and_data(vol, lnum, vidb, buf, offset, len);
                if (err != -EIO || !ubi->bad_allowed)
                        break;

                /*
                 * Fortunately, this is the first write operation to this
                 * physical eraseblock, so just put it and request a new one.
                 * We assume that if this physical eraseblock went bad, the
                 * erase code will handle that.
                 */
                vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
                ubi_msg(ubi, "try another PEB");
        }

        ubi_free_vid_buf(vidb);

out:
        if (err)
                ubi_ro_mode(ubi);

        leb_write_unlock(ubi, vol_id, lnum);

        return err;
}
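
/*
 * Usage sketch (illustrative): a typical full-LEB write from an upper
 * layer; @offset and @len are expected to respect the UBI minimal I/O unit
 * (vol->usable_leb_size is assumed to hold the usable LEB size):
 *
 *        err = ubi_eba_write_leb(ubi, vol, lnum, buf, 0, vol->usable_leb_size);
 *        if (err)
 *                return err;
 */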

/**
 * ubi_eba_write_leb_st - write data to static volume.
 * @ubi: UBI device description object
 * @vol: volume description object
 * @lnum: logical eraseblock number
 * @buf: data to write
 * @len: how many bytes to write
 * @used_ebs: how many logical eraseblocks will this volume contain
 *
 * This function writes data to logical eraseblock @lnum of static volume
 * @vol. The @used_ebs argument should contain the total number of logical
 * eraseblocks in this static volume.
 *
 * When writing to the last logical eraseblock, the @len argument doesn't have
 * to be aligned to the minimal I/O unit size. Instead, it has to be equivalent
 * to the real data size, although the @buf buffer has to contain the
 * alignment. In all other cases, @len has to be aligned.
 *
 * It is prohibited to write more than once to logical eraseblocks of static
 * volumes. This function returns zero in case of success and a negative error
 * code in case of failure.
 */
int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol,
                         int lnum, const void *buf, int len, int used_ebs)
{
        int err, tries, data_size = len, vol_id = vol->vol_id;
        struct ubi_vid_io_buf *vidb;
        struct ubi_vid_hdr *vid_hdr;
        uint32_t crc;

        if (ubi->ro_mode)
                return -EROFS;

        if (lnum == used_ebs - 1)
                /* If this is the last LEB @len may be unaligned */
                len = ALIGN(data_size, ubi->min_io_size);
        else
                ubi_assert(!(len & (ubi->min_io_size - 1)));

        vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
        if (!vidb)
                return -ENOMEM;

        vid_hdr = ubi_get_vid_hdr(vidb);

        err = leb_write_lock(ubi, vol_id, lnum);
        if (err)
                goto out;

        vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
        vid_hdr->vol_id = cpu_to_be32(vol_id);
        vid_hdr->lnum = cpu_to_be32(lnum);
        vid_hdr->compat = ubi_get_compat(ubi, vol_id);
        vid_hdr->data_pad = cpu_to_be32(vol->data_pad);

        crc = crc32(UBI_CRC32_INIT, buf, data_size);
        vid_hdr->vol_type = UBI_VID_STATIC;
        vid_hdr->data_size = cpu_to_be32(data_size);
        vid_hdr->used_ebs = cpu_to_be32(used_ebs);
        vid_hdr->data_crc = cpu_to_be32(crc);

        ubi_assert(vol->eba_tbl->entries[lnum].pnum < 0);

        for (tries = 0; tries <= UBI_IO_RETRIES; tries++) {
                err = try_write_vid_and_data(vol, lnum, vidb, buf, 0, len);
                if (err != -EIO || !ubi->bad_allowed)
                        break;

                vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
                ubi_msg(ubi, "try another PEB");
        }

        if (err)
                ubi_ro_mode(ubi);

        leb_write_unlock(ubi, vol_id, lnum);

out:
        ubi_free_vid_buf(vidb);

        return err;
}
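
/*
 * Worked example (illustrative) of the last-LEB alignment rule above: with
 * a 2048-byte minimal I/O unit and 1000 bytes of data in the last LEB,
 *
 *        len = ALIGN(1000, 2048) = 2048     (what goes to flash)
 *        vid_hdr->data_size = 1000          (what the header records)
 *
 * so the CRC is computed over the real 1000 data bytes only.
 */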

/**
 * ubi_eba_atomic_leb_change - change logical eraseblock atomically.
 * @ubi: UBI device description object
 * @vol: volume description object
 * @lnum: logical eraseblock number
 * @buf: data to write
 * @len: how many bytes to write
 *
 * This function changes the contents of a logical eraseblock atomically. @buf
 * has to contain the new logical eraseblock data, and @len - the length of the
 * data, which has to be aligned. This function guarantees that in case of an
 * unclean reboot the old contents are preserved. Returns zero in case of
 * success and a negative error code in case of failure.
 *
 * UBI reserves one LEB for the "atomic LEB change" operation, so only one
 * LEB change may be done at a time. This is ensured by @ubi->alc_mutex.
 */
int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
                              int lnum, const void *buf, int len)
{
        int err, tries, vol_id = vol->vol_id;
        struct ubi_vid_io_buf *vidb;
        struct ubi_vid_hdr *vid_hdr;
        uint32_t crc;

        if (ubi->ro_mode)
                return -EROFS;

        if (len == 0) {
                /*
                 * Special case when data length is zero. In this case the LEB
                 * has to be unmapped and mapped somewhere else.
                 */
                err = ubi_eba_unmap_leb(ubi, vol, lnum);
                if (err)
                        return err;
                return ubi_eba_write_leb(ubi, vol, lnum, NULL, 0, 0);
        }

        vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
        if (!vidb)
                return -ENOMEM;

        vid_hdr = ubi_get_vid_hdr(vidb);

        mutex_lock(&ubi->alc_mutex);
        err = leb_write_lock(ubi, vol_id, lnum);
        if (err)
                goto out_mutex;

        vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
        vid_hdr->vol_id = cpu_to_be32(vol_id);
        vid_hdr->lnum = cpu_to_be32(lnum);
        vid_hdr->compat = ubi_get_compat(ubi, vol_id);
        vid_hdr->data_pad = cpu_to_be32(vol->data_pad);

        crc = crc32(UBI_CRC32_INIT, buf, len);
        vid_hdr->vol_type = UBI_VID_DYNAMIC;
        vid_hdr->data_size = cpu_to_be32(len);
        vid_hdr->copy_flag = 1;
        vid_hdr->data_crc = cpu_to_be32(crc);

        dbg_eba("change LEB %d:%d", vol_id, lnum);

        for (tries = 0; tries <= UBI_IO_RETRIES; tries++) {
                err = try_write_vid_and_data(vol, lnum, vidb, buf, 0, len);
                if (err != -EIO || !ubi->bad_allowed)
                        break;

                vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
                ubi_msg(ubi, "try another PEB");
        }

        /*
         * Either this flash device does not tolerate bad eraseblocks, or
         * something nasty and unexpected happened. Switch to read-only
         * mode just in case.
         */
        if (err)
                ubi_ro_mode(ubi);

        leb_write_unlock(ubi, vol_id, lnum);

out_mutex:
        mutex_unlock(&ubi->alc_mutex);
        ubi_free_vid_buf(vidb);
        return err;
}
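
/*
 * Usage sketch (illustrative): atomically replacing the contents of a LEB
 * so that an unclean reboot leaves either the complete old data or the
 * complete new data, never a mix:
 *
 *        err = ubi_eba_atomic_leb_change(ubi, vol, lnum, buf, len);
 *        if (err)
 *                return err;
 */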

/**
 * is_error_sane - check whether a read error is sane.
 * @err: code of the error that happened during reading
 *
 * This is a helper function for 'ubi_eba_copy_leb()' which is called when we
 * cannot read data from the target PEB (an error @err happened). If the error
 * code is sane, then we treat this error as non-fatal. Otherwise the error is
 * fatal and UBI will be switched to R/O mode later.
 *
 * The idea is that we try not to switch to R/O mode if the read error is
 * something which suggests there was a real read problem. E.g., %-EIO. Or a
 * memory allocation failed (%-ENOMEM). Otherwise, it is safer to switch to R/O
 * mode, simply because we do not know what happened at the MTD level, and we
 * cannot handle this. E.g., the underlying driver may have become crazy, and
 * it is safer to switch to R/O mode to preserve the data.
 *
 * And bear in mind, this is about reading from the target PEB, i.e. the PEB
 * which we have just written to.
 */
static int is_error_sane(int err)
{
        if (err == -EIO || err == -ENOMEM || err == UBI_IO_BAD_HDR ||
            err == UBI_IO_BAD_HDR_EBADMSG || err == -ETIMEDOUT)
                return 0;

        return 1;
}

/**
 * ubi_eba_copy_leb - copy logical eraseblock.
 * @ubi: UBI device description object
 * @from: physical eraseblock number from where to copy
 * @to: physical eraseblock number where to copy
 * @vidb: data structure from where the VID header is derived
 *
 * This function copies logical eraseblock from physical eraseblock @from to
 * physical eraseblock @to. The @vid_hdr buffer may be changed by this
 * function. Returns:
 *   o %0 in case of success;
 *   o %MOVE_CANCEL_RACE, %MOVE_TARGET_WR_ERR, %MOVE_TARGET_BITFLIPS, etc;
 *   o a negative error code in case of failure.
 */
int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
                     struct ubi_vid_io_buf *vidb)
{
        int err, vol_id, lnum, data_size, aldata_size, idx;
        struct ubi_vid_hdr *vid_hdr = ubi_get_vid_hdr(vidb);
        struct ubi_volume *vol;
        uint32_t crc;

        ubi_assert(rwsem_is_locked(&ubi->fm_eba_sem));

        vol_id = be32_to_cpu(vid_hdr->vol_id);
        lnum = be32_to_cpu(vid_hdr->lnum);

        dbg_wl("copy LEB %d:%d, PEB %d to PEB %d", vol_id, lnum, from, to);

        if (vid_hdr->vol_type == UBI_VID_STATIC) {
                data_size = be32_to_cpu(vid_hdr->data_size);
                aldata_size = ALIGN(data_size, ubi->min_io_size);
        } else
                data_size = aldata_size =
                            ubi->leb_size - be32_to_cpu(vid_hdr->data_pad);

        idx = vol_id2idx(ubi, vol_id);
        spin_lock(&ubi->volumes_lock);
        /*
         * Note, we may race with volume deletion, which means that the volume
         * this logical eraseblock belongs to might be being deleted. Since the
         * volume deletion un-maps all the volume's logical eraseblocks, it will
         * be locked in 'ubi_wl_put_peb()' and wait for the WL worker to finish.
         */
        vol = ubi->volumes[idx];
        spin_unlock(&ubi->volumes_lock);
        if (!vol) {
                /* No need to do further work, cancel */
                dbg_wl("volume %d is being removed, cancel", vol_id);
                return MOVE_CANCEL_RACE;
        }

        /*
         * We do not want anybody to write to this logical eraseblock while we
         * are moving it, so lock it.
         *
         * Note, we are using non-waiting locking here, because we cannot sleep
         * on the LEB, since it may cause deadlocks. Indeed, imagine a task is
         * unmapping the LEB which is mapped to the PEB we are going to move
         * (@from). This task locks the LEB and goes sleep in the
         * 'ubi_wl_put_peb()' function on the @ubi->move_mutex. In turn, we are
         * holding @ubi->move_mutex and go sleep on the LEB lock. So, if the
         * LEB is already locked, we just do not move it and return
         * %MOVE_RETRY. Note, we do not return %MOVE_CANCEL_RACE here because
         * we do not know the reasons of the contention - it may be just a
         * normal I/O on this LEB, so we want to re-try.
         */
        err = leb_write_trylock(ubi, vol_id, lnum);
        if (err) {
                dbg_wl("contention on LEB %d:%d, cancel", vol_id, lnum);
                return MOVE_RETRY;
        }

        /*
         * The LEB might have been put meanwhile, and the task which put it is
         * probably waiting on @ubi->move_mutex. No need to continue the work,
         * cancel it.
         */
        if (vol->eba_tbl->entries[lnum].pnum != from) {
                dbg_wl("LEB %d:%d is no longer mapped to PEB %d, mapped to PEB %d, cancel",
                       vol_id, lnum, from, vol->eba_tbl->entries[lnum].pnum);
                err = MOVE_CANCEL_RACE;
                goto out_unlock_leb;
        }

        /*
         * OK, now the LEB is locked and we can safely start moving it. Since
         * this function utilizes the @ubi->peb_buf buffer which is shared
         * with some other functions - we lock the buffer by taking the
         * @ubi->buf_mutex.
         */
        mutex_lock(&ubi->buf_mutex);
        dbg_wl("read %d bytes of data", aldata_size);
        err = ubi_io_read_data(ubi, ubi->peb_buf, from, 0, aldata_size);
        if (err && err != UBI_IO_BITFLIPS) {
                ubi_warn(ubi, "error %d while reading data from PEB %d",
                         err, from);
                err = MOVE_SOURCE_RD_ERR;
                goto out_unlock_buf;
        }

        /*
         * Now we have got to calculate how much data we have to copy. In
         * case of a static volume it is fairly easy - the VID header contains
         * the data size. In case of a dynamic volume it is more difficult - we
         * have to read the contents, cut 0xFF bytes from the end and copy only
         * the first part. We must do this to avoid writing 0xFF bytes as it
         * may have some side-effects. And not only this. It is important not
         * to include those 0xFFs in the CRC because later they may be filled
         * by data.
         */
        if (vid_hdr->vol_type == UBI_VID_DYNAMIC)
                aldata_size = data_size =
                        ubi_calc_data_len(ubi, ubi->peb_buf, data_size);

        cond_resched();
        crc = crc32(UBI_CRC32_INIT, ubi->peb_buf, data_size);
        cond_resched();

        /*
         * It may turn out that the whole @from physical eraseblock contains
         * only 0xFF bytes. Then we have to only write the VID header and not
         * write any data. This also means we should not set
         * @vid_hdr->copy_flag, @vid_hdr->data_size, and @vid_hdr->data_crc.
         */
        if (data_size > 0) {
                vid_hdr->copy_flag = 1;
                vid_hdr->data_size = cpu_to_be32(data_size);
                vid_hdr->data_crc = cpu_to_be32(crc);
        }
        vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));

        err = ubi_io_write_vid_hdr(ubi, to, vidb);
        if (err) {
                if (err == -EIO)
                        err = MOVE_TARGET_WR_ERR;
                goto out_unlock_buf;
        }

        cond_resched();

        /* Read the VID header back and check if it was written correctly */
        err = ubi_io_read_vid_hdr(ubi, to, vidb, 1);
        if (err) {
                if (err != UBI_IO_BITFLIPS) {
                        ubi_warn(ubi, "error %d while reading VID header back from PEB %d",
                                 err, to);
                        if (is_error_sane(err))
                                err = MOVE_TARGET_RD_ERR;
                } else
                        err = MOVE_TARGET_BITFLIPS;
                goto out_unlock_buf;
        }

        if (data_size > 0) {
                err = ubi_io_write_data(ubi, ubi->peb_buf, to, 0, aldata_size);
                if (err) {
                        if (err == -EIO)
                                err = MOVE_TARGET_WR_ERR;
                        goto out_unlock_buf;
                }

                cond_resched();
        }

        ubi_assert(vol->eba_tbl->entries[lnum].pnum == from);
        vol->eba_tbl->entries[lnum].pnum = to;

out_unlock_buf:
        mutex_unlock(&ubi->buf_mutex);
out_unlock_leb:
        leb_write_unlock(ubi, vol_id, lnum);
        return err;
}

/**
 * print_rsvd_warning - warn about not having enough reserved PEBs.
 * @ubi: UBI device description object
 * @ai: UBI attach info object
 *
 * This is a helper function for 'ubi_eba_init()' which is called when UBI
 * cannot reserve enough PEBs for bad block handling. This function makes a
 * decision whether we have to print a warning or not. The algorithm is as
 * follows:
 *   o if this is a new UBI image, then just print the warning
 *   o if this is a UBI image which has already been used for some time, print
 *     a warning only if we can reserve less than 10% of the expected amount
 *     of the reserved PEBs.
 *
 * The idea is that when UBI is used, PEBs become bad, and the reserved pool
 * of PEBs becomes smaller, which is normal and we do not want to scare users
 * with a warning every time they attach the MTD device. This was an issue
 * reported by real users.
 */
static void print_rsvd_warning(struct ubi_device *ubi,
                               struct ubi_attach_info *ai)
{
        /*
         * The 1 << 18 (256Ki) number is picked randomly, just a reasonably
         * large number to distinguish between newly flashed and used images.
         */
        if (ai->max_sqnum > (1 << 18)) {
                int min = ubi->beb_rsvd_level / 10;

                if (!min)
                        min = 1;
                if (ubi->beb_rsvd_pebs > min)
                        return;
        }

        ubi_warn(ubi, "cannot reserve enough PEBs for bad PEB handling, reserved %d, need %d",
                 ubi->beb_rsvd_pebs, ubi->beb_rsvd_level);
        if (ubi->corr_peb_count)
                ubi_warn(ubi, "%d PEBs are corrupted and not used",
                         ubi->corr_peb_count);
}
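
/*
 * Worked example (illustrative): with @ubi->beb_rsvd_level == 40 on a used
 * image (ai->max_sqnum > 1 << 18), the warning is only printed when fewer
 * than 40 / 10 == 4 PEBs could actually be reserved; a freshly flashed
 * image warns whenever the full amount cannot be reserved.
 */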

/**
 * self_check_eba - run a self check on the EBA table constructed by fastmap.
 * @ubi: UBI device description object
 * @ai_fastmap: UBI attach info object created by fastmap
 * @ai_scan: UBI attach info object created by scanning
 *
 * Returns < 0 in case of an internal error, 0 otherwise.
 * If a bad EBA table entry was found it will be printed out and
 * ubi_assert() is triggered.
 */
int self_check_eba(struct ubi_device *ubi, struct ubi_attach_info *ai_fastmap,
                   struct ubi_attach_info *ai_scan)
{
        int i, j, num_volumes, ret = 0;
        int **scan_eba, **fm_eba;
        struct ubi_ainf_volume *av;
        struct ubi_volume *vol;
        struct ubi_ainf_peb *aeb;
        struct rb_node *rb;

        num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT;

        scan_eba = kmalloc_array(num_volumes, sizeof(*scan_eba), GFP_KERNEL);
        if (!scan_eba)
                return -ENOMEM;

        fm_eba = kmalloc_array(num_volumes, sizeof(*fm_eba), GFP_KERNEL);
        if (!fm_eba) {
                kfree(scan_eba);
                return -ENOMEM;
        }

        for (i = 0; i < num_volumes; i++) {
                vol = ubi->volumes[i];
                if (!vol)
                        continue;

                scan_eba[i] = kmalloc_array(vol->reserved_pebs,
                                            sizeof(**scan_eba), GFP_KERNEL);
                if (!scan_eba[i]) {
                        ret = -ENOMEM;
                        goto out_free;
                }

                fm_eba[i] = kmalloc_array(vol->reserved_pebs,
                                          sizeof(**fm_eba), GFP_KERNEL);
                if (!fm_eba[i]) {
                        kfree(scan_eba[i]);
                        ret = -ENOMEM;
                        goto out_free;
                }

                for (j = 0; j < vol->reserved_pebs; j++)
                        scan_eba[i][j] = fm_eba[i][j] = UBI_LEB_UNMAPPED;

                av = ubi_find_av(ai_scan, idx2vol_id(ubi, i));
                if (!av)
                        continue;

                ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb)
                        scan_eba[i][aeb->lnum] = aeb->pnum;

                av = ubi_find_av(ai_fastmap, idx2vol_id(ubi, i));
                if (!av)
                        continue;

                ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb)
                        fm_eba[i][aeb->lnum] = aeb->pnum;

                for (j = 0; j < vol->reserved_pebs; j++) {
                        if (scan_eba[i][j] != fm_eba[i][j]) {
                                if (scan_eba[i][j] == UBI_LEB_UNMAPPED ||
                                    fm_eba[i][j] == UBI_LEB_UNMAPPED)
                                        continue;

                                ubi_err(ubi, "LEB:%i:%i is PEB:%i instead of %i!",
                                        vol->vol_id, j, fm_eba[i][j],
                                        scan_eba[i][j]);
                                ubi_assert(0);
                        }
                }
        }

out_free:
        for (i = 0; i < num_volumes; i++) {
                if (!ubi->volumes[i])
                        continue;

                kfree(scan_eba[i]);
                kfree(fm_eba[i]);
        }

        kfree(scan_eba);
        kfree(fm_eba);
        return ret;
}

/**
 * ubi_eba_init - initialize the EBA sub-system using attaching information.
 * @ubi: UBI device description object
 * @ai: attaching information
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
int ubi_eba_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
{
        int i, err, num_volumes;
        struct ubi_ainf_volume *av;
        struct ubi_volume *vol;
        struct ubi_ainf_peb *aeb;
        struct rb_node *rb;

        dbg_eba("initialize EBA sub-system");

        spin_lock_init(&ubi->ltree_lock);
        mutex_init(&ubi->alc_mutex);
        ubi->ltree = RB_ROOT;

        ubi->global_sqnum = ai->max_sqnum + 1;
        num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT;

        for (i = 0; i < num_volumes; i++) {
                struct ubi_eba_table *tbl;

                vol = ubi->volumes[i];
                if (!vol)
                        continue;

                cond_resched();

                tbl = ubi_eba_create_table(vol, vol->reserved_pebs);
                if (IS_ERR(tbl)) {
                        err = PTR_ERR(tbl);
                        goto out_free;
                }

                ubi_eba_replace_table(vol, tbl);

                av = ubi_find_av(ai, idx2vol_id(ubi, i));
                if (!av)
                        continue;

                ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb) {
                        if (aeb->lnum >= vol->reserved_pebs) {
                                /*
                                 * This may happen in case of an unclean reboot
                                 * during re-size.
                                 */
                                ubi_move_aeb_to_list(av, aeb, &ai->erase);
                        } else {
                                struct ubi_eba_entry *entry;

                                entry = &vol->eba_tbl->entries[aeb->lnum];
                                entry->pnum = aeb->pnum;
                        }
                }
        }

        if (ubi->avail_pebs < EBA_RESERVED_PEBS) {
                ubi_err(ubi, "not enough physical eraseblocks (%d, need %d)",
                        ubi->avail_pebs, EBA_RESERVED_PEBS);
                if (ubi->corr_peb_count)
                        ubi_err(ubi, "%d PEBs are corrupted and not used",
                                ubi->corr_peb_count);
                err = -ENOSPC;
                goto out_free;
        }
        ubi->avail_pebs -= EBA_RESERVED_PEBS;
        ubi->rsvd_pebs += EBA_RESERVED_PEBS;

        if (ubi->bad_allowed) {
                ubi_calculate_reserved(ubi);

                if (ubi->avail_pebs < ubi->beb_rsvd_level) {
                        /* Not enough free physical eraseblocks */
                        ubi->beb_rsvd_pebs = ubi->avail_pebs;
                        print_rsvd_warning(ubi, ai);
                } else
                        ubi->beb_rsvd_pebs = ubi->beb_rsvd_level;

                ubi->avail_pebs -= ubi->beb_rsvd_pebs;
                ubi->rsvd_pebs += ubi->beb_rsvd_pebs;
        }

        dbg_eba("EBA sub-system is initialized");
        return 0;

out_free:
        for (i = 0; i < num_volumes; i++) {
                if (!ubi->volumes[i])
                        continue;

                ubi_eba_replace_table(ubi->volumes[i], NULL);
        }
        return err;
}