1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * Copyright (c) International Business Machines Corp., 2006
5 * Author: Artem Bityutskiy (Битюцкий Артём)
9 * The UBI Eraseblock Association (EBA) sub-system.
11 * This sub-system is responsible for I/O to/from logical eraseblocks.
13 * Although in this implementation the EBA table is fully kept and managed in
14 * RAM, which implies poor scalability, it might be (partially) maintained on
15 * flash in future implementations.
17 * The EBA sub-system implements per-logical eraseblock locking. Before
18 * accessing a logical eraseblock it is locked for reading or writing. The
19 * per-logical eraseblock locking is implemented by means of the lock tree. The
20 * lock tree is an RB-tree which refers to all the currently locked logical
21 * eraseblocks. The lock tree elements are &struct ubi_ltree_entry objects.
22 * They are indexed by (@vol_id, @lnum) pairs.
24 * EBA also maintains the global sequence counter which is incremented each
25 * time a logical eraseblock is mapped to a physical eraseblock and it is
26 * stored in the volume identifier header. This means that each VID header has
27 * a unique sequence number. The sequence number is only increased and we assume
28 * that 64 bits is enough for it to never overflow.
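 *
 * A rough usage sketch (illustrative only): a read of LEB (@vol_id, @lnum)
 * first takes the per-LEB lock via 'leb_read_lock()', looks up the backing
 * PEB in @vol->eba_tbl->entries[lnum].pnum, performs the flash I/O on that
 * PEB (or treats the LEB as unmapped), and finally drops the lock with
 * 'leb_read_unlock()'. Writes use the corresponding write-lock helpers.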
31 #include <linux/slab.h>
32 #include <linux/crc32.h>
33 #include <linux/err.h>
36 /* Number of physical eraseblocks reserved for atomic LEB change operation */
37 #define EBA_RESERVED_PEBS 1
40 * struct ubi_eba_entry - structure encoding a single LEB -> PEB association
41 * @pnum: the physical eraseblock number attached to the LEB
43 * This structure encodes a LEB -> PEB association. Note that the LEB
44 * number is not stored here, because it is the index used to access the entries table.
47 struct ubi_eba_entry {
52 * struct ubi_eba_table - LEB -> PEB association information
53 * @entries: the LEB to PEB mapping (one entry per LEB).
55 * This structure is private to the EBA logic and should be kept here.
56 * It encodes the LEB to PEB association table, and is subject to changes.
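 *
 * For example, the PEB backing LEB number @lnum of a volume is simply
 * @entries[lnum].pnum, which equals %UBI_LEB_UNMAPPED while the LEB is not
 * mapped (illustrative note; see 'ubi_eba_get_ldesc()' below).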
59 struct ubi_eba_table {
60 struct ubi_eba_entry *entries;
64 * next_sqnum - get next sequence number.
65 * @ubi: UBI device description object
67 * This function returns the next sequence number to use, which is just the current
68 * global sequence counter value. It also increases the global sequence counter.
71 unsigned long long ubi_next_sqnum(struct ubi_device *ubi)
73 unsigned long long sqnum;
75 spin_lock(&ubi->ltree_lock);
76 sqnum = ubi->global_sqnum++;
77 spin_unlock(&ubi->ltree_lock);
83 * ubi_get_compat - get compatibility flags of a volume.
84 * @ubi: UBI device description object
87 * This function returns compatibility flags for an internal volume. User
88 * volumes have no compatibility flags, so %0 is returned.
90 static int ubi_get_compat(const struct ubi_device *ubi, int vol_id)
92 if (vol_id == UBI_LAYOUT_VOLUME_ID)
93 return UBI_LAYOUT_VOLUME_COMPAT;
98 * ubi_eba_get_ldesc - get information about a LEB
99 * @vol: volume description object
100 * @lnum: logical eraseblock number
101 * @ldesc: the LEB descriptor to fill
103 * Used to query information about a specific LEB.
104 * It currently only returns the physical position of the LEB, but will be
105 * extended to provide more information.
107 void ubi_eba_get_ldesc(struct ubi_volume *vol, int lnum,
108 struct ubi_eba_leb_desc *ldesc)
111 ldesc->pnum = vol->eba_tbl->entries[lnum].pnum;
115 * ubi_eba_create_table - allocate a new EBA table and initialize it with all
117 * @vol: volume this EBA table is created for
118 * @nentries: number of entries in the table
120 * Allocate a new EBA table and initialize it with all LEBs unmapped.
121 * Returns a valid pointer if it succeeds, an ERR_PTR() otherwise.
123 struct ubi_eba_table *ubi_eba_create_table(struct ubi_volume *vol,
126 struct ubi_eba_table *tbl;
130 tbl = kzalloc(sizeof(*tbl), GFP_KERNEL);
132 return ERR_PTR(-ENOMEM);
134 tbl->entries = kmalloc_array(nentries, sizeof(*tbl->entries),
139 for (i = 0; i < nentries; i++)
140 tbl->entries[i].pnum = UBI_LEB_UNMAPPED;
152 * ubi_eba_destroy_table - destroy an EBA table
153 * @tbl: the table to destroy
155 * Destroy an EBA table.
157 void ubi_eba_destroy_table(struct ubi_eba_table *tbl)
167 * ubi_eba_copy_table - copy the EBA table attached to vol into another table
168 * @vol: volume containing the EBA table to copy
170 * @nentries: number of entries to copy
172 * Copy the EBA table stored in vol into the one pointed to by dst.
174 void ubi_eba_copy_table(struct ubi_volume *vol, struct ubi_eba_table *dst,
177 struct ubi_eba_table *src;
180 ubi_assert(dst && vol && vol->eba_tbl);
184 for (i = 0; i < nentries; i++)
185 dst->entries[i].pnum = src->entries[i].pnum;
189 * ubi_eba_replace_table - assign a new EBA table to a volume
190 * @vol: volume to assign the new EBA table to
191 * @tbl: new EBA table
193 * Assign a new EBA table to the volume and release the old one.
195 void ubi_eba_replace_table(struct ubi_volume *vol, struct ubi_eba_table *tbl)
197 ubi_eba_destroy_table(vol->eba_tbl);
202 * ltree_lookup - look up the lock tree.
203 * @ubi: UBI device description object
205 * @lnum: logical eraseblock number
207 * This function returns a pointer to the corresponding &struct ubi_ltree_entry
208 * object if the logical eraseblock is locked and %NULL if it is not.
209 * @ubi->ltree_lock has to be locked.
211 static struct ubi_ltree_entry *ltree_lookup(struct ubi_device *ubi, int vol_id,
216 p = ubi->ltree.rb_node;
218 struct ubi_ltree_entry *le;
220 le = rb_entry(p, struct ubi_ltree_entry, rb);
222 if (vol_id < le->vol_id)
224 else if (vol_id > le->vol_id)
229 else if (lnum > le->lnum)
240 * ltree_add_entry - add new entry to the lock tree.
241 * @ubi: UBI device description object
243 * @lnum: logical eraseblock number
245 * This function adds new entry for logical eraseblock (@vol_id, @lnum) to the
246 * lock tree. If such an entry is already there, its usage counter is increased.
247 * Returns a pointer to the lock tree entry or %-ENOMEM if memory allocation fails.
250 static struct ubi_ltree_entry *ltree_add_entry(struct ubi_device *ubi,
251 int vol_id, int lnum)
253 struct ubi_ltree_entry *le, *le1, *le_free;
255 le = kmalloc(sizeof(struct ubi_ltree_entry), GFP_NOFS);
257 return ERR_PTR(-ENOMEM);
260 init_rwsem(&le->mutex);
264 spin_lock(&ubi->ltree_lock);
265 le1 = ltree_lookup(ubi, vol_id, lnum);
269 * This logical eraseblock is already locked. The newly
270 * allocated lock entry is not needed.
275 struct rb_node **p, *parent = NULL;
278 * No lock entry, add the newly allocated one to the
279 * @ubi->ltree RB-tree.
283 p = &ubi->ltree.rb_node;
286 le1 = rb_entry(parent, struct ubi_ltree_entry, rb);
288 if (vol_id < le1->vol_id)
290 else if (vol_id > le1->vol_id)
293 ubi_assert(lnum != le1->lnum);
294 if (lnum < le1->lnum)
301 rb_link_node(&le->rb, parent, p);
302 rb_insert_color(&le->rb, &ubi->ltree);
305 spin_unlock(&ubi->ltree_lock);
312 * leb_read_lock - lock logical eraseblock for reading.
313 * @ubi: UBI device description object
315 * @lnum: logical eraseblock number
317 * This function locks a logical eraseblock for reading. Returns zero in case
318 * of success and a negative error code in case of failure.
320 static int leb_read_lock(struct ubi_device *ubi, int vol_id, int lnum)
322 struct ubi_ltree_entry *le;
324 le = ltree_add_entry(ubi, vol_id, lnum);
327 down_read(&le->mutex);
332 * leb_read_unlock - unlock logical eraseblock.
333 * @ubi: UBI device description object
335 * @lnum: logical eraseblock number
337 static void leb_read_unlock(struct ubi_device *ubi, int vol_id, int lnum)
339 struct ubi_ltree_entry *le;
341 spin_lock(&ubi->ltree_lock);
342 le = ltree_lookup(ubi, vol_id, lnum);
344 ubi_assert(le->users >= 0);
346 if (le->users == 0) {
347 rb_erase(&le->rb, &ubi->ltree);
350 spin_unlock(&ubi->ltree_lock);
354 * leb_write_lock - lock logical eraseblock for writing.
355 * @ubi: UBI device description object
357 * @lnum: logical eraseblock number
359 * This function locks a logical eraseblock for writing. Returns zero in case
360 * of success and a negative error code in case of failure.
362 static int leb_write_lock(struct ubi_device *ubi, int vol_id, int lnum)
364 struct ubi_ltree_entry *le;
366 le = ltree_add_entry(ubi, vol_id, lnum);
369 down_write(&le->mutex);
374 * leb_write_trylock - try to lock logical eraseblock for writing.
375 * @ubi: UBI device description object
377 * @lnum: logical eraseblock number
379 * This function locks a logical eraseblock for writing if there is no
380 * contention and does nothing if there is contention. Returns %0 in case of
381 * success, %1 in case of contention, and a negative error code in case of failure.
384 static int leb_write_trylock(struct ubi_device *ubi, int vol_id, int lnum)
386 struct ubi_ltree_entry *le;
388 le = ltree_add_entry(ubi, vol_id, lnum);
391 if (down_write_trylock(&le->mutex))
394 /* Contention, cancel */
395 spin_lock(&ubi->ltree_lock);
397 ubi_assert(le->users >= 0);
398 if (le->users == 0) {
399 rb_erase(&le->rb, &ubi->ltree);
402 spin_unlock(&ubi->ltree_lock);
408 * leb_write_unlock - unlock logical eraseblock.
409 * @ubi: UBI device description object
411 * @lnum: logical eraseblock number
413 static void leb_write_unlock(struct ubi_device *ubi, int vol_id, int lnum)
415 struct ubi_ltree_entry *le;
417 spin_lock(&ubi->ltree_lock);
418 le = ltree_lookup(ubi, vol_id, lnum);
420 ubi_assert(le->users >= 0);
421 up_write(&le->mutex);
422 if (le->users == 0) {
423 rb_erase(&le->rb, &ubi->ltree);
426 spin_unlock(&ubi->ltree_lock);
430 * ubi_eba_is_mapped - check if a LEB is mapped.
431 * @vol: volume description object
432 * @lnum: logical eraseblock number
434 * This function returns true if the LEB is mapped, false otherwise.
436 bool ubi_eba_is_mapped(struct ubi_volume *vol, int lnum)
438 return vol->eba_tbl->entries[lnum].pnum >= 0;
442 * ubi_eba_unmap_leb - un-map logical eraseblock.
443 * @ubi: UBI device description object
444 * @vol: volume description object
445 * @lnum: logical eraseblock number
447 * This function un-maps logical eraseblock @lnum and schedules corresponding
448 * physical eraseblock for erasure. Returns zero in case of success and a
449 * negative error code in case of failure.
451 int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol,
454 int err, pnum, vol_id = vol->vol_id;
459 err = leb_write_lock(ubi, vol_id, lnum);
463 pnum = vol->eba_tbl->entries[lnum].pnum;
465 /* This logical eraseblock is already unmapped */
468 dbg_eba("erase LEB %d:%d, PEB %d", vol_id, lnum, pnum);
470 down_read(&ubi->fm_eba_sem);
471 vol->eba_tbl->entries[lnum].pnum = UBI_LEB_UNMAPPED;
472 up_read(&ubi->fm_eba_sem);
473 err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 0);
476 leb_write_unlock(ubi, vol_id, lnum);
480 #ifdef CONFIG_MTD_UBI_FASTMAP
482 * check_mapping - check and fixup a mapping
483 * @ubi: UBI device description object
484 * @vol: volume description object
485 * @lnum: logical eraseblock number
486 * @pnum: physical eraseblock number
488 * Checks whether a given mapping is valid. Fastmap cannot track LEB unmap
489 * operations; if such an operation is interrupted the mapping still looks
490 * good, but upon the first read an ECC error is reported to the upper layer.
491 * Normally this is fixed during the full scan at attach time, but with
492 * Fastmap we have to deal with it while reading.
493 * If the PEB behind a LEB shows this symptom we change the mapping to
494 * %UBI_LEB_UNMAPPED and schedule the PEB for erasure.
496 * Returns 0 on success, negative error code in case of failure.
498 static int check_mapping(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
502 struct ubi_vid_io_buf *vidb;
503 struct ubi_vid_hdr *vid_hdr;
505 if (!ubi->fast_attach)
508 if (!vol->checkmap || test_bit(lnum, vol->checkmap))
511 vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
515 err = ubi_io_read_vid_hdr(ubi, *pnum, vidb, 0);
516 if (err > 0 && err != UBI_IO_BITFLIPS) {
521 case UBI_IO_FF_BITFLIPS:
523 case UBI_IO_BAD_HDR_EBADMSG:
529 if (err == UBI_IO_BAD_HDR_EBADMSG || err == UBI_IO_FF_BITFLIPS)
532 down_read(&ubi->fm_eba_sem);
533 vol->eba_tbl->entries[lnum].pnum = UBI_LEB_UNMAPPED;
534 up_read(&ubi->fm_eba_sem);
535 ubi_wl_put_peb(ubi, vol->vol_id, lnum, *pnum, torture);
537 *pnum = UBI_LEB_UNMAPPED;
538 } else if (err < 0) {
539 ubi_err(ubi, "unable to read VID header back from PEB %i: %i",
544 int found_vol_id, found_lnum;
546 ubi_assert(err == 0 || err == UBI_IO_BITFLIPS);
548 vid_hdr = ubi_get_vid_hdr(vidb);
549 found_vol_id = be32_to_cpu(vid_hdr->vol_id);
550 found_lnum = be32_to_cpu(vid_hdr->lnum);
552 if (found_lnum != lnum || found_vol_id != vol->vol_id) {
553 ubi_err(ubi, "EBA mismatch! PEB %i is LEB %i:%i instead of LEB %i:%i",
554 *pnum, found_vol_id, found_lnum, vol->vol_id, lnum);
561 set_bit(lnum, vol->checkmap);
565 ubi_free_vid_buf(vidb);
570 static int check_mapping(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
578 * ubi_eba_read_leb - read data.
579 * @ubi: UBI device description object
580 * @vol: volume description object
581 * @lnum: logical eraseblock number
582 * @buf: buffer to store the read data
583 * @offset: offset from where to read
584 * @len: how many bytes to read
585 * @check: data CRC check flag
587 * If the logical eraseblock @lnum is unmapped, @buf is filled with 0xFF
588 * bytes. The @check flag only makes sense for static volumes and forces
589 * eraseblock data CRC checking.
591 * In case of success this function returns zero. In case of a static volume,
592 * if data CRC mismatches - %-EBADMSG is returned. %-EBADMSG may also be
593 * returned for any volume type if an ECC error was detected by the MTD device
594 * driver. Other negative error codes may be returned in case of other errors.
596 int ubi_eba_read_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
597 void *buf, int offset, int len, int check)
599 int err, pnum, scrub = 0, vol_id = vol->vol_id;
600 struct ubi_vid_io_buf *vidb;
601 struct ubi_vid_hdr *vid_hdr;
604 err = leb_read_lock(ubi, vol_id, lnum);
608 pnum = vol->eba_tbl->entries[lnum].pnum;
610 err = check_mapping(ubi, vol, lnum, &pnum);
615 if (pnum == UBI_LEB_UNMAPPED) {
617 * The logical eraseblock is not mapped, fill the whole buffer
618 * with 0xFF bytes. The exception is static volumes for which
619 * it is an error to read unmapped logical eraseblocks.
621 dbg_eba("read %d bytes from offset %d of LEB %d:%d (unmapped)",
622 len, offset, vol_id, lnum);
623 leb_read_unlock(ubi, vol_id, lnum);
624 ubi_assert(vol->vol_type != UBI_STATIC_VOLUME);
625 memset(buf, 0xFF, len);
629 dbg_eba("read %d bytes from offset %d of LEB %d:%d, PEB %d",
630 len, offset, vol_id, lnum, pnum);
632 if (vol->vol_type == UBI_DYNAMIC_VOLUME)
637 vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
643 vid_hdr = ubi_get_vid_hdr(vidb);
645 err = ubi_io_read_vid_hdr(ubi, pnum, vidb, 1);
646 if (err && err != UBI_IO_BITFLIPS) {
649 * The header is either absent or corrupted.
650 * The former case means there is a bug -
651 * switch to read-only mode just in case.
652 * The latter case means a real corruption - we
653 * may try to recover data. FIXME: but this is
656 if (err == UBI_IO_BAD_HDR_EBADMSG ||
657 err == UBI_IO_BAD_HDR) {
658 ubi_warn(ubi, "corrupted VID header at PEB %d, LEB %d:%d",
663 * Ending up here in the non-Fastmap case
664 * is a clear bug as the VID header had to
665 * be present at scan time to have it referenced.
666 * With fastmap the story is more complicated.
667 * Fastmap has the mapping info without the need
668 * of a full scan. So the LEB could have been
669 * unmapped, Fastmap cannot know this and keeps
670 * the LEB referenced.
671 * This is valid and works as the layer above UBI
672 * has to do bookkeeping about used/referenced
675 if (ubi->fast_attach) {
684 } else if (err == UBI_IO_BITFLIPS)
687 ubi_assert(lnum < be32_to_cpu(vid_hdr->used_ebs));
688 ubi_assert(len == be32_to_cpu(vid_hdr->data_size));
690 crc = be32_to_cpu(vid_hdr->data_crc);
691 ubi_free_vid_buf(vidb);
694 err = ubi_io_read_data(ubi, buf, pnum, offset, len);
696 if (err == UBI_IO_BITFLIPS)
698 else if (mtd_is_eccerr(err)) {
699 if (vol->vol_type == UBI_DYNAMIC_VOLUME)
703 ubi_msg(ubi, "force data checking");
712 uint32_t crc1 = crc32(UBI_CRC32_INIT, buf, len);
714 ubi_warn(ubi, "CRC error: calculated %#08x, must be %#08x",
722 err = ubi_wl_scrub_peb(ubi, pnum);
724 leb_read_unlock(ubi, vol_id, lnum);
728 ubi_free_vid_buf(vidb);
730 leb_read_unlock(ubi, vol_id, lnum);
735 * ubi_eba_read_leb_sg - read data into a scatter gather list.
736 * @ubi: UBI device description object
737 * @vol: volume description object
738 * @lnum: logical eraseblock number
739 * @sgl: UBI scatter gather list to store the read data
740 * @offset: offset from where to read
741 * @len: how many bytes to read
742 * @check: data CRC check flag
744 * This function works exactly like ubi_eba_read_leb(). But instead of
745 * storing the read data into a buffer it writes to an UBI scatter gather
748 int ubi_eba_read_leb_sg(struct ubi_device *ubi, struct ubi_volume *vol,
749 struct ubi_sgl *sgl, int lnum, int offset, int len,
754 struct scatterlist *sg;
757 ubi_assert(sgl->list_pos < UBI_MAX_SG_COUNT);
758 sg = &sgl->sg[sgl->list_pos];
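/*
 * Read at most up to the end of the current scatterlist entry; any
 * remaining bytes are handled on the next iteration of the loop.
 */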
759 if (len < sg->length - sgl->page_pos)
762 to_read = sg->length - sgl->page_pos;
764 ret = ubi_eba_read_leb(ubi, vol, lnum,
765 sg_virt(sg) + sgl->page_pos, offset,
773 sgl->page_pos += to_read;
774 if (sgl->page_pos == sg->length) {
790 * try_recover_peb - try to recover from write failure.
791 * @vol: volume description object
792 * @pnum: the physical eraseblock to recover
793 * @lnum: logical eraseblock number
794 * @buf: data which was not written because of the write failure
795 * @offset: offset of the failed write
796 * @len: how many bytes should have been written
798 * @retry: whether the caller should retry in case of failure
800 * This function is called in case of a write failure and moves all good data
801 * from the potentially bad physical eraseblock to a good physical eraseblock.
802 * This function also writes the data which was not written due to the failure.
803 * Returns 0 in case of success, and a negative error code in case of failure.
804 * In case of failure, the %retry parameter is set to false if this is a fatal
805 * error (retrying won't help), and true otherwise.
807 static int try_recover_peb(struct ubi_volume *vol, int pnum, int lnum,
808 const void *buf, int offset, int len,
809 struct ubi_vid_io_buf *vidb, bool *retry)
811 struct ubi_device *ubi = vol->ubi;
812 struct ubi_vid_hdr *vid_hdr;
813 int new_pnum, err, vol_id = vol->vol_id, data_size;
818 new_pnum = ubi_wl_get_peb(ubi);
824 ubi_msg(ubi, "recover PEB %d, move data to PEB %d",
827 err = ubi_io_read_vid_hdr(ubi, pnum, vidb, 1);
828 if (err && err != UBI_IO_BITFLIPS) {
834 vid_hdr = ubi_get_vid_hdr(vidb);
835 ubi_assert(vid_hdr->vol_type == UBI_VID_DYNAMIC);
837 mutex_lock(&ubi->buf_mutex);
838 memset(ubi->peb_buf + offset, 0xFF, len);
840 /* Read everything before the area where the write failure happened */
842 err = ubi_io_read_data(ubi, ubi->peb_buf, pnum, 0, offset);
843 if (err && err != UBI_IO_BITFLIPS)
849 memcpy(ubi->peb_buf + offset, buf, len);
851 data_size = offset + len;
852 crc = crc32(UBI_CRC32_INIT, ubi->peb_buf, data_size);
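/*
 * The recovered PEB carries a copy of the LEB data: mark it as such and
 * record the size and CRC of the data actually present.
 */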
853 vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
854 vid_hdr->copy_flag = 1;
855 vid_hdr->data_size = cpu_to_be32(data_size);
856 vid_hdr->data_crc = cpu_to_be32(crc);
857 err = ubi_io_write_vid_hdr(ubi, new_pnum, vidb);
861 err = ubi_io_write_data(ubi, ubi->peb_buf, new_pnum, 0, data_size);
864 mutex_unlock(&ubi->buf_mutex);
867 vol->eba_tbl->entries[lnum].pnum = new_pnum;
870 up_read(&ubi->fm_eba_sem);
873 ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
874 ubi_msg(ubi, "data was successfully recovered");
875 } else if (new_pnum >= 0) {
877 * Bad luck? This physical eraseblock is bad too? Crud. Let's
878 * try to get another one.
880 ubi_wl_put_peb(ubi, vol_id, lnum, new_pnum, 1);
881 ubi_warn(ubi, "failed to write to PEB %d", new_pnum);
888 * recover_peb - recover from write failure.
889 * @ubi: UBI device description object
890 * @pnum: the physical eraseblock to recover
892 * @lnum: logical eraseblock number
893 * @buf: data which was not written because of the write failure
894 * @offset: offset of the failed write
895 * @len: how many bytes should have been written
897 * This function is called in case of a write failure and moves all good data
898 * from the potentially bad physical eraseblock to a good physical eraseblock.
899 * This function also writes the data which was not written due to the failure.
900 * Returns 0 in case of success, and a negative error code in case of failure.
901 * This function retries %UBI_IO_RETRIES times before giving up.
903 static int recover_peb(struct ubi_device *ubi, int pnum, int vol_id, int lnum,
904 const void *buf, int offset, int len)
906 int err, idx = vol_id2idx(ubi, vol_id), tries;
907 struct ubi_volume *vol = ubi->volumes[idx];
908 struct ubi_vid_io_buf *vidb;
910 vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
914 for (tries = 0; tries <= UBI_IO_RETRIES; tries++) {
917 err = try_recover_peb(vol, pnum, lnum, buf, offset, len, vidb,
922 ubi_msg(ubi, "try again");
925 ubi_free_vid_buf(vidb);
931 * try_write_vid_and_data - try to write VID header and data to a new PEB.
932 * @vol: volume description object
933 * @lnum: logical eraseblock number
934 * @vidb: the VID buffer to write
935 * @buf: buffer containing the data
936 * @offset: where to start writing data
937 * @len: how many bytes should be written
939 * This function tries to write VID header and data belonging to logical
940 * eraseblock @lnum of volume @vol to a new physical eraseblock. Returns zero
941 * in case of success and a negative error code in case of failure.
942 * In case of error, it is possible that something was still written to the
943 * flash media, but it may be garbage.
945 static int try_write_vid_and_data(struct ubi_volume *vol, int lnum,
946 struct ubi_vid_io_buf *vidb, const void *buf,
949 struct ubi_device *ubi = vol->ubi;
950 int pnum, opnum, err, err2, vol_id = vol->vol_id;
952 pnum = ubi_wl_get_peb(ubi);
958 opnum = vol->eba_tbl->entries[lnum].pnum;
960 dbg_eba("write VID hdr and %d bytes at offset %d of LEB %d:%d, PEB %d",
961 len, offset, vol_id, lnum, pnum);
963 err = ubi_io_write_vid_hdr(ubi, pnum, vidb);
965 ubi_warn(ubi, "failed to write VID header to LEB %d:%d, PEB %d",
971 err = ubi_io_write_data(ubi, buf, pnum, offset, len);
974 "failed to write %d bytes at offset %d of LEB %d:%d, PEB %d",
975 len, offset, vol_id, lnum, pnum);
980 vol->eba_tbl->entries[lnum].pnum = pnum;
983 up_read(&ubi->fm_eba_sem);
985 if (err && pnum >= 0) {
986 err2 = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
988 ubi_warn(ubi, "failed to return physical eraseblock %d, error %d",
991 } else if (!err && opnum >= 0) {
992 err2 = ubi_wl_put_peb(ubi, vol_id, lnum, opnum, 0);
994 ubi_warn(ubi, "failed to return physical eraseblock %d, error %d",
1003 * ubi_eba_write_leb - write data to dynamic volume.
1004 * @ubi: UBI device description object
1005 * @vol: volume description object
1006 * @lnum: logical eraseblock number
1007 * @buf: the data to write
1008 * @offset: offset within the logical eraseblock where to write
1009 * @len: how many bytes to write
1011 * This function writes data to logical eraseblock @lnum of a dynamic volume
1012 * @vol. Returns zero in case of success and a negative error code in case
1013 * of failure. In case of error, it is possible that something was still
1014 * written to the flash media, but it may be garbage.
1015 * This function retries %UBI_IO_RETRIES times before giving up.
1017 int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
1018 const void *buf, int offset, int len)
1020 int err, pnum, tries, vol_id = vol->vol_id;
1021 struct ubi_vid_io_buf *vidb;
1022 struct ubi_vid_hdr *vid_hdr;
1027 err = leb_write_lock(ubi, vol_id, lnum);
1031 pnum = vol->eba_tbl->entries[lnum].pnum;
1033 err = check_mapping(ubi, vol, lnum, &pnum);
1039 dbg_eba("write %d bytes at offset %d of LEB %d:%d, PEB %d",
1040 len, offset, vol_id, lnum, pnum);
1042 err = ubi_io_write_data(ubi, buf, pnum, offset, len);
1044 ubi_warn(ubi, "failed to write data to PEB %d", pnum);
1045 if (err == -EIO && ubi->bad_allowed)
1046 err = recover_peb(ubi, pnum, vol_id, lnum, buf,
1054 * The logical eraseblock is not mapped. We have to get a free physical
1055 * eraseblock and write the volume identifier header there first.
1057 vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
1059 leb_write_unlock(ubi, vol_id, lnum);
1063 vid_hdr = ubi_get_vid_hdr(vidb);
1065 vid_hdr->vol_type = UBI_VID_DYNAMIC;
1066 vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
1067 vid_hdr->vol_id = cpu_to_be32(vol_id);
1068 vid_hdr->lnum = cpu_to_be32(lnum);
1069 vid_hdr->compat = ubi_get_compat(ubi, vol_id);
1070 vid_hdr->data_pad = cpu_to_be32(vol->data_pad);
1072 for (tries = 0; tries <= UBI_IO_RETRIES; tries++) {
1073 err = try_write_vid_and_data(vol, lnum, vidb, buf, offset, len);
1074 if (err != -EIO || !ubi->bad_allowed)
1078 * Fortunately, this is the first write operation to this
1079 * physical eraseblock, so just put it and request a new one.
1080 * We assume that if this physical eraseblock went bad, the
1081 * erase code will handle that.
1083 vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
1084 ubi_msg(ubi, "try another PEB");
1087 ubi_free_vid_buf(vidb);
1093 leb_write_unlock(ubi, vol_id, lnum);
1099 * ubi_eba_write_leb_st - write data to static volume.
1100 * @ubi: UBI device description object
1101 * @vol: volume description object
1102 * @lnum: logical eraseblock number
1103 * @buf: data to write
1104 * @len: how many bytes to write
1105 * @used_ebs: how many logical eraseblocks will this volume contain
1107 * This function writes data to logical eraseblock @lnum of static volume
1108 * @vol. The @used_ebs argument should contain the total number of logical
1109 * eraseblocks in this static volume.
1111 * When writing to the last logical eraseblock, the @len argument doesn't have
1112 * to be aligned to the minimal I/O unit size. Instead, it has to be equal to
1113 * the real data size, although the @buf buffer still has to be padded out to
1114 * the alignment boundary (see the example below). In all other cases, @len
 * has to be aligned.
1116 * It is prohibited to write more than once to logical eraseblocks of static
1117 * volumes. This function returns zero in case of success and a negative error
1118 * code in case of failure.
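 *
 * Illustrative example (an assumption, not part of the original comment):
 * with a 2048 byte minimal I/O unit, writing 1000 bytes of data to the last
 * LEB means @len is passed as 1000 while the write itself covers 2048 bytes,
 * so @buf must include the padding; for any other LEB, @len must already be
 * aligned.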
1120 int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol,
1121 int lnum, const void *buf, int len, int used_ebs)
1123 int err, tries, data_size = len, vol_id = vol->vol_id;
1124 struct ubi_vid_io_buf *vidb;
1125 struct ubi_vid_hdr *vid_hdr;
1131 if (lnum == used_ebs - 1)
1132 /* If this is the last LEB @len may be unaligned */
1133 len = ALIGN(data_size, ubi->min_io_size);
1135 ubi_assert(!(len & (ubi->min_io_size - 1)));
1137 vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
1141 vid_hdr = ubi_get_vid_hdr(vidb);
1143 err = leb_write_lock(ubi, vol_id, lnum);
1147 vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
1148 vid_hdr->vol_id = cpu_to_be32(vol_id);
1149 vid_hdr->lnum = cpu_to_be32(lnum);
1150 vid_hdr->compat = ubi_get_compat(ubi, vol_id);
1151 vid_hdr->data_pad = cpu_to_be32(vol->data_pad);
1153 crc = crc32(UBI_CRC32_INIT, buf, data_size);
1154 vid_hdr->vol_type = UBI_VID_STATIC;
1155 vid_hdr->data_size = cpu_to_be32(data_size);
1156 vid_hdr->used_ebs = cpu_to_be32(used_ebs);
1157 vid_hdr->data_crc = cpu_to_be32(crc);
1159 ubi_assert(vol->eba_tbl->entries[lnum].pnum < 0);
1161 for (tries = 0; tries <= UBI_IO_RETRIES; tries++) {
1162 err = try_write_vid_and_data(vol, lnum, vidb, buf, 0, len);
1163 if (err != -EIO || !ubi->bad_allowed)
1166 vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
1167 ubi_msg(ubi, "try another PEB");
1173 leb_write_unlock(ubi, vol_id, lnum);
1176 ubi_free_vid_buf(vidb);
1182 * ubi_eba_atomic_leb_change - change logical eraseblock atomically.
1183 * @ubi: UBI device description object
1184 * @vol: volume description object
1185 * @lnum: logical eraseblock number
1186 * @buf: data to write
1187 * @len: how many bytes to write
1189 * This function changes the contents of a logical eraseblock atomically. @buf
1190 * has to contain new logical eraseblock data, and @len - the length of the
1191 * data, which has to be aligned. This function guarantees that in case of an
1192 * unclean reboot the old contents are preserved. Returns zero in case of
1193 * success and a negative error code in case of failure.
1195 * UBI reserves one LEB for the "atomic LEB change" operation, so only one
1196 * LEB change may be done at a time. This is ensured by @ubi->alc_mutex.
1198 int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
1199 int lnum, const void *buf, int len)
1201 int err, tries, vol_id = vol->vol_id;
1202 struct ubi_vid_io_buf *vidb;
1203 struct ubi_vid_hdr *vid_hdr;
1211 * Special case when data length is zero. In this case the LEB
1212 * has to be unmapped and mapped somewhere else.
1214 err = ubi_eba_unmap_leb(ubi, vol, lnum);
1217 return ubi_eba_write_leb(ubi, vol, lnum, NULL, 0, 0);
1220 vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
1224 vid_hdr = ubi_get_vid_hdr(vidb);
1226 mutex_lock(&ubi->alc_mutex);
1227 err = leb_write_lock(ubi, vol_id, lnum);
1231 vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
1232 vid_hdr->vol_id = cpu_to_be32(vol_id);
1233 vid_hdr->lnum = cpu_to_be32(lnum);
1234 vid_hdr->compat = ubi_get_compat(ubi, vol_id);
1235 vid_hdr->data_pad = cpu_to_be32(vol->data_pad);
1237 crc = crc32(UBI_CRC32_INIT, buf, len);
1238 vid_hdr->vol_type = UBI_VID_DYNAMIC;
1239 vid_hdr->data_size = cpu_to_be32(len);
1240 vid_hdr->copy_flag = 1;
1241 vid_hdr->data_crc = cpu_to_be32(crc);
1243 dbg_eba("change LEB %d:%d", vol_id, lnum);
1245 for (tries = 0; tries <= UBI_IO_RETRIES; tries++) {
1246 err = try_write_vid_and_data(vol, lnum, vidb, buf, 0, len);
1247 if (err != -EIO || !ubi->bad_allowed)
1250 vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
1251 ubi_msg(ubi, "try another PEB");
1255 * This flash device does not admit of bad eraseblocks or
1256 * something nasty and unexpected happened. Switch to read-only
1257 * mode just in case.
1262 leb_write_unlock(ubi, vol_id, lnum);
1265 mutex_unlock(&ubi->alc_mutex);
1266 ubi_free_vid_buf(vidb);
1271 * is_error_sane - check whether a read error is sane.
1272 * @err: code of the error happened during reading
1274 * This is a helper function for 'ubi_eba_copy_leb()' which is called when we
1275 * cannot read data from the target PEB (an error @err happened). If the error
1276 * code is sane, then we treat this error as non-fatal. Otherwise the error is
1277 * fatal and UBI will be switched to R/O mode later.
1279 * The idea is that we try not to switch to R/O mode if the read error is
1280 * something which suggests there was a real read problem. E.g., %-EIO. Or a
1281 * memory allocation failed (-%ENOMEM). Otherwise, it is safer to switch to R/O
1282 * mode, simply because we do not know what happened at the MTD level, and we
1283 * cannot handle this. E.g., the underlying driver may have become crazy, and
1284 * it is safer to switch to R/O mode to preserve the data.
1286 * And bear in mind, this is about reading from the target PEB, i.e. the PEB
1287 * which we have just written.
1289 static int is_error_sane(int err)
1291 if (err == -EIO || err == -ENOMEM || err == UBI_IO_BAD_HDR ||
1292 err == UBI_IO_BAD_HDR_EBADMSG || err == -ETIMEDOUT)
1298 * ubi_eba_copy_leb - copy logical eraseblock.
1299 * @ubi: UBI device description object
1300 * @from: physical eraseblock number from where to copy
1301 * @to: physical eraseblock number where to copy
1302 * @vidb: the VID buffer holding the VID header of the @from physical eraseblock
1304 * This function copies a logical eraseblock from physical eraseblock @from to
1305 * physical eraseblock @to. The @vidb buffer may be changed by this
1306 * function. Returns:
1307 * o %0 in case of success;
1308 * o %MOVE_CANCEL_RACE, %MOVE_TARGET_WR_ERR, %MOVE_TARGET_BITFLIPS, etc;
1309 * o a negative error code in case of failure.
1311 int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
1312 struct ubi_vid_io_buf *vidb)
1314 int err, vol_id, lnum, data_size, aldata_size, idx;
1315 struct ubi_vid_hdr *vid_hdr = ubi_get_vid_hdr(vidb);
1316 struct ubi_volume *vol;
1319 ubi_assert(rwsem_is_locked(&ubi->fm_eba_sem));
1321 vol_id = be32_to_cpu(vid_hdr->vol_id);
1322 lnum = be32_to_cpu(vid_hdr->lnum);
1324 dbg_wl("copy LEB %d:%d, PEB %d to PEB %d", vol_id, lnum, from, to);
1326 if (vid_hdr->vol_type == UBI_VID_STATIC) {
1327 data_size = be32_to_cpu(vid_hdr->data_size);
1328 aldata_size = ALIGN(data_size, ubi->min_io_size);
1330 data_size = aldata_size =
1331 ubi->leb_size - be32_to_cpu(vid_hdr->data_pad);
1333 idx = vol_id2idx(ubi, vol_id);
1334 spin_lock(&ubi->volumes_lock);
1336 * Note, we may race with volume deletion, which means that the volume
1337 * this logical eraseblock belongs to might be being deleted. Since the
1338 * volume deletion un-maps all the volume's logical eraseblocks, it will
1339 * be locked in 'ubi_wl_put_peb()' and wait for the WL worker to finish.
1341 vol = ubi->volumes[idx];
1342 spin_unlock(&ubi->volumes_lock);
1344 /* No need to do further work, cancel */
1345 dbg_wl("volume %d is being removed, cancel", vol_id);
1346 return MOVE_CANCEL_RACE;
1350 * We do not want anybody to write to this logical eraseblock while we
1351 * are moving it, so lock it.
1353 * Note, we are using non-waiting locking here, because we cannot sleep
1354 * on the LEB, since it may cause deadlocks. Indeed, imagine a task is
1355 * unmapping the LEB which is mapped to the PEB we are going to move
1356 * (@from). This task locks the LEB and goes sleep in the
1357 * 'ubi_wl_put_peb()' function on the @ubi->move_mutex. In turn, we are
1358 * holding @ubi->move_mutex and go sleep on the LEB lock. So, if the
1359 * LEB is already locked, we just do not move it and return
1360 * %MOVE_RETRY. Note, we do not return %MOVE_CANCEL_RACE here because
1361 * we do not know the reasons of the contention - it may be just a
1362 * normal I/O on this LEB, so we want to re-try.
1364 err = leb_write_trylock(ubi, vol_id, lnum);
1366 dbg_wl("contention on LEB %d:%d, cancel", vol_id, lnum);
1371 * The LEB might have been put meanwhile, and the task which put it is
1372 * probably waiting on @ubi->move_mutex. No need to continue the work,
1375 if (vol->eba_tbl->entries[lnum].pnum != from) {
1376 dbg_wl("LEB %d:%d is no longer mapped to PEB %d, mapped to PEB %d, cancel",
1377 vol_id, lnum, from, vol->eba_tbl->entries[lnum].pnum);
1378 err = MOVE_CANCEL_RACE;
1379 goto out_unlock_leb;
1383 * OK, now the LEB is locked and we can safely start moving it. Since
1384 * this function utilizes the @ubi->peb_buf buffer which is shared
1385 * with some other functions - we lock the buffer by taking the
1388 mutex_lock(&ubi->buf_mutex);
1389 dbg_wl("read %d bytes of data", aldata_size);
1390 err = ubi_io_read_data(ubi, ubi->peb_buf, from, 0, aldata_size);
1391 if (err && err != UBI_IO_BITFLIPS) {
1392 ubi_warn(ubi, "error %d while reading data from PEB %d",
1394 err = MOVE_SOURCE_RD_ERR;
1395 goto out_unlock_buf;
1399 * Now we have got to calculate how much data we have to copy. In
1400 * case of a static volume it is fairly easy - the VID header contains
1401 * the data size. In case of a dynamic volume it is more difficult - we
1402 * have to read the contents, cut 0xFF bytes from the end and copy only
1403 * the first part. We must do this to avoid writing 0xFF bytes as it
1404 * may have some side-effects. And not only this. It is important not
1405 * to include those 0xFFs in the CRC because later they may be filled by data.
1408 if (vid_hdr->vol_type == UBI_VID_DYNAMIC)
1409 aldata_size = data_size =
1410 ubi_calc_data_len(ubi, ubi->peb_buf, data_size);
1413 crc = crc32(UBI_CRC32_INIT, ubi->peb_buf, data_size);
1417 * It may turn out to be that the whole @from physical eraseblock
1418 * contains only 0xFF bytes. Then we have to only write the VID header
1419 * and do not write any data. This also means we should not set
1420 * @vid_hdr->copy_flag, @vid_hdr->data_size, and @vid_hdr->data_crc.
1422 if (data_size > 0) {
1423 vid_hdr->copy_flag = 1;
1424 vid_hdr->data_size = cpu_to_be32(data_size);
1425 vid_hdr->data_crc = cpu_to_be32(crc);
1427 vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
1429 err = ubi_io_write_vid_hdr(ubi, to, vidb);
1432 err = MOVE_TARGET_WR_ERR;
1433 goto out_unlock_buf;
1438 /* Read the VID header back and check if it was written correctly */
1439 err = ubi_io_read_vid_hdr(ubi, to, vidb, 1);
1441 if (err != UBI_IO_BITFLIPS) {
1442 ubi_warn(ubi, "error %d while reading VID header back from PEB %d",
1444 if (is_error_sane(err))
1445 err = MOVE_TARGET_RD_ERR;
1447 err = MOVE_TARGET_BITFLIPS;
1448 goto out_unlock_buf;
1451 if (data_size > 0) {
1452 err = ubi_io_write_data(ubi, ubi->peb_buf, to, 0, aldata_size);
1455 err = MOVE_TARGET_WR_ERR;
1456 goto out_unlock_buf;
1462 ubi_assert(vol->eba_tbl->entries[lnum].pnum == from);
1463 vol->eba_tbl->entries[lnum].pnum = to;
1466 mutex_unlock(&ubi->buf_mutex);
1468 leb_write_unlock(ubi, vol_id, lnum);
1473 * print_rsvd_warning - warn about not having enough reserved PEBs.
1474 * @ubi: UBI device description object
1476 * This is a helper function for 'ubi_eba_init()' which is called when UBI
1477 * cannot reserve enough PEBs for bad block handling. This function makes a
1478 * decision whether we have to print a warning or not. The algorithm is as
1480 * o if this is a new UBI image, then just print the warning
1481 * o if this is an UBI image which has already been used for some time, print
1482 * a warning only if we can reserve less than 10% of the expected amount of
1485 * The idea is that when UBI is used, PEBs become bad, and the reserved pool
1486 * of PEBs becomes smaller, which is normal and we do not want to scare users
1487 * with a warning every time they attach the MTD device. This was an issue
1488 * reported by real users.
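 *
 * For example (illustrative numbers): with @ubi->beb_rsvd_level equal to 40,
 * a used image only triggers the warning when 4 or fewer PEBs could actually
 * be reserved.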
1490 static void print_rsvd_warning(struct ubi_device *ubi,
1491 struct ubi_attach_info *ai)
1494 * The 1 << 18 (256KiB) number is picked randomly, just a reasonably
1495 * large number to distinguish between newly flashed and used images.
1497 if (ai->max_sqnum > (1 << 18)) {
1498 int min = ubi->beb_rsvd_level / 10;
1502 if (ubi->beb_rsvd_pebs > min)
1506 ubi_warn(ubi, "cannot reserve enough PEBs for bad PEB handling, reserved %d, need %d",
1507 ubi->beb_rsvd_pebs, ubi->beb_rsvd_level);
1508 if (ubi->corr_peb_count)
1509 ubi_warn(ubi, "%d PEBs are corrupted and not used",
1510 ubi->corr_peb_count);
1514 * self_check_eba - run a self check on the EBA table constructed by fastmap.
1515 * @ubi: UBI device description object
1516 * @ai_fastmap: UBI attach info object created by fastmap
1517 * @ai_scan: UBI attach info object created by scanning
1519 * Returns < 0 in case of an internal error, 0 otherwise.
1520 * If a bad EBA table entry was found it will be printed out and
1521 * ubi_assert() triggers.
1523 int self_check_eba(struct ubi_device *ubi, struct ubi_attach_info *ai_fastmap,
1524 struct ubi_attach_info *ai_scan)
1526 int i, j, num_volumes, ret = 0;
1527 int **scan_eba, **fm_eba;
1528 struct ubi_ainf_volume *av;
1529 struct ubi_volume *vol;
1530 struct ubi_ainf_peb *aeb;
1533 num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT;
1535 scan_eba = kmalloc_array(num_volumes, sizeof(*scan_eba), GFP_KERNEL);
1539 fm_eba = kmalloc_array(num_volumes, sizeof(*fm_eba), GFP_KERNEL);
1545 for (i = 0; i < num_volumes; i++) {
1546 vol = ubi->volumes[i];
1550 scan_eba[i] = kmalloc_array(vol->reserved_pebs,
1558 fm_eba[i] = kmalloc_array(vol->reserved_pebs,
1566 for (j = 0; j < vol->reserved_pebs; j++)
1567 scan_eba[i][j] = fm_eba[i][j] = UBI_LEB_UNMAPPED;
1569 av = ubi_find_av(ai_scan, idx2vol_id(ubi, i));
1573 ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb)
1574 scan_eba[i][aeb->lnum] = aeb->pnum;
1576 av = ubi_find_av(ai_fastmap, idx2vol_id(ubi, i));
1580 ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb)
1581 fm_eba[i][aeb->lnum] = aeb->pnum;
1583 for (j = 0; j < vol->reserved_pebs; j++) {
1584 if (scan_eba[i][j] != fm_eba[i][j]) {
1585 if (scan_eba[i][j] == UBI_LEB_UNMAPPED ||
1586 fm_eba[i][j] == UBI_LEB_UNMAPPED)
1589 ubi_err(ubi, "LEB:%i:%i is PEB:%i instead of %i!",
1590 vol->vol_id, j, fm_eba[i][j],
1598 for (i = 0; i < num_volumes; i++) {
1599 if (!ubi->volumes[i])
1612 * ubi_eba_init - initialize the EBA sub-system using attaching information.
1613 * @ubi: UBI device description object
1614 * @ai: attaching information
1616 * This function returns zero in case of success and a negative error code in
1619 int ubi_eba_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
1621 int i, err, num_volumes;
1622 struct ubi_ainf_volume *av;
1623 struct ubi_volume *vol;
1624 struct ubi_ainf_peb *aeb;
1627 dbg_eba("initialize EBA sub-system");
1629 spin_lock_init(&ubi->ltree_lock);
1630 mutex_init(&ubi->alc_mutex);
1631 ubi->ltree = RB_ROOT;
1633 ubi->global_sqnum = ai->max_sqnum + 1;
1634 num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT;
1636 for (i = 0; i < num_volumes; i++) {
1637 struct ubi_eba_table *tbl;
1639 vol = ubi->volumes[i];
1645 tbl = ubi_eba_create_table(vol, vol->reserved_pebs);
1651 ubi_eba_replace_table(vol, tbl);
1653 av = ubi_find_av(ai, idx2vol_id(ubi, i));
1657 ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb) {
1658 if (aeb->lnum >= vol->reserved_pebs) {
1660 * This may happen in case of an unclean reboot
1663 ubi_move_aeb_to_list(av, aeb, &ai->erase);
1665 struct ubi_eba_entry *entry;
1667 entry = &vol->eba_tbl->entries[aeb->lnum];
1668 entry->pnum = aeb->pnum;
1673 if (ubi->avail_pebs < EBA_RESERVED_PEBS) {
1674 ubi_err(ubi, "not enough physical eraseblocks (%d, need %d)",
1675 ubi->avail_pebs, EBA_RESERVED_PEBS);
1676 if (ubi->corr_peb_count)
1677 ubi_err(ubi, "%d PEBs are corrupted and not used",
1678 ubi->corr_peb_count);
1682 ubi->avail_pebs -= EBA_RESERVED_PEBS;
1683 ubi->rsvd_pebs += EBA_RESERVED_PEBS;
1685 if (ubi->bad_allowed) {
1686 ubi_calculate_reserved(ubi);
1688 if (ubi->avail_pebs < ubi->beb_rsvd_level) {
1689 /* Not enough free physical eraseblocks */
1690 ubi->beb_rsvd_pebs = ubi->avail_pebs;
1691 print_rsvd_warning(ubi, ai);
1693 ubi->beb_rsvd_pebs = ubi->beb_rsvd_level;
1695 ubi->avail_pebs -= ubi->beb_rsvd_pebs;
1696 ubi->rsvd_pebs += ubi->beb_rsvd_pebs;
1699 dbg_eba("EBA sub-system is initialized");
1703 for (i = 0; i < num_volumes; i++) {
1704 if (!ubi->volumes[i])
1706 ubi_eba_replace_table(ubi->volumes[i], NULL);