1 // SPDX-License-Identifier: GPL-2.0-or-later
9 * The UBI Eraseblock Association (EBA) sub-system.
11 * This sub-system is responsible for I/O to/from logical eraseblock.
17 * The EBA sub-system implements per-logical eraseblock locking. Before
19 * per-logical eraseblock locking is implemented by means of the lock tree. The
20 * lock tree is an RB-tree which refers all the currently locked logical
26 * stored in the volume identifier header. This means that each VID header has
34 #include "ubi.h"
37 * struct ubi_eba_entry - structure encoding a single LEB -> PEB association
40 * This structure is encoding a LEB -> PEB association. Note that the LEB
49 * struct ubi_eba_table - LEB -> PEB association information
61 * ubi_next_sqnum - get next sequence number.
62 * @ubi: UBI device description object
68 unsigned long long ubi_next_sqnum(struct ubi_device *ubi)
72 spin_lock(&ubi->ltree_lock);
73 sqnum = ubi->global_sqnum++;
74 spin_unlock(&ubi->ltree_lock);
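/*
 * Illustrative sketch (not part of eba.c): a minimal user-space model of the
 * global, monotonically increasing 64-bit sequence counter that
 * ubi_next_sqnum() maintains under ubi->ltree_lock.  All names below are
 * invented for the example.
 */
#include <pthread.h>

struct seq_ctr {
	pthread_mutex_t lock;		/* plays the role of ubi->ltree_lock */
	unsigned long long next;	/* plays the role of ubi->global_sqnum */
};

static struct seq_ctr ctr = { PTHREAD_MUTEX_INITIALIZER, 0 };

static unsigned long long seq_ctr_next(struct seq_ctr *c)
{
	unsigned long long sqnum;

	pthread_mutex_lock(&c->lock);
	sqnum = c->next++;		/* every caller gets a unique, increasing value */
	pthread_mutex_unlock(&c->lock);

	return sqnum;
}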
80 * ubi_get_compat - get compatibility flags of a volume.
81 * @ubi: UBI device description object
82 * @vol_id: volume ID
84 * This function returns compatibility flags for an internal volume. User
87 static int ubi_get_compat(const struct ubi_device *ubi, int vol_id)
95 * ubi_eba_get_ldesc - get information about a LEB
96 * @vol: volume description object
107 ldesc->lnum = lnum;
108 ldesc->pnum = vol->eba_tbl->entries[lnum].pnum;
112 * ubi_eba_create_table - allocate a new EBA table and initialize it with all
114 * @vol: volume containing the EBA table to copy
124 int err = -ENOMEM;
129 return ERR_PTR(-ENOMEM);
131 tbl->entries = kmalloc_array(nentries, sizeof(*tbl->entries),
133 if (!tbl->entries)
137 tbl->entries[i].pnum = UBI_LEB_UNMAPPED;
148 * ubi_eba_destroy_table - destroy an EBA table
158 kfree(tbl->entries);
163 * ubi_eba_copy_table - copy the EBA table attached to vol into another table
164 * @vol: volume containing the EBA table to copy
176 ubi_assert(dst && vol && vol->eba_tbl);
178 src = vol->eba_tbl;
181 dst->entries[i].pnum = src->entries[i].pnum;
185 * ubi_eba_replace_table - assign a new EBA table to a volume
186 * @vol: volume containing the EBA table to copy
189 * Assign a new EBA table to the volume and release the old one.
193 ubi_eba_destroy_table(vol->eba_tbl);
194 vol->eba_tbl = tbl;
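/*
 * Illustrative sketch (not part of eba.c): a user-space model of the EBA
 * table handled by the helpers above - a per-volume array that maps every
 * LEB number to the PEB currently backing it, with -1 standing in for
 * UBI_LEB_UNMAPPED.  All names below are invented for the example.
 */
#include <stdlib.h>

#define EX_LEB_UNMAPPED	(-1)

struct ex_eba_table {
	int *pnum;		/* pnum[lnum] is the PEB backing LEB lnum */
	int nentries;
};

static struct ex_eba_table *ex_eba_create(int nentries)
{
	struct ex_eba_table *tbl;
	int i;

	tbl = malloc(sizeof(*tbl));
	if (!tbl)
		return NULL;

	tbl->pnum = malloc(nentries * sizeof(*tbl->pnum));
	if (!tbl->pnum) {
		free(tbl);
		return NULL;
	}

	tbl->nentries = nentries;
	for (i = 0; i < nentries; i++)
		tbl->pnum[i] = EX_LEB_UNMAPPED;	/* a fresh table is fully unmapped */

	return tbl;
}

static int ex_eba_is_mapped(const struct ex_eba_table *tbl, int lnum)
{
	return tbl->pnum[lnum] >= 0;	/* same test ubi_eba_is_mapped() uses */
}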
198 * ltree_lookup - look up the lock tree.
199 * @ubi: UBI device description object
200 * @vol_id: volume ID
205 * @ubi->ltree_lock has to be locked.
207 static struct ubi_ltree_entry *ltree_lookup(struct ubi_device *ubi, int vol_id,
212 p = ubi->ltree.rb_node;
218 if (vol_id < le->vol_id)
219 p = p->rb_left;
220 else if (vol_id > le->vol_id)
221 p = p->rb_right;
223 if (lnum < le->lnum)
224 p = p->rb_left;
225 else if (lnum > le->lnum)
226 p = p->rb_right;
236 * ltree_add_entry - add new entry to the lock tree.
237 * @ubi: UBI device description object
238 * @vol_id: volume ID
243 * Returns pointer to the lock tree entry or %-ENOMEM if memory allocation
246 static struct ubi_ltree_entry *ltree_add_entry(struct ubi_device *ubi,
253 return ERR_PTR(-ENOMEM);
255 le->users = 0;
256 init_rwsem(&le->mutex);
257 le->vol_id = vol_id;
258 le->lnum = lnum;
260 spin_lock(&ubi->ltree_lock);
261 le1 = ltree_lookup(ubi, vol_id, lnum);
275 * @ubi->ltree RB-tree.
279 p = &ubi->ltree.rb_node;
284 if (vol_id < le1->vol_id)
285 p = &(*p)->rb_left;
286 else if (vol_id > le1->vol_id)
287 p = &(*p)->rb_right;
289 ubi_assert(lnum != le1->lnum);
290 if (lnum < le1->lnum)
291 p = &(*p)->rb_left;
293 p = &(*p)->rb_right;
297 rb_link_node(&le->rb, parent, p);
298 rb_insert_color(&le->rb, &ubi->ltree);
300 le->users += 1;
301 spin_unlock(&ubi->ltree_lock);
308 * leb_read_lock - lock logical eraseblock for reading.
309 * @ubi: UBI device description object
310 * @vol_id: volume ID
316 static int leb_read_lock(struct ubi_device *ubi, int vol_id, int lnum)
320 le = ltree_add_entry(ubi, vol_id, lnum);
323 down_read(&le->mutex);
328 * leb_read_unlock - unlock logical eraseblock.
329 * @ubi: UBI device description object
330 * @vol_id: volume ID
333 static void leb_read_unlock(struct ubi_device *ubi, int vol_id, int lnum)
337 spin_lock(&ubi->ltree_lock);
338 le = ltree_lookup(ubi, vol_id, lnum);
339 le->users -= 1;
340 ubi_assert(le->users >= 0);
341 up_read(&le->mutex);
342 if (le->users == 0) {
343 rb_erase(&le->rb, &ubi->ltree);
346 spin_unlock(&ubi->ltree_lock);
350 * leb_write_lock - lock logical eraseblock for writing.
351 * @ubi: UBI device description object
352 * @vol_id: volume ID
358 static int leb_write_lock(struct ubi_device *ubi, int vol_id, int lnum)
362 le = ltree_add_entry(ubi, vol_id, lnum);
365 down_write(&le->mutex);
370 * leb_write_trylock - try to lock logical eraseblock for writing.
371 * @ubi: UBI device description object
372 * @vol_id: volume ID
380 static int leb_write_trylock(struct ubi_device *ubi, int vol_id, int lnum)
384 le = ltree_add_entry(ubi, vol_id, lnum);
387 if (down_write_trylock(&le->mutex))
391 spin_lock(&ubi->ltree_lock);
392 le->users -= 1;
393 ubi_assert(le->users >= 0);
394 if (le->users == 0) {
395 rb_erase(&le->rb, &ubi->ltree);
398 spin_unlock(&ubi->ltree_lock);
404 * leb_write_unlock - unlock logical eraseblock.
405 * @ubi: UBI device description object
406 * @vol_id: volume ID
409 static void leb_write_unlock(struct ubi_device *ubi, int vol_id, int lnum)
413 spin_lock(&ubi->ltree_lock);
414 le = ltree_lookup(ubi, vol_id, lnum);
415 le->users -= 1;
416 ubi_assert(le->users >= 0);
417 up_write(&le->mutex);
418 if (le->users == 0) {
419 rb_erase(&le->rb, &ubi->ltree);
422 spin_unlock(&ubi->ltree_lock);
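/*
 * Illustrative sketch (not part of eba.c): a simplified user-space model of
 * the per-LEB locking implemented above.  A short-lived global lock protects
 * a container of refcounted entries, and the per-entry rwlock serializes I/O
 * on one (vol_id, lnum) pair.  A linked list stands in for the kernel
 * RB-tree, the caller keeps the entry pointer instead of looking it up again
 * on unlock, and all names are invented for the example.
 */
#include <pthread.h>
#include <stdlib.h>

struct ex_leb_lock {
	int vol_id, lnum, users;
	pthread_rwlock_t rwlock;
	struct ex_leb_lock *next;
};

static pthread_mutex_t ex_tree_lock = PTHREAD_MUTEX_INITIALIZER;
static struct ex_leb_lock *ex_tree;

static struct ex_leb_lock *ex_get_entry(int vol_id, int lnum)
{
	struct ex_leb_lock *le;

	pthread_mutex_lock(&ex_tree_lock);
	for (le = ex_tree; le; le = le->next)
		if (le->vol_id == vol_id && le->lnum == lnum)
			break;
	if (!le) {
		le = calloc(1, sizeof(*le));
		if (!le) {
			pthread_mutex_unlock(&ex_tree_lock);
			return NULL;
		}
		le->vol_id = vol_id;
		le->lnum = lnum;
		pthread_rwlock_init(&le->rwlock, NULL);
		le->next = ex_tree;
		ex_tree = le;
	}
	le->users++;			/* pin the entry while it is in use */
	pthread_mutex_unlock(&ex_tree_lock);

	return le;
}

static void ex_put_entry(struct ex_leb_lock *le)
{
	pthread_mutex_lock(&ex_tree_lock);
	if (--le->users == 0) {		/* last user: unlink and free the entry */
		struct ex_leb_lock **p = &ex_tree;

		while (*p != le)
			p = &(*p)->next;
		*p = le->next;
		pthread_rwlock_destroy(&le->rwlock);
		free(le);
	}
	pthread_mutex_unlock(&ex_tree_lock);
}

/* leb_read_lock()/leb_read_unlock() then reduce to: */
static struct ex_leb_lock *ex_leb_read_lock(int vol_id, int lnum)
{
	struct ex_leb_lock *le = ex_get_entry(vol_id, lnum);

	if (le)
		pthread_rwlock_rdlock(&le->rwlock);

	return le;
}

static void ex_leb_read_unlock(struct ex_leb_lock *le)
{
	pthread_rwlock_unlock(&le->rwlock);
	ex_put_entry(le);
}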
426 * ubi_eba_is_mapped - check if a LEB is mapped.
427 * @vol: volume description object
434 return vol->eba_tbl->entries[lnum].pnum >= 0;
438 * ubi_eba_unmap_leb - un-map logical eraseblock.
439 * @ubi: UBI device description object
440 * @vol: volume description object
443 * This function un-maps logical eraseblock @lnum and schedules corresponding
447 int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol,
450 int err, pnum, vol_id = vol->vol_id;
452 if (ubi->ro_mode)
453 return -EROFS;
455 err = leb_write_lock(ubi, vol_id, lnum);
459 pnum = vol->eba_tbl->entries[lnum].pnum;
466 down_read(&ubi->fm_eba_sem);
467 vol->eba_tbl->entries[lnum].pnum = UBI_LEB_UNMAPPED;
468 up_read(&ubi->fm_eba_sem);
469 err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 0);
472 leb_write_unlock(ubi, vol_id, lnum);
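/*
 * Sketch of how an in-kernel client reaches the unmap path above, assuming
 * the standard UBI kernel API from <linux/mtd/ubi.h> (ubi_is_mapped() and
 * ubi_leb_unmap() end up in ubi_eba_is_mapped()/ubi_eba_unmap_leb()).  The
 * function name is invented and 'desc' is assumed to come from
 * ubi_open_volume(..., UBI_READWRITE).
 */
#include <linux/mtd/ubi.h>
#include <linux/printk.h>

static int example_unmap(struct ubi_volume_desc *desc, int lnum)
{
	int err;

	err = ubi_is_mapped(desc, lnum);
	if (err < 0)
		return err;	/* e.g. invalid LEB number */
	if (!err)
		return 0;	/* already unmapped, nothing to do */

	err = ubi_leb_unmap(desc, lnum);	/* schedules erasure of the old PEB */
	if (err)
		pr_err("unmapping LEB %d failed: %d\n", lnum, err);

	return err;
}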
478 * check_mapping - check and fixup a mapping
479 * @ubi: UBI device description object
480 * @vol: volume description object
487 * Normally during the full-scan at attach time this is fixed, for Fastmap
494 static int check_mapping(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
501 if (!ubi->fast_attach)
504 if (!vol->checkmap || test_bit(lnum, vol->checkmap))
507 vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
509 return -ENOMEM;
511 err = ubi_io_read_vid_hdr(ubi, *pnum, vidb, 0);
528 down_read(&ubi->fm_eba_sem);
529 vol->eba_tbl->entries[lnum].pnum = UBI_LEB_UNMAPPED;
530 up_read(&ubi->fm_eba_sem);
531 ubi_wl_put_peb(ubi, vol->vol_id, lnum, *pnum, torture);
535 ubi_err(ubi, "unable to read VID header back from PEB %i: %i",
545 found_vol_id = be32_to_cpu(vid_hdr->vol_id);
546 found_lnum = be32_to_cpu(vid_hdr->lnum);
548 if (found_lnum != lnum || found_vol_id != vol->vol_id) {
549 ubi_err(ubi, "EBA mismatch! PEB %i is LEB %i:%i instead of LEB %i:%i",
550 *pnum, found_vol_id, found_lnum, vol->vol_id, lnum);
551 ubi_ro_mode(ubi);
552 err = -EINVAL;
557 set_bit(lnum, vol->checkmap);
566 static int check_mapping(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
574 * ubi_eba_read_leb - read data.
575 * @ubi: UBI device description object
576 * @vol: volume description object
587 * In case of success this function returns zero. In case of a static volume,
588 * if data CRC mismatches - %-EBADMSG is returned. %-EBADMSG may also be
589 * returned for any volume type if an ECC error was detected by the MTD device
592 int ubi_eba_read_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
595 int err, pnum, scrub = 0, vol_id = vol->vol_id;
600 err = leb_read_lock(ubi, vol_id, lnum);
604 pnum = vol->eba_tbl->entries[lnum].pnum;
606 err = check_mapping(ubi, vol, lnum, &pnum);
619 leb_read_unlock(ubi, vol_id, lnum);
620 ubi_assert(vol->vol_type != UBI_STATIC_VOLUME);
628 if (vol->vol_type == UBI_DYNAMIC_VOLUME)
633 vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
635 err = -ENOMEM;
641 err = ubi_io_read_vid_hdr(ubi, pnum, vidb, 1);
646 * The former case means there is a bug -
647 * switch to read-only mode just in case.
648 * The latter case means a real corruption - we
654 ubi_warn(ubi, "corrupted VID header at PEB %d, LEB %d:%d",
656 err = -EBADMSG;
659 * Ending up here in the non-Fastmap case
667 * This is valid and works as the layer above UBI
671 if (ubi->fast_attach) {
672 err = -EBADMSG;
674 err = -EINVAL;
675 ubi_ro_mode(ubi);
683 ubi_assert(lnum < be32_to_cpu(vid_hdr->used_ebs));
684 ubi_assert(len == be32_to_cpu(vid_hdr->data_size));
686 crc = be32_to_cpu(vid_hdr->data_crc);
690 err = ubi_io_read_data(ubi, buf, pnum, offset, len);
695 if (vol->vol_type == UBI_DYNAMIC_VOLUME)
699 ubi_msg(ubi, "force data checking");
710 ubi_warn(ubi, "CRC error: calculated %#08x, must be %#08x",
712 err = -EBADMSG;
718 err = ubi_wl_scrub_peb(ubi, pnum);
720 leb_read_unlock(ubi, vol_id, lnum);
726 leb_read_unlock(ubi, vol_id, lnum);
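/*
 * Sketch of a read that goes through ubi_eba_read_leb(), assuming the
 * standard UBI kernel API from <linux/mtd/ubi.h>.  The function name and
 * buffer handling are invented for the example; the final argument requests
 * CRC verification, and reading an unmapped LEB of a dynamic volume simply
 * returns 0xFF bytes.
 */
#include <linux/mtd/ubi.h>
#include <linux/slab.h>
#include <linux/printk.h>

static int example_read(struct ubi_volume_desc *desc, int lnum, int len)
{
	char *buf;
	int err;

	buf = kmalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* read 'len' bytes starting at offset 0 of LEB 'lnum' */
	err = ubi_leb_read(desc, lnum, buf, 0, len, 1);
	if (err == -EBADMSG)
		pr_warn("LEB %d contains corrupted data\n", lnum);

	kfree(buf);
	return err;
}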
731 * ubi_eba_read_leb_sg - read data into a scatter gather list.
732 * @ubi: UBI device description object
733 * @vol: volume description object
735 * @sgl: UBI scatter gather list to store the read data
741 * storing the read data into a buffer it writes to an UBI scatter gather
744 int ubi_eba_read_leb_sg(struct ubi_device *ubi, struct ubi_volume *vol,
753 ubi_assert(sgl->list_pos < UBI_MAX_SG_COUNT);
754 sg = &sgl->sg[sgl->list_pos];
755 if (len < sg->length - sgl->page_pos)
758 to_read = sg->length - sgl->page_pos;
760 ret = ubi_eba_read_leb(ubi, vol, lnum,
761 sg_virt(sg) + sgl->page_pos, offset,
767 len -= to_read;
769 sgl->page_pos += to_read;
770 if (sgl->page_pos == sg->length) {
771 sgl->list_pos++;
772 sgl->page_pos = 0;
778 sgl->list_pos++;
779 sgl->page_pos = 0;
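/*
 * Illustrative sketch (not part of eba.c): the chunking logic above in
 * user-space form - a request of 'len' bytes is split across a list of
 * buffers while a (list_pos, page_pos) cursor advances, just as
 * ubi_eba_read_leb_sg() advances its scatter-gather cursor.  The read
 * callback stands in for ubi_eba_read_leb(), and all names are invented for
 * the example.
 */
struct ex_buf {
	char *data;
	int length;
};

struct ex_buf_list {
	struct ex_buf *bufs;
	int count;
	int list_pos;	/* index of the current buffer */
	int page_pos;	/* offset inside the current buffer */
};

static int ex_read_chunked(struct ex_buf_list *l, int len,
			   int (*read_cb)(char *dst, int n))
{
	while (len > 0) {
		struct ex_buf *b = &l->bufs[l->list_pos];
		int to_read = b->length - l->page_pos;
		int err;

		if (to_read > len)
			to_read = len;	/* the request ends inside this buffer */

		err = read_cb(b->data + l->page_pos, to_read);
		if (err)
			return err;

		len -= to_read;
		l->page_pos += to_read;
		if (l->page_pos == b->length) {	/* buffer full: move to the next one */
			l->list_pos++;
			l->page_pos = 0;
		}
	}

	return 0;
}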
786 * try_recover_peb - try to recover from write failure.
787 * @vol: volume description object
807 struct ubi_device *ubi = vol->ubi;
809 int new_pnum, err, vol_id = vol->vol_id, data_size;
814 new_pnum = ubi_wl_get_peb(ubi);
820 ubi_msg(ubi, "recover PEB %d, move data to PEB %d",
823 err = ubi_io_read_vid_hdr(ubi, pnum, vidb, 1);
826 err = -EIO;
831 ubi_assert(vid_hdr->vol_type == UBI_VID_DYNAMIC);
833 mutex_lock(&ubi->buf_mutex);
834 memset(ubi->peb_buf + offset, 0xFF, len);
838 err = ubi_io_read_data(ubi, ubi->peb_buf, pnum, 0, offset);
845 memcpy(ubi->peb_buf + offset, buf, len);
848 crc = crc32(UBI_CRC32_INIT, ubi->peb_buf, data_size);
849 vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
850 vid_hdr->copy_flag = 1;
851 vid_hdr->data_size = cpu_to_be32(data_size);
852 vid_hdr->data_crc = cpu_to_be32(crc);
853 err = ubi_io_write_vid_hdr(ubi, new_pnum, vidb);
857 err = ubi_io_write_data(ubi, ubi->peb_buf, new_pnum, 0, data_size);
860 mutex_unlock(&ubi->buf_mutex);
863 vol->eba_tbl->entries[lnum].pnum = new_pnum;
866 up_read(&ubi->fm_eba_sem);
869 ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
870 ubi_msg(ubi, "data was successfully recovered");
876 ubi_wl_put_peb(ubi, vol_id, lnum, new_pnum, 1);
877 ubi_warn(ubi, "failed to write to PEB %d", new_pnum);
884 * recover_peb - recover from write failure.
885 * @ubi: UBI device description object
887 * @vol_id: volume ID
899 static int recover_peb(struct ubi_device *ubi, int pnum, int vol_id, int lnum,
902 int err, idx = vol_id2idx(ubi, vol_id), tries;
903 struct ubi_volume *vol = ubi->volumes[idx];
906 vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
908 return -ENOMEM;
918 ubi_msg(ubi, "try again");
927 * try_write_vid_and_data - try to write VID header and data to a new PEB.
928 * @vol: volume description object
936 * eraseblock @lnum of volume @vol to a new physical eraseblock. Returns zero
945 struct ubi_device *ubi = vol->ubi;
946 int pnum, opnum, err, err2, vol_id = vol->vol_id;
948 pnum = ubi_wl_get_peb(ubi);
954 opnum = vol->eba_tbl->entries[lnum].pnum;
959 err = ubi_io_write_vid_hdr(ubi, pnum, vidb);
961 ubi_warn(ubi, "failed to write VID header to LEB %d:%d, PEB %d",
967 err = ubi_io_write_data(ubi, buf, pnum, offset, len);
969 ubi_warn(ubi,
976 vol->eba_tbl->entries[lnum].pnum = pnum;
979 up_read(&ubi->fm_eba_sem);
982 err2 = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
984 ubi_warn(ubi, "failed to return physical eraseblock %d, error %d",
988 err2 = ubi_wl_put_peb(ubi, vol_id, lnum, opnum, 0);
990 ubi_warn(ubi, "failed to return physical eraseblock %d, error %d",
999 * ubi_eba_write_leb - write data to dynamic volume.
1000 * @ubi: UBI device description object
1001 * @vol: volume description object
1007 * This function writes data to logical eraseblock @lnum of a dynamic volume
1013 int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
1016 int err, pnum, tries, vol_id = vol->vol_id;
1020 if (ubi->ro_mode)
1021 return -EROFS;
1023 err = leb_write_lock(ubi, vol_id, lnum);
1027 pnum = vol->eba_tbl->entries[lnum].pnum;
1029 err = check_mapping(ubi, vol, lnum, &pnum);
1038 err = ubi_io_write_data(ubi, buf, pnum, offset, len);
1040 ubi_warn(ubi, "failed to write data to PEB %d", pnum);
1041 if (err == -EIO && ubi->bad_allowed)
1042 err = recover_peb(ubi, pnum, vol_id, lnum, buf,
1051 * eraseblock and write the volume identifier header there first.
1053 vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
1055 leb_write_unlock(ubi, vol_id, lnum);
1056 return -ENOMEM;
1061 vid_hdr->vol_type = UBI_VID_DYNAMIC;
1062 vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
1063 vid_hdr->vol_id = cpu_to_be32(vol_id);
1064 vid_hdr->lnum = cpu_to_be32(lnum);
1065 vid_hdr->compat = ubi_get_compat(ubi, vol_id);
1066 vid_hdr->data_pad = cpu_to_be32(vol->data_pad);
1070 if (err != -EIO || !ubi->bad_allowed)
1079 vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
1080 ubi_msg(ubi, "try another PEB");
1087 ubi_ro_mode(ubi);
1089 leb_write_unlock(ubi, vol_id, lnum);
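/*
 * Sketch of a write that lands in ubi_eba_write_leb(), assuming the standard
 * UBI kernel API from <linux/mtd/ubi.h>.  The function name is invented;
 * 'offset' and 'len' are assumed to be multiples of the flash minimum I/O
 * unit, as the API requires, and an unmapped LEB gets mapped on its first
 * write.
 */
#include <linux/mtd/ubi.h>
#include <linux/errno.h>
#include <linux/printk.h>

static int example_write(struct ubi_volume_desc *desc, int lnum,
			 const void *buf, int offset, int len)
{
	struct ubi_volume_info vi;
	int err;

	ubi_get_volume_info(desc, &vi);
	if (offset + len > vi.usable_leb_size)
		return -EINVAL;		/* would run past the end of the LEB */

	err = ubi_leb_write(desc, lnum, buf, offset, len);
	if (err)
		pr_err("writing %d bytes to LEB %d:%d failed: %d\n",
		       len, lnum, offset, err);

	return err;
}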
1095 * ubi_eba_write_leb_st - write data to static volume.
1096 * @ubi: UBI device description object
1097 * @vol: volume description object
1101 * @used_ebs: how many logical eraseblocks will this volume contain
1103 * This function writes data to logical eraseblock @lnum of static volume
1105 * eraseblock in this static volume.
1116 int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol,
1119 int err, tries, data_size = len, vol_id = vol->vol_id;
1124 if (ubi->ro_mode)
1125 return -EROFS;
1127 if (lnum == used_ebs - 1)
1129 len = ALIGN(data_size, ubi->min_io_size);
1131 ubi_assert(!(len & (ubi->min_io_size - 1)));
1133 vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
1135 return -ENOMEM;
1139 err = leb_write_lock(ubi, vol_id, lnum);
1143 vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
1144 vid_hdr->vol_id = cpu_to_be32(vol_id);
1145 vid_hdr->lnum = cpu_to_be32(lnum);
1146 vid_hdr->compat = ubi_get_compat(ubi, vol_id);
1147 vid_hdr->data_pad = cpu_to_be32(vol->data_pad);
1150 vid_hdr->vol_type = UBI_VID_STATIC;
1151 vid_hdr->data_size = cpu_to_be32(data_size);
1152 vid_hdr->used_ebs = cpu_to_be32(used_ebs);
1153 vid_hdr->data_crc = cpu_to_be32(crc);
1155 ubi_assert(vol->eba_tbl->entries[lnum].pnum < 0);
1159 if (err != -EIO || !ubi->bad_allowed)
1162 vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
1163 ubi_msg(ubi, "try another PEB");
1167 ubi_ro_mode(ubi);
1169 leb_write_unlock(ubi, vol_id, lnum);
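/*
 * Illustrative sketch (not part of eba.c): the padding rule applied above.
 * Every LEB of a static volume except the last one carries a full LEB of
 * data; the last LEB carries 'data_size' bytes padded up to the minimum I/O
 * unit.  For example, with min_io_size = 2048 and data_size = 3000,
 * ALIGN(3000, 2048) = 4096 bytes are actually written to flash.
 */
/* same arithmetic as the kernel ALIGN() macro for a power-of-two alignment */
#define EX_ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))

static int ex_static_write_len(int data_size, int min_io_size)
{
	return EX_ALIGN(data_size, min_io_size);	/* e.g. EX_ALIGN(3000, 2048) == 4096 */
}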
1178 * ubi_eba_atomic_leb_change - change logical eraseblock atomically.
1179 * @ubi: UBI device description object
1180 * @vol: volume description object
1186 * has to contain new logical eraseblock data, and @len - the length of the
1191 * UBI reserves one LEB for the "atomic LEB change" operation, so only one
1192 * LEB change may be done at a time. This is ensured by @ubi->alc_mutex.
1194 int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
1197 int err, tries, vol_id = vol->vol_id;
1202 if (ubi->ro_mode)
1203 return -EROFS;
1210 err = ubi_eba_unmap_leb(ubi, vol, lnum);
1213 return ubi_eba_write_leb(ubi, vol, lnum, NULL, 0, 0);
1216 vidb = ubi_alloc_vid_buf(ubi, GFP_NOFS);
1218 return -ENOMEM;
1222 mutex_lock(&ubi->alc_mutex);
1223 err = leb_write_lock(ubi, vol_id, lnum);
1227 vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
1228 vid_hdr->vol_id = cpu_to_be32(vol_id);
1229 vid_hdr->lnum = cpu_to_be32(lnum);
1230 vid_hdr->compat = ubi_get_compat(ubi, vol_id);
1231 vid_hdr->data_pad = cpu_to_be32(vol->data_pad);
1234 vid_hdr->vol_type = UBI_VID_DYNAMIC;
1235 vid_hdr->data_size = cpu_to_be32(len);
1236 vid_hdr->copy_flag = 1;
1237 vid_hdr->data_crc = cpu_to_be32(crc);
1243 if (err != -EIO || !ubi->bad_allowed)
1246 vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
1247 ubi_msg(ubi, "try another PEB");
1252 * something nasty and unexpected happened. Switch to read-only
1256 ubi_ro_mode(ubi);
1258 leb_write_unlock(ubi, vol_id, lnum);
1261 mutex_unlock(&ubi->alc_mutex);
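/*
 * Sketch of an atomic LEB update via the public kernel API, assuming
 * <linux/mtd/ubi.h>.  ubi_leb_change() ends up in
 * ubi_eba_atomic_leb_change(): the new contents go to a spare PEB first, so
 * a reader sees either the complete old data or the complete new data, never
 * a half-written LEB.  The function name is invented and 'len' is assumed to
 * satisfy the API's size and alignment constraints.
 */
#include <linux/mtd/ubi.h>
#include <linux/printk.h>

static int example_atomic_update(struct ubi_volume_desc *desc, int lnum,
				 const void *buf, int len)
{
	int err;

	/* replace the whole contents of LEB 'lnum' with 'len' bytes from 'buf' */
	err = ubi_leb_change(desc, lnum, buf, len);
	if (err)
		pr_err("atomic change of LEB %d failed: %d\n", lnum, err);

	return err;
}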
1267 * is_error_sane - check whether a read error is sane.
1272 * code is sane, then we treat this error as non-fatal. Otherwise the error is
1273 * fatal and UBI will be switched to R/O mode later.
1276 * something which suggests there was a real read problem. E.g., %-EIO. Or a
1277 * memory allocation failed (-%ENOMEM). Otherwise, it is safer to switch to R/O
1287 if (err == -EIO || err == -ENOMEM || err == UBI_IO_BAD_HDR ||
1288 err == UBI_IO_BAD_HDR_EBADMSG || err == -ETIMEDOUT)
1294 * ubi_eba_copy_leb - copy logical eraseblock.
1295 * @ubi: UBI device description object
1307 int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
1315 ubi_assert(rwsem_is_locked(&ubi->fm_eba_sem));
1317 vol_id = be32_to_cpu(vid_hdr->vol_id);
1318 lnum = be32_to_cpu(vid_hdr->lnum);
1322 if (vid_hdr->vol_type == UBI_VID_STATIC) {
1323 data_size = be32_to_cpu(vid_hdr->data_size);
1324 aldata_size = ALIGN(data_size, ubi->min_io_size);
1327 ubi->leb_size - be32_to_cpu(vid_hdr->data_pad);
1329 idx = vol_id2idx(ubi, vol_id);
1330 spin_lock(&ubi->volumes_lock);
1332 * Note, we may race with volume deletion, which means that the volume
1334 * volume deletion un-maps all the volume's logical eraseblocks, it will
1337 vol = ubi->volumes[idx];
1338 spin_unlock(&ubi->volumes_lock);
1341 dbg_wl("volume %d is being removed, cancel", vol_id);
1349 * Note, we are using non-waiting locking here, because we cannot sleep
1353 * 'ubi_wl_put_peb()' function on the @ubi->move_mutex. In turn, we are
1354 * holding @ubi->move_mutex and go sleep on the LEB lock. So, if the
1357 * we do not know the reasons of the contention - it may be just a
1358 * normal I/O on this LEB, so we want to re-try.
1360 err = leb_write_trylock(ubi, vol_id, lnum);
1368 * probably waiting on @ubi->move_mutex. No need to continue the work,
1371 if (vol->eba_tbl->entries[lnum].pnum != from) {
1373 vol_id, lnum, from, vol->eba_tbl->entries[lnum].pnum);
1380 * this function utilizes the @ubi->peb_buf buffer which is shared
1381 * with some other functions - we lock the buffer by taking the
1382 * @ubi->buf_mutex.
1384 mutex_lock(&ubi->buf_mutex);
1386 err = ubi_io_read_data(ubi, ubi->peb_buf, from, 0, aldata_size);
1388 ubi_warn(ubi, "error %d while reading data from PEB %d",
1396 * case of a static volume it is fairly easy - the VID header contains
1397 * the data size. In case of a dynamic volume it is more difficult - we
1400 * may have some side-effects. And not only this. It is important not
1404 if (vid_hdr->vol_type == UBI_VID_DYNAMIC)
1406 ubi_calc_data_len(ubi, ubi->peb_buf, data_size);
1409 crc = crc32(UBI_CRC32_INIT, ubi->peb_buf, data_size);
1416 * @vid_hdr->copy_flag, @vid_hdr->data_size, and @vid_hdr->data_crc.
1419 vid_hdr->copy_flag = 1;
1420 vid_hdr->data_size = cpu_to_be32(data_size);
1421 vid_hdr->data_crc = cpu_to_be32(crc);
1423 vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
1425 err = ubi_io_write_vid_hdr(ubi, to, vidb);
1427 if (err == -EIO)
1435 err = ubi_io_read_vid_hdr(ubi, to, vidb, 1);
1438 ubi_warn(ubi, "error %d while reading VID header back from PEB %d",
1448 err = ubi_io_write_data(ubi, ubi->peb_buf, to, 0, aldata_size);
1450 if (err == -EIO)
1458 ubi_assert(vol->eba_tbl->entries[lnum].pnum == from);
1464 spin_lock(&ubi->volumes_lock);
1465 vol->eba_tbl->entries[lnum].pnum = to;
1466 spin_unlock(&ubi->volumes_lock);
1469 mutex_unlock(&ubi->buf_mutex);
1471 leb_write_unlock(ubi, vol_id, lnum);
1476 * print_rsvd_warning - warn about not having enough reserved PEBs.
1477 * @ubi: UBI device description object
1478 * @ai: UBI attach info object
1480 * This is a helper function for 'ubi_eba_init()' which is called when UBI
1484 * o if this is a new UBI image, then just print the warning
1485 * o if this is an UBI image which has already been used for some time, print
1489 * The idea is that when UBI is used, PEBs become bad, and the reserved pool
1494 static void print_rsvd_warning(struct ubi_device *ubi,
1501 if (ai->max_sqnum > (1 << 18)) {
1502 int min = ubi->beb_rsvd_level / 10;
1506 if (ubi->beb_rsvd_pebs > min)
1510 ubi_warn(ubi, "cannot reserve enough PEBs for bad PEB handling, reserved %d, need %d",
1511 ubi->beb_rsvd_pebs, ubi->beb_rsvd_level);
1512 if (ubi->corr_peb_count)
1513 ubi_warn(ubi, "%d PEBs are corrupted and not used",
1514 ubi->corr_peb_count);
1518 * self_check_eba - run a self check on the EBA table constructed by fastmap.
1519 * @ubi: UBI device description object
1520 * @ai_fastmap: UBI attach info object created by fastmap
1521 * @ai_scan: UBI attach info object created by scanning
1527 int self_check_eba(struct ubi_device *ubi, struct ubi_attach_info *ai_fastmap,
1537 num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT;
1541 return -ENOMEM;
1546 return -ENOMEM;
1550 vol = ubi->volumes[i];
1554 scan_eba[i] = kmalloc_array(vol->reserved_pebs,
1558 ret = -ENOMEM;
1562 fm_eba[i] = kmalloc_array(vol->reserved_pebs,
1566 ret = -ENOMEM;
1571 for (j = 0; j < vol->reserved_pebs; j++)
1574 av = ubi_find_av(ai_scan, idx2vol_id(ubi, i));
1578 ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb)
1579 scan_eba[i][aeb->lnum] = aeb->pnum;
1581 av = ubi_find_av(ai_fastmap, idx2vol_id(ubi, i));
1585 ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb)
1586 fm_eba[i][aeb->lnum] = aeb->pnum;
1588 for (j = 0; j < vol->reserved_pebs; j++) {
1594 ubi_err(ubi, "LEB:%i:%i is PEB:%i instead of %i!",
1595 vol->vol_id, j, fm_eba[i][j],
1603 while (--i >= 0) {
1604 if (!ubi->volumes[i])
1617 * ubi_eba_init - initialize the EBA sub-system using attaching information.
1618 * @ubi: UBI device description object
1624 int ubi_eba_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
1632 dbg_eba("initialize EBA sub-system");
1634 spin_lock_init(&ubi->ltree_lock);
1635 mutex_init(&ubi->alc_mutex);
1636 ubi->ltree = RB_ROOT;
1638 ubi->global_sqnum = ai->max_sqnum + 1;
1639 num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT;
1644 vol = ubi->volumes[i];
1650 tbl = ubi_eba_create_table(vol, vol->reserved_pebs);
1658 av = ubi_find_av(ai, idx2vol_id(ubi, i));
1662 ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb) {
1663 if (aeb->lnum >= vol->reserved_pebs) {
1666 * during re-size.
1668 ubi_move_aeb_to_list(av, aeb, &ai->erase);
1672 entry = &vol->eba_tbl->entries[aeb->lnum];
1673 entry->pnum = aeb->pnum;
1678 if (ubi->avail_pebs < EBA_RESERVED_PEBS) {
1679 ubi_err(ubi, "not enough physical eraseblocks (%d, need %d)",
1680 ubi->avail_pebs, EBA_RESERVED_PEBS);
1681 if (ubi->corr_peb_count)
1682 ubi_err(ubi, "%d PEBs are corrupted and not used",
1683 ubi->corr_peb_count);
1684 err = -ENOSPC;
1687 ubi->avail_pebs -= EBA_RESERVED_PEBS;
1688 ubi->rsvd_pebs += EBA_RESERVED_PEBS;
1690 if (ubi->bad_allowed) {
1691 ubi_calculate_reserved(ubi);
1693 if (ubi->avail_pebs < ubi->beb_rsvd_level) {
1695 ubi->beb_rsvd_pebs = ubi->avail_pebs;
1696 print_rsvd_warning(ubi, ai);
1698 ubi->beb_rsvd_pebs = ubi->beb_rsvd_level;
1700 ubi->avail_pebs -= ubi->beb_rsvd_pebs;
1701 ubi->rsvd_pebs += ubi->beb_rsvd_pebs;
1704 dbg_eba("EBA sub-system is initialized");
1709 if (!ubi->volumes[i])
1711 ubi_eba_replace_table(ubi->volumes[i], NULL);
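/*
 * End-to-end sketch of a kernel client exercising the EBA paths above,
 * assuming the standard UBI kernel API from <linux/mtd/ubi.h>.  UBI device
 * number 0, volume ID 0 and the 4096-byte length are arbitrary example
 * values; 4096 is assumed to be a multiple of the flash minimum I/O unit and
 * no larger than the volume's usable LEB size.
 */
#include <linux/mtd/ubi.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>

static int example_roundtrip(void)
{
	struct ubi_volume_desc *desc;
	char *buf;
	int err;

	desc = ubi_open_volume(0, 0, UBI_READWRITE);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	buf = kmalloc(4096, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto out_close;
	}

	memset(buf, 0xA5, 4096);
	err = ubi_leb_change(desc, 0, buf, 4096);	/* atomically fill LEB 0 */
	if (err)
		goto out_free;

	memset(buf, 0, 4096);
	err = ubi_leb_read(desc, 0, buf, 0, 4096, 1);	/* read it back with CRC checking */

out_free:
	kfree(buf);
out_close:
	ubi_close_volume(desc);
	return err;
}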