Lines matching "wear-leveling"
1 // SPDX-License-Identifier: GPL-2.0-or-later
9 * UBI wear-leveling sub-system.
11 * This sub-system is responsible for wear-leveling. It works in terms of
13 * eraseblocks, volumes, etc. From this sub-system's perspective all physical
14 * eraseblocks are of two types - used and free. Used physical eraseblocks are
21 * When physical eraseblocks are returned to the WL sub-system by means of the
23 * done asynchronously in context of the per-UBI device background thread,
24 * which is also managed by the WL sub-system.
26 * Wear-leveling is performed by moving the contents of used
30 * If the WL sub-system fails to erase a physical eraseblock, it marks it as
33 * This sub-system is also responsible for scrubbing. If a bit-flip is detected
35 * as moving it for wear-leveling reasons.
37 * As noted above, for the UBI sub-system all physical eraseblocks are either
38 * "free" or "used". Free eraseblocks are kept in the @wl->free RB-tree, while
39 * used eraseblocks are kept in @wl->used, @wl->erroneous, or @wl->scrub
40 * RB-trees, as well as (temporarily) in the @wl->pq queue.
42 * When the WL sub-system returns a physical eraseblock, the physical
44 * the physical eraseblock is not directly moved from the @wl->free tree to the
45 * @wl->used tree. There is a protection queue in between where this
46 * physical eraseblock is temporarily stored (@wl->pq).
63 * used. The former state corresponds to the @wl->free tree. The latter state
64 * is split into several sub-states:
65 * o the WL movement is allowed (@wl->used tree);
66 * o the WL movement is disallowed (@wl->erroneous) because the PEB is
67 * erroneous - e.g., there was a read error;
68 * o the WL movement is temporarily prohibited (@wl->pq queue);
69 * o scrubbing is needed (@wl->scrub tree).
71 * Depending on the sub-state, wear-leveling entries of the used physical
74 * Note, in this implementation, we keep a small in-RAM object for each physical
77 * re-work this sub-system and make it more scalable.
79 * At the moment this sub-system does not utilize the sequence number, which
85 * room for future re-works of the WL sub-system.
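The state split described above can be summarized by the following standalone sketch (illustrative only, not part of wl.c); the enum values are hypothetical names that simply mirror the @wl->free, @wl->pq, @wl->used, @wl->scrub and @wl->erroneous containers:

/* Hypothetical sketch: the logical states a PEB moves through. */
enum peb_wl_state {
	PEB_FREE,	/* in the @wl->free RB-tree, available for allocation    */
	PEB_PROTECTED,	/* in the @wl->pq queue, temporarily not moved by WL     */
	PEB_USED,	/* in the @wl->used RB-tree, may be moved by WL          */
	PEB_SCRUB,	/* in the @wl->scrub RB-tree, bit-flips seen, scrub it   */
	PEB_ERRONEOUS,	/* in the @wl->erroneous RB-tree, WL movement disallowed */
};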
95 /* Number of physical eraseblocks reserved for wear-leveling purposes */
100 * exceeded, the WL sub-system starts moving data from used physical
107 * When a physical eraseblock is moved, the WL sub-system has to pick the target
110 * to unlimited wear of one or a few physical eraseblocks. Indeed, imagine a
113 * counter of the free physical eraseblock to pick. Namely, the WL sub-system
121 * switch to read-only mode.
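A minimal, standalone sketch of the target-selection idea described above, assuming a sorted array of erase counters rather than the kernel's RB-tree; pick_target_ec and max_diff are illustrative names, with max_diff playing the role of %WL_FREE_MAX_DIFF:

/*
 * Among the free PEBs, prefer the most-worn one whose erase counter still
 * stays within max_diff of the least-worn free PEB.  'ec' is assumed to be
 * sorted in ascending order and to have at least one element.
 */
static int pick_target_ec(const int *ec, int n, int max_diff)
{
	int limit = ec[0] + max_diff;	/* ec[0] holds the lowest erase counter */
	int i, best = 0;

	for (i = 1; i < n; i++)
		if (ec[i] < limit)
			best = i;	/* highest counter still below the limit */

	return best;			/* index of the PEB to use as the target */
}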
132 * wl_tree_add - add a wear-leveling entry to a WL RB-tree.
133 * @e: the wear-leveling entry to add
137 * the @ubi->used and @ubi->free RB-trees.
143 p = &root->rb_node; in wl_tree_add()
150 if (e->ec < e1->ec) in wl_tree_add()
151 p = &(*p)->rb_left; in wl_tree_add()
152 else if (e->ec > e1->ec) in wl_tree_add()
153 p = &(*p)->rb_right; in wl_tree_add()
155 ubi_assert(e->pnum != e1->pnum); in wl_tree_add()
156 if (e->pnum < e1->pnum) in wl_tree_add()
157 p = &(*p)->rb_left; in wl_tree_add()
159 p = &(*p)->rb_right; in wl_tree_add()
163 rb_link_node(&e->u.rb, parent, p); in wl_tree_add()
164 rb_insert_color(&e->u.rb, root); in wl_tree_add()
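The RB-tree above is keyed by erase counter first and by PEB number second (the tie-breaker when counters are equal). A qsort()-style comparator expressing the same ordering, as an illustrative standalone sketch (wl_key and wl_key_cmp are hypothetical names):

struct wl_key {
	int ec;		/* erase counter: primary key   */
	int pnum;	/* PEB number:    secondary key */
};

static int wl_key_cmp(const void *a, const void *b)
{
	const struct wl_key *x = a, *y = b;

	if (x->ec != y->ec)
		return x->ec < y->ec ? -1 : 1;
	/* equal counters: order by PEB number; pnums never repeat */
	return x->pnum < y->pnum ? -1 : 1;
}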
168 * wl_entry_destroy - destroy a wear-leveling entry.
170 * @e: the wear-leveling entry to destroy
172 * This function destroys a wear-leveling entry and removes its reference from the lookup table.
177 ubi->lookuptbl[e->pnum] = NULL; in wl_entry_destroy()
182 * do_work - do one pending work.
198 * @ubi->work_sem is used to synchronize with the workers. Workers take in do_work()
203 down_read(&ubi->work_sem); in do_work()
204 spin_lock(&ubi->wl_lock); in do_work()
205 if (list_empty(&ubi->works)) { in do_work()
206 spin_unlock(&ubi->wl_lock); in do_work()
207 up_read(&ubi->work_sem); in do_work()
215 wrk = list_entry(ubi->works.next, struct ubi_work, list); in do_work()
216 list_del(&wrk->list); in do_work()
217 ubi->works_count -= 1; in do_work()
218 ubi_assert(ubi->works_count >= 0); in do_work()
219 spin_unlock(&ubi->wl_lock); in do_work()
226 err = wrk->func(ubi, wrk, 0); in do_work()
229 up_read(&ubi->work_sem); in do_work()
235 * in_wl_tree - check if a wear-leveling entry is present in a WL RB-tree.
236 * @e: the wear-leveling entry to check
239 * This function returns non-zero if @e is in the @root RB-tree and zero if it is not.
246 p = root->rb_node; in in_wl_tree()
252 if (e->pnum == e1->pnum) { in in_wl_tree()
257 if (e->ec < e1->ec) in in_wl_tree()
258 p = p->rb_left; in in_wl_tree()
259 else if (e->ec > e1->ec) in in_wl_tree()
260 p = p->rb_right; in in_wl_tree()
262 ubi_assert(e->pnum != e1->pnum); in in_wl_tree()
263 if (e->pnum < e1->pnum) in in_wl_tree()
264 p = p->rb_left; in in_wl_tree()
266 p = p->rb_right; in in_wl_tree()
274 * in_pq - check if a wear-leveling entry is present in the protection queue.
276 * @e: the wear-leveling entry to check
278 * This function returns non-zero if @e is in the protection queue and zero if it is not.
287 list_for_each_entry(p, &ubi->pq[i], u.list) in in_pq()
295 * prot_queue_add - add physical eraseblock to the protection queue.
299 * This function adds @e to the tail of the protection queue @ubi->pq, where
301 * temporarily protected from the wear-leveling worker. Note, @wl->lock has to be locked.
306 int pq_tail = ubi->pq_head - 1; in prot_queue_add()
309 pq_tail = UBI_PROT_QUEUE_LEN - 1; in prot_queue_add()
311 list_add_tail(&e->u.list, &ubi->pq[pq_tail]); in prot_queue_add()
312 dbg_wl("added PEB %d EC %d to the protection queue", e->pnum, e->ec); in prot_queue_add()
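The tail computation above places a new entry one slot behind the current head of a circular array of list heads. A standalone restatement of that index arithmetic (pq_tail_of and queue_len are illustrative names, with queue_len playing the role of %UBI_PROT_QUEUE_LEN):

static int pq_tail_of(int pq_head, int queue_len)
{
	int pq_tail = pq_head - 1;

	if (pq_tail < 0)
		pq_tail = queue_len - 1;	/* wrap around the circular array */

	return pq_tail;
}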
316 * find_wl_entry - find wear-leveling entry closest to certain erase counter.
318 * @root: the RB-tree where to look for
322 * This function looks for a wear-leveling entry with an erase counter closest to
334 max = e->ec + diff; in find_wl_entry()
336 p = root->rb_node; in find_wl_entry()
341 if (e1->ec >= max) { in find_wl_entry()
344 p = p->rb_left; in find_wl_entry()
346 p = p->rb_right; in find_wl_entry()
355 * find_mean_wl_entry - find wear-leveling entry with medium erase counter.
357 * @root: the RB-tree where to look for
359 * This function looks for a wear-leveling entry with a medium erase counter,
371 if (last->ec - first->ec < WL_FREE_MAX_DIFF) { in find_mean_wl_entry()
372 e = rb_entry(root->rb_node, struct ubi_wl_entry, u.rb); in find_mean_wl_entry()
388 * wl_get_wle - get a mean wl entry to be used by ubi_wl_get_peb() or
392 * This function returns a wear leveling entry in case of success and
399 e = find_mean_wl_entry(ubi, &ubi->free); in wl_get_wle()
405 self_check_in_wl_tree(ubi, e, &ubi->free); in wl_get_wle()
411 rb_erase(&e->u.rb, &ubi->free); in wl_get_wle()
412 ubi->free_count--; in wl_get_wle()
413 dbg_wl("PEB %d EC %d", e->pnum, e->ec); in wl_get_wle()
419 * prot_queue_del - remove a physical eraseblock from the protection queue.
424 * in case of success and %-ENODEV if the PEB was not found.
430 e = ubi->lookuptbl[pnum]; in prot_queue_del()
432 return -ENODEV; in prot_queue_del()
435 return -ENODEV; in prot_queue_del()
437 list_del(&e->u.list); in prot_queue_del()
438 dbg_wl("deleted PEB %d from the protection queue", e->pnum); in prot_queue_del()
443 * ubi_sync_erase - synchronously erase a physical eraseblock.
455 unsigned long long ec = e->ec; in ubi_sync_erase()
457 dbg_wl("erase PEB %d, old EC %llu", e->pnum, ec); in ubi_sync_erase()
459 err = self_check_ec(ubi, e->pnum, e->ec); in ubi_sync_erase()
461 return -EINVAL; in ubi_sync_erase()
463 ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS); in ubi_sync_erase()
465 return -ENOMEM; in ubi_sync_erase()
467 err = ubi_io_sync_erase(ubi, e->pnum, torture); in ubi_sync_erase()
474 * Erase counter overflow. Upgrade UBI and use 64-bit in ubi_sync_erase()
478 e->pnum, ec); in ubi_sync_erase()
479 err = -EINVAL; in ubi_sync_erase()
483 dbg_wl("erased PEB %d, new EC %llu", e->pnum, ec); in ubi_sync_erase()
485 ec_hdr->ec = cpu_to_be64(ec); in ubi_sync_erase()
487 err = ubi_io_write_ec_hdr(ubi, e->pnum, ec_hdr); in ubi_sync_erase()
491 e->ec = ec; in ubi_sync_erase()
492 spin_lock(&ubi->wl_lock); in ubi_sync_erase()
493 if (e->ec > ubi->max_ec) in ubi_sync_erase()
494 ubi->max_ec = e->ec; in ubi_sync_erase()
495 spin_unlock(&ubi->wl_lock); in ubi_sync_erase()
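The erase counter is stored big-endian in the EC header, which is what cpu_to_be64() produces above. A portable standalone equivalent of that encoding, for illustration (put_be64 is a hypothetical helper, not a kernel API):

static void put_be64(unsigned char *buf, unsigned long long v)
{
	int i;

	for (i = 0; i < 8; i++)
		buf[i] = (unsigned char)(v >> (56 - 8 * i));	/* most significant byte first */
}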
503 * serve_prot_queue - check if it is time to stop protecting PEBs.
521 spin_lock(&ubi->wl_lock); in serve_prot_queue()
522 list_for_each_entry_safe(e, tmp, &ubi->pq[ubi->pq_head], u.list) { in serve_prot_queue()
524 e->pnum, e->ec); in serve_prot_queue()
526 list_del(&e->u.list); in serve_prot_queue()
527 wl_tree_add(e, &ubi->used); in serve_prot_queue()
533 spin_unlock(&ubi->wl_lock); in serve_prot_queue()
539 ubi->pq_head += 1; in serve_prot_queue()
540 if (ubi->pq_head == UBI_PROT_QUEUE_LEN) in serve_prot_queue()
541 ubi->pq_head = 0; in serve_prot_queue()
542 ubi_assert(ubi->pq_head >= 0 && ubi->pq_head < UBI_PROT_QUEUE_LEN); in serve_prot_queue()
543 spin_unlock(&ubi->wl_lock); in serve_prot_queue()
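Complementing the tail arithmetic shown earlier, the head of the protection queue advances by one slot per invocation and wraps at the end of the array, so an entry queued at the tail stays protected for roughly queue_len calls. A standalone restatement (pq_advance_head is an illustrative name):

static int pq_advance_head(int pq_head, int queue_len)
{
	pq_head += 1;
	if (pq_head == queue_len)
		pq_head = 0;		/* wrap, mirroring the check above */

	return pq_head;
}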
547 * __schedule_ubi_work - schedule a work.
552 * list. Can only be used if ubi->work_sem is already held in read mode!
556 spin_lock(&ubi->wl_lock); in __schedule_ubi_work()
557 list_add_tail(&wrk->list, &ubi->works); in __schedule_ubi_work()
558 ubi_assert(ubi->works_count >= 0); in __schedule_ubi_work()
559 ubi->works_count += 1; in __schedule_ubi_work()
560 if (ubi->thread_enabled && !ubi_dbg_is_bgt_disabled(ubi)) in __schedule_ubi_work()
561 wake_up_process(ubi->bgt_thread); in __schedule_ubi_work()
562 spin_unlock(&ubi->wl_lock); in __schedule_ubi_work()
566 * schedule_ubi_work - schedule a work.
575 down_read(&ubi->work_sem); in schedule_ubi_work()
577 up_read(&ubi->work_sem); in schedule_ubi_work()
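schedule_ubi_work() appends a work item to a FIFO and do_work() later pops one item and calls its function. A single-threaded, standalone sketch of that pattern (locking, the background thread and the UBI structures are deliberately left out; all names here are illustrative):

#include <stddef.h>

struct wl_work {
	struct wl_work *next;
	int (*func)(struct wl_work *w);
};

struct wl_queue {
	struct wl_work *head;
	struct wl_work **tail;
};

static void wq_init(struct wl_queue *q)
{
	q->head = NULL;
	q->tail = &q->head;
}

static void wq_push(struct wl_queue *q, struct wl_work *w)
{
	w->next = NULL;
	*q->tail = w;		/* append at the tail, like list_add_tail() */
	q->tail = &w->next;
}

static int wq_do_one(struct wl_queue *q)
{
	struct wl_work *w = q->head;

	if (!w)
		return 0;	/* nothing pending */
	q->head = w->next;
	if (!q->head)
		q->tail = &q->head;

	return w->func(w);	/* run the pending work */
}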
584 * schedule_erase - schedule an erase work.
592 * This function returns zero in case of success and %-ENOMEM in case of
603 e->pnum, e->ec, torture); in schedule_erase()
607 return -ENOMEM; in schedule_erase()
609 wl_wrk->func = &erase_worker; in schedule_erase()
610 wl_wrk->e = e; in schedule_erase()
611 wl_wrk->vol_id = vol_id; in schedule_erase()
612 wl_wrk->lnum = lnum; in schedule_erase()
613 wl_wrk->torture = torture; in schedule_erase()
624 * do_sync_erase - run the erase worker synchronously.
637 dbg_wl("sync erase of PEB %i", e->pnum); in do_sync_erase()
649 * wear_leveling_worker - wear-leveling worker function.
652 * @shutdown: non-zero if the worker has to free memory and exit
653 * because the WL sub-system is shutting down
663 int erase = 0, keep = 0, vol_id = -1, lnum = -1;
675 return -ENOMEM;
679 down_read(&ubi->fm_eba_sem);
680 mutex_lock(&ubi->move_mutex);
681 spin_lock(&ubi->wl_lock);
682 ubi_assert(!ubi->move_from && !ubi->move_to);
683 ubi_assert(!ubi->move_to_put);
688 if (!ubi->free.rb_node ||
690 (!ubi->used.rb_node && !ubi->scrub.rb_node)) {
693 * the queue to be erased. Cancel movement - it will be
698 * @ubi->used tree later and the wear-leveling will be
702 !ubi->free.rb_node, !ubi->used.rb_node);
707 e1 = find_anchor_wl_entry(&ubi->used);
708 if (e1 && ubi->fm_anchor &&
709 (ubi->fm_anchor->ec - e1->ec >= UBI_WL_THRESHOLD)) {
710 ubi->fm_do_produce_anchor = 1;
713 * NULL assignment also prevents multiple wear-leveling checks
716 wl_tree_add(ubi->fm_anchor, &ubi->free);
717 ubi->fm_anchor = NULL;
718 ubi->free_count++;
721 if (ubi->fm_do_produce_anchor) {
728 self_check_in_wl_tree(ubi, e1, &ubi->used);
729 rb_erase(&e1->u.rb, &ubi->used);
730 dbg_wl("anchor-move PEB %d to PEB %d", e1->pnum, e2->pnum);
731 ubi->fm_do_produce_anchor = 0;
732 } else if (!ubi->scrub.rb_node) {
734 if (!ubi->scrub.rb_node) {
737 * Now pick the least worn-out used physical eraseblock and a
738 * highly worn-out free physical eraseblock. If the erase
739 * counters differ enough, start wear-leveling.
741 e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
746 if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) {
748 e1->ec, e2->ec);
751 wl_tree_add(e2, &ubi->free);
752 ubi->free_count++;
755 self_check_in_wl_tree(ubi, e1, &ubi->used);
756 rb_erase(&e1->u.rb, &ubi->used);
758 e1->pnum, e1->ec, e2->pnum, e2->ec);
762 e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, u.rb);
767 self_check_in_wl_tree(ubi, e1, &ubi->scrub);
768 rb_erase(&e1->u.rb, &ubi->scrub);
769 dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum);
772 ubi->move_from = e1;
773 ubi->move_to = e2;
774 spin_unlock(&ubi->wl_lock);
777 * Now we are going to copy physical eraseblock @e1->pnum to @e2->pnum.
787 err = ubi_io_read_vid_hdr(ubi, e1->pnum, vidb, 0);
801 dbg_wl("PEB %d has no VID header", e1->pnum);
806 * The same situation as %UBI_IO_FF, but bit-flips were
810 dbg_wl("PEB %d has no VID header but has bit-flips",
811 e1->pnum);
814 } else if (ubi->fast_attach && err == UBI_IO_BAD_HDR_EBADMSG) {
821 e1->pnum);
827 err, e1->pnum);
831 vol_id = be32_to_cpu(vid_hdr->vol_id);
832 lnum = be32_to_cpu(vid_hdr->lnum);
834 err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vidb);
841 * wear-leveling movement again, so put it to the
856 * Target PEB had bit-flips or write error - torture it.
869 * put this PEB to the @ubi->erroneous list to prevent
872 if (ubi->erroneous_peb_count > ubi->max_erroneous) {
874 ubi->erroneous_peb_count);
891 e1->pnum, vol_id, lnum, e2->pnum);
894 spin_lock(&ubi->wl_lock);
895 if (!ubi->move_to_put) {
896 wl_tree_add(e2, &ubi->used);
899 ubi->move_from = ubi->move_to = NULL;
900 ubi->move_to_put = ubi->wl_scheduled = 0;
901 spin_unlock(&ubi->wl_lock);
906 spin_lock(&ubi->wl_lock);
908 spin_unlock(&ubi->wl_lock);
919 e2->pnum, vol_id, lnum);
926 mutex_unlock(&ubi->move_mutex);
927 up_read(&ubi->fm_eba_sem);
936 if (vol_id != -1)
938 e1->pnum, vol_id, lnum, e2->pnum, err);
941 e1->pnum, e2->pnum, err);
942 spin_lock(&ubi->wl_lock);
946 wl_tree_add(e1, &ubi->erroneous);
947 ubi->erroneous_peb_count += 1;
949 wl_tree_add(e1, &ubi->scrub);
951 wl_tree_add(e1, &ubi->used);
953 wl_tree_add(e2, &ubi->free);
954 ubi->free_count++;
957 ubi_assert(!ubi->move_to_put);
958 ubi->move_from = ubi->move_to = NULL;
959 ubi->wl_scheduled = 0;
960 spin_unlock(&ubi->wl_lock);
977 mutex_unlock(&ubi->move_mutex);
978 up_read(&ubi->fm_eba_sem);
982 if (vol_id != -1)
984 err, e1->pnum, e2->pnum);
987 err, e1->pnum, vol_id, lnum, e2->pnum);
988 spin_lock(&ubi->wl_lock);
989 ubi->move_from = ubi->move_to = NULL;
990 ubi->move_to_put = ubi->wl_scheduled = 0;
993 spin_unlock(&ubi->wl_lock);
999 mutex_unlock(&ubi->move_mutex);
1000 up_read(&ubi->fm_eba_sem);
1002 return err < 0 ? err : -EIO;
1005 ubi->wl_scheduled = 0;
1006 spin_unlock(&ubi->wl_lock);
1007 mutex_unlock(&ubi->move_mutex);
1008 up_read(&ubi->fm_eba_sem);
1014 * ensure_wear_leveling - schedule wear-leveling if it is needed.
1016 * @nested: set to non-zero if this function is called from UBI worker
1018 * This function checks if it is time to start wear-leveling and schedules it
1027 spin_lock(&ubi->wl_lock);
1028 if (ubi->wl_scheduled)
1029 /* Wear-leveling is already in the work queue */
1033 * If the ubi->scrub tree is not empty, scrubbing is needed, and the
1036 if (!ubi->scrub.rb_node) {
1044 if (!ubi->used.rb_node || !ubi->free.rb_node)
1045 /* No physical eraseblocks - nothing to do */
1049 * We schedule wear-leveling only if the difference between the
1054 e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
1055 e2 = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF, 0);
1057 if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD))
1060 dbg_wl("schedule wear-leveling");
1064 ubi->wl_scheduled = 1;
1065 spin_unlock(&ubi->wl_lock);
1069 err = -ENOMEM;
1073 wrk->func = &wear_leveling_worker;
1081 spin_lock(&ubi->wl_lock);
1082 ubi->wl_scheduled = 0;
1084 spin_unlock(&ubi->wl_lock);
1089 * __erase_worker - physical eraseblock erase worker function.
1100 struct ubi_wl_entry *e = wl_wrk->e;
1101 int pnum = e->pnum;
1102 int vol_id = wl_wrk->vol_id;
1103 int lnum = wl_wrk->lnum;
1107 pnum, e->ec, wl_wrk->vol_id, wl_wrk->lnum);
1109 err = ubi_sync_erase(ubi, e, wl_wrk->torture);
1111 spin_lock(&ubi->wl_lock);
1113 if (!ubi->fm_disabled && !ubi->fm_anchor &&
1114 e->pnum < UBI_FM_MAX_START) {
1117 * enabled again in the wear-leveling started below.
1119 ubi->fm_anchor = e;
1120 ubi->fm_do_produce_anchor = 0;
1122 wl_tree_add(e, &ubi->free);
1123 ubi->free_count++;
1126 spin_unlock(&ubi->wl_lock);
1134 /* And take care of wear-leveling */
1141 if (err == -EINTR || err == -ENOMEM || err == -EAGAIN ||
1142 err == -EBUSY) {
1145 /* Re-schedule the PEB for erasure */
1148 spin_lock(&ubi->wl_lock);
1150 spin_unlock(&ubi->wl_lock);
1157 spin_lock(&ubi->wl_lock);
1159 spin_unlock(&ubi->wl_lock);
1160 if (err != -EIO)
1162 * If this is not %-EIO, we have no idea what to do. Scheduling
1168 /* It is %-EIO, the PEB went bad */
1170 if (!ubi->bad_allowed) {
1175 spin_lock(&ubi->volumes_lock);
1176 if (ubi->beb_rsvd_pebs == 0) {
1177 if (ubi->avail_pebs == 0) {
1178 spin_unlock(&ubi->volumes_lock);
1182 ubi->avail_pebs -= 1;
1185 spin_unlock(&ubi->volumes_lock);
1192 spin_lock(&ubi->volumes_lock);
1193 if (ubi->beb_rsvd_pebs > 0) {
1199 ubi->avail_pebs += 1;
1202 ubi->beb_rsvd_pebs -= 1;
1204 ubi->bad_peb_count += 1;
1205 ubi->good_peb_count -= 1;
1209 else if (ubi->beb_rsvd_pebs)
1211 ubi->beb_rsvd_pebs);
1214 spin_unlock(&ubi->volumes_lock);
1220 spin_lock(&ubi->volumes_lock);
1221 ubi->avail_pebs += 1;
1222 spin_unlock(&ubi->volumes_lock);
1234 struct ubi_wl_entry *e = wl_wrk->e;
1236 dbg_wl("cancel erasure of PEB %d EC %d", e->pnum, e->ec);
1248 * ubi_wl_put_peb - return a PEB to the wear-leveling sub-system.
1268 ubi_assert(pnum < ubi->peb_count);
1270 down_read(&ubi->fm_protect);
1273 spin_lock(&ubi->wl_lock);
1274 e = ubi->lookuptbl[pnum];
1278 * process (e.g., the wear-leveling worker), the corresponding process
1283 spin_unlock(&ubi->wl_lock);
1284 up_read(&ubi->fm_protect);
1287 if (e == ubi->move_from) {
1291 * wear-leveling worker.
1294 spin_unlock(&ubi->wl_lock);
1296 /* Wait for the WL worker by taking the @ubi->move_mutex */
1297 mutex_lock(&ubi->move_mutex);
1298 mutex_unlock(&ubi->move_mutex);
1300 } else if (e == ubi->move_to) {
1304 * sub-system already re-mapped the LEB in 'ubi_eba_copy_leb()'
1305 * but the WL sub-system has not put the PEB to the "used" tree
1311 ubi_assert(!ubi->move_to_put);
1312 ubi->move_to_put = 1;
1313 spin_unlock(&ubi->wl_lock);
1314 up_read(&ubi->fm_protect);
1317 if (in_wl_tree(e, &ubi->used)) {
1318 self_check_in_wl_tree(ubi, e, &ubi->used);
1319 rb_erase(&e->u.rb, &ubi->used);
1320 } else if (in_wl_tree(e, &ubi->scrub)) {
1321 self_check_in_wl_tree(ubi, e, &ubi->scrub);
1322 rb_erase(&e->u.rb, &ubi->scrub);
1323 } else if (in_wl_tree(e, &ubi->erroneous)) {
1324 self_check_in_wl_tree(ubi, e, &ubi->erroneous);
1325 rb_erase(&e->u.rb, &ubi->erroneous);
1326 ubi->erroneous_peb_count -= 1;
1327 ubi_assert(ubi->erroneous_peb_count >= 0);
1331 err = prot_queue_del(ubi, e->pnum);
1335 spin_unlock(&ubi->wl_lock);
1336 up_read(&ubi->fm_protect);
1341 spin_unlock(&ubi->wl_lock);
1345 spin_lock(&ubi->wl_lock);
1346 wl_tree_add(e, &ubi->used);
1347 spin_unlock(&ubi->wl_lock);
1350 up_read(&ubi->fm_protect);
1355 * ubi_wl_scrub_peb - schedule a physical eraseblock for scrubbing.
1359 * If a bit-flip in a physical eraseblock is detected, this physical eraseblock
1371 spin_lock(&ubi->wl_lock);
1372 e = ubi->lookuptbl[pnum];
1373 if (e == ubi->move_from || in_wl_tree(e, &ubi->scrub) ||
1374 in_wl_tree(e, &ubi->erroneous)) {
1375 spin_unlock(&ubi->wl_lock);
1379 if (e == ubi->move_to) {
1386 spin_unlock(&ubi->wl_lock);
1392 if (in_wl_tree(e, &ubi->used)) {
1393 self_check_in_wl_tree(ubi, e, &ubi->used);
1394 rb_erase(&e->u.rb, &ubi->used);
1398 err = prot_queue_del(ubi, e->pnum);
1402 spin_unlock(&ubi->wl_lock);
1407 wl_tree_add(e, &ubi->scrub);
1408 spin_unlock(&ubi->wl_lock);
1411 * Technically scrubbing is the same as wear-leveling, so it is done
1418 * ubi_wl_flush - flush all pending works.
1439 vol_id, lnum, ubi->works_count);
1445 down_read(&ubi->work_sem);
1446 spin_lock(&ubi->wl_lock);
1447 list_for_each_entry_safe(wrk, tmp, &ubi->works, list) {
1448 if ((vol_id == UBI_ALL || wrk->vol_id == vol_id) &&
1449 (lnum == UBI_ALL || wrk->lnum == lnum)) {
1450 list_del(&wrk->list);
1451 ubi->works_count -= 1;
1452 ubi_assert(ubi->works_count >= 0);
1453 spin_unlock(&ubi->wl_lock);
1455 err = wrk->func(ubi, wrk, 0);
1457 up_read(&ubi->work_sem);
1461 spin_lock(&ubi->wl_lock);
1466 spin_unlock(&ubi->wl_lock);
1467 up_read(&ubi->work_sem);
1474 down_write(&ubi->work_sem);
1475 up_write(&ubi->work_sem);
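The flush loop above matches pending works against a (volume ID, LEB number) pair where %UBI_ALL acts as a wildcard for either field. A standalone restatement of that predicate (work_matches and the wildcard parameter are illustrative):

static int work_matches(int wrk_vol, int wrk_lnum, int vol_id, int lnum, int wildcard)
{
	return (vol_id == wildcard || wrk_vol == vol_id) &&
	       (lnum == wildcard || wrk_lnum == lnum);
}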
1482 if (in_wl_tree(e, &ubi->scrub))
1484 else if (in_wl_tree(e, &ubi->erroneous))
1486 else if (ubi->move_from == e)
1488 else if (ubi->move_to == e)
1495 * ubi_bitflip_check - Check an eraseblock for bitflips and scrub it if needed.
1518 if (pnum < 0 || pnum >= ubi->peb_count) {
1519 err = -EINVAL;
1527 down_write(&ubi->work_sem);
1533 spin_lock(&ubi->wl_lock);
1534 e = ubi->lookuptbl[pnum];
1536 spin_unlock(&ubi->wl_lock);
1537 err = -ENOENT;
1545 spin_unlock(&ubi->wl_lock);
1546 err = -EBUSY;
1549 spin_unlock(&ubi->wl_lock);
1552 mutex_lock(&ubi->buf_mutex);
1553 err = ubi_io_read(ubi, ubi->peb_buf, pnum, 0, ubi->peb_size);
1554 mutex_unlock(&ubi->buf_mutex);
1561 spin_lock(&ubi->wl_lock);
1567 e = ubi->lookuptbl[pnum];
1569 spin_unlock(&ubi->wl_lock);
1570 err = -ENOENT;
1575 * Need to re-check state
1578 spin_unlock(&ubi->wl_lock);
1579 err = -EBUSY;
1584 prot_queue_del(ubi, e->pnum);
1585 wl_tree_add(e, &ubi->scrub);
1586 spin_unlock(&ubi->wl_lock);
1589 } else if (in_wl_tree(e, &ubi->used)) {
1590 rb_erase(&e->u.rb, &ubi->used);
1591 wl_tree_add(e, &ubi->scrub);
1592 spin_unlock(&ubi->wl_lock);
1595 } else if (in_wl_tree(e, &ubi->free)) {
1596 rb_erase(&e->u.rb, &ubi->free);
1597 ubi->free_count--;
1598 spin_unlock(&ubi->wl_lock);
1602 * erasure right away. No wear-leveling needed.
1607 spin_unlock(&ubi->wl_lock);
1608 err = -EAGAIN;
1612 err = -EUCLEAN;
1618 up_write(&ubi->work_sem);
1625 * tree_destroy - destroy an RB-tree.
1634 rb = root->rb_node;
1636 if (rb->rb_left)
1637 rb = rb->rb_left;
1638 else if (rb->rb_right)
1639 rb = rb->rb_right;
1645 if (rb->rb_left == &e->u.rb)
1646 rb->rb_left = NULL;
1648 rb->rb_right = NULL;
1657 * ubi_thread - UBI background thread.
1666 ubi->bgt_name, task_pid_nr(current));
1678 spin_lock(&ubi->wl_lock);
1679 if (list_empty(&ubi->works) || ubi->ro_mode ||
1680 !ubi->thread_enabled || ubi_dbg_is_bgt_disabled(ubi)) {
1682 spin_unlock(&ubi->wl_lock);
1699 spin_unlock(&ubi->wl_lock);
1704 ubi->bgt_name, err);
1708 * switch to read-only mode.
1711 ubi->bgt_name, WL_MAX_FAILURES);
1713 ubi->thread_enabled = 0;
1722 dbg_wl("background thread \"%s\" is killed", ubi->bgt_name);
1723 ubi->thread_enabled = 0;
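The thread loop above tolerates a limited number of consecutive work failures before giving up (the kernel switches the device to read-only mode at that point). A standalone sketch of that policy, assuming a success resets the counter (too_many_failures and max_failures are illustrative names, with max_failures playing the role of %WL_MAX_FAILURES):

static int too_many_failures(int *failures, int work_err, int max_failures)
{
	if (!work_err) {
		*failures = 0;			/* a successful work resets the count */
		return 0;
	}

	*failures += 1;
	return *failures >= max_failures;	/* non-zero: give up / go read-only */
}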
1728 * shutdown_work - shutdown all pending works.
1733 while (!list_empty(&ubi->works)) {
1736 wrk = list_entry(ubi->works.next, struct ubi_work, list);
1737 list_del(&wrk->list);
1738 wrk->func(ubi, wrk, 1);
1739 ubi->works_count -= 1;
1740 ubi_assert(ubi->works_count >= 0);
1745 * erase_aeb - erase a PEB described by a UBI attach info entry
1757 return -ENOMEM;
1759 e->pnum = aeb->pnum;
1760 e->ec = aeb->ec;
1761 ubi->lookuptbl[e->pnum] = e;
1768 wl_tree_add(e, &ubi->free);
1769 ubi->free_count++;
1771 err = schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0, false);
1785 * ubi_wl_init - initialize the WL sub-system using attaching information.
1800 ubi->used = ubi->erroneous = ubi->free = ubi->scrub = RB_ROOT;
1801 spin_lock_init(&ubi->wl_lock);
1802 mutex_init(&ubi->move_mutex);
1803 init_rwsem(&ubi->work_sem);
1804 ubi->max_ec = ai->max_ec;
1805 INIT_LIST_HEAD(&ubi->works);
1807 sprintf(ubi->bgt_name, UBI_BGT_NAME_PATTERN, ubi->ubi_num);
1809 err = -ENOMEM;
1810 ubi->lookuptbl = kcalloc(ubi->peb_count, sizeof(void *), GFP_KERNEL);
1811 if (!ubi->lookuptbl)
1815 INIT_LIST_HEAD(&ubi->pq[i]);
1816 ubi->pq_head = 0;
1818 ubi->free_count = 0;
1819 list_for_each_entry_safe(aeb, tmp, &ai->erase, u.list) {
1829 list_for_each_entry(aeb, &ai->free, u.list) {
1834 err = -ENOMEM;
1838 e->pnum = aeb->pnum;
1839 e->ec = aeb->ec;
1840 ubi_assert(e->ec >= 0);
1842 wl_tree_add(e, &ubi->free);
1843 ubi->free_count++;
1845 ubi->lookuptbl[e->pnum] = e;
1850 ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) {
1851 ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) {
1856 err = -ENOMEM;
1860 e->pnum = aeb->pnum;
1861 e->ec = aeb->ec;
1862 ubi->lookuptbl[e->pnum] = e;
1864 if (!aeb->scrub) {
1866 e->pnum, e->ec);
1867 wl_tree_add(e, &ubi->used);
1870 e->pnum, e->ec);
1871 wl_tree_add(e, &ubi->scrub);
1878 list_for_each_entry(aeb, &ai->fastmap, u.list) {
1881 e = ubi_find_fm_block(ubi, aeb->pnum);
1884 ubi_assert(!ubi->lookuptbl[e->pnum]);
1885 ubi->lookuptbl[e->pnum] = e;
1895 if (ubi->lookuptbl[aeb->pnum])
1907 if (aeb->vol_id == UBI_FM_SB_VOLUME_ID)
1920 ubi_assert(ubi->good_peb_count == found_pebs);
1925 if (ubi->avail_pebs < reserved_pebs) {
1927 ubi->avail_pebs, reserved_pebs);
1928 if (ubi->corr_peb_count)
1930 ubi->corr_peb_count);
1931 err = -ENOSPC;
1934 ubi->avail_pebs -= reserved_pebs;
1935 ubi->rsvd_pebs += reserved_pebs;
1937 /* Schedule wear-leveling if needed */
1943 if (!ubi->ro_mode && !ubi->fm_disabled)
1950 tree_destroy(ubi, &ubi->used);
1951 tree_destroy(ubi, &ubi->free);
1952 tree_destroy(ubi, &ubi->scrub);
1953 kfree(ubi->lookuptbl);
1958 * protection_queue_destroy - destroy the protection queue.
1967 list_for_each_entry_safe(e, tmp, &ubi->pq[i], u.list) {
1968 list_del(&e->u.list);
1975 * ubi_wl_close - close the wear-leveling sub-system.
1980 dbg_wl("close the WL sub-system");
1984 tree_destroy(ubi, &ubi->used);
1985 tree_destroy(ubi, &ubi->erroneous);
1986 tree_destroy(ubi, &ubi->free);
1987 tree_destroy(ubi, &ubi->scrub);
1988 kfree(ubi->lookuptbl);
1992 * self_check_ec - make sure that the erase counter of a PEB is correct.
2010 ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
2012 return -ENOMEM;
2021 read_ec = be64_to_cpu(ec_hdr->ec);
2022 if (ec != read_ec && read_ec - ec > 1) {
2023 ubi_err(ubi, "self-check failed for PEB %d", pnum);
2036 * self_check_in_wl_tree - check that a wear-leveling entry is in a WL RB-tree.
2038 * @e: the wear-leveling entry to check
2041 * This function returns zero if @e is in the @root RB-tree and %-EINVAL if it
2053 ubi_err(ubi, "self-check failed for PEB %d, EC %d, RB-tree %p ",
2054 e->pnum, e->ec, root);
2056 return -EINVAL;
2060 * self_check_in_pq - check if a wear-leveling entry is in the protection
2063 * @e: the wear-leveling entry to check
2065 * This function returns zero if @e is in @ubi->pq and %-EINVAL if it is not.
2076 ubi_err(ubi, "self-check failed for PEB %d, EC %d, Protect queue",
2077 e->pnum, e->ec);
2079 return -EINVAL;
2086 e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF, 0);
2087 self_check_in_wl_tree(ubi, e, &ubi->free);
2088 ubi->free_count--;
2089 ubi_assert(ubi->free_count >= 0);
2090 rb_erase(&e->u.rb, &ubi->free);
2096 * produce_free_peb - produce a free physical eraseblock.
2108 while (!ubi->free.rb_node && ubi->works_count) {
2109 spin_unlock(&ubi->wl_lock);
2114 spin_lock(&ubi->wl_lock);
2123 * ubi_wl_get_peb - get a physical eraseblock.
2128 * Returns with ubi->fm_eba_sem held in read mode!
2136 down_read(&ubi->fm_eba_sem);
2137 spin_lock(&ubi->wl_lock);
2138 if (!ubi->free.rb_node) {
2139 if (ubi->works_count == 0) {
2141 ubi_assert(list_empty(&ubi->works));
2142 spin_unlock(&ubi->wl_lock);
2143 return -ENOSPC;
2148 spin_unlock(&ubi->wl_lock);
2151 spin_unlock(&ubi->wl_lock);
2152 up_read(&ubi->fm_eba_sem);
2158 spin_unlock(&ubi->wl_lock);
2160 err = ubi_self_check_all_ff(ubi, e->pnum, ubi->vid_hdr_aloffset,
2161 ubi->peb_size - ubi->vid_hdr_aloffset);
2163 ubi_err(ubi, "new PEB %d does not contain all 0xFF bytes", e->pnum);
2167 return e->pnum;
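ubi_wl_get_peb() above retries when the free tree is empty: it fails with %-ENOSPC only if no works are pending, otherwise it produces a free PEB by doing pending work and tries again. A standalone caricature of that control flow (the counters and the assumption that one unit of work yields one free PEB are purely illustrative):

#include <errno.h>

static int get_peb_sketch(int *free_count, int *pending_works)
{
	while (*free_count == 0) {
		if (*pending_works == 0)
			return -ENOSPC;	/* nothing left to reclaim */
		*pending_works -= 1;	/* do one pending (erase) work ... */
		*free_count += 1;	/* ... and assume it freed one PEB */
	}

	*free_count -= 1;
	return 0;			/* a real implementation returns the PEB number */
}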
2170 #include "fastmap-wl.c"