Commit 4b68bf9a authored by Arne Edholm, committed by Richard Weinberger

ubi: Select fastmap anchor PEBs considering wear level rules

There is a risk that the fastmap anchor PEB alternates between just two
PEBs: the current anchor and the previous anchor that was just deleted.
Because the fastmap pools get the first take on free PEBs, the pools may
leave no free PEBs to be selected as the new anchor, resulting in the two
PEBs alternating. If the anchor PEBs reach a high erase count they will not
be used by the pools but will remain in ubi->free, further increasing the
likelihood that they are picked as anchors.

Getting stuck continuously using only a couple of PEBs will result in
uneven wear, eventually leading to failure.

To fix this:

- Choose the fastmap anchor when the most free PEBs are available. This is
  during rebuilding of the fastmap pools, after the unused pool PEBs have
  been added to ubi->free but before the pools are populated again from the
  free PEBs. Also reserve the second-best PEB as a candidate for the next
  fastmap anchor update. If a better PEB is found at the next update, the
  candidate is released again for building the pools. (A simplified sketch
  of this selection follows the list.)

- Enable anchor move within the anchor area again as it is useful for
  distributing wear.

- The anchor candidate for the next fastmap update is the most suitable free
  PEB. Check its erase count during wear leveling. If the wear-leveling
  limit is exceeded, the PEB is considered unsuitable for now. As all other
  unused anchor-area PEBs should be even worse, free up the used anchor-area
  PEB with the lowest erase count.
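
Below, for illustration only, is a small self-contained sketch (not the
kernel code) of the selection rule described in the first point: when all
candidates are back on the free list, the two anchor-area PEBs with the
lowest erase counters are reserved as fm_anchor and fm_next_anchor. The
names used here (struct peb, pick_anchor(), ANCHOR_AREA_END) are invented
for the sketch; in the driver this work is done by ubi_wl_get_fm_peb()
during ubi_refill_pools().

#include <stdio.h>

#define ANCHOR_AREA_END 64	/* stand-in for UBI_FM_MAX_START */

struct peb {
	int pnum;	/* physical eraseblock number */
	int ec;		/* erase counter */
	int free;	/* 1 while the PEB sits on the free list */
};

/* Return the free anchor-area PEB with the lowest erase counter and take
 * it off the free list, roughly what ubi_wl_get_fm_peb(ubi, 1) does. */
static struct peb *pick_anchor(struct peb *pebs, int n)
{
	struct peb *best = NULL;
	int i;

	for (i = 0; i < n; i++) {
		if (!pebs[i].free || pebs[i].pnum >= ANCHOR_AREA_END)
			continue;
		if (!best || pebs[i].ec < best->ec)
			best = &pebs[i];
	}
	if (best)
		best->free = 0;
	return best;
}

int main(void)
{
	struct peb pebs[] = {
		{ 0, 120, 1 }, { 1, 80, 1 }, { 2, 95, 1 }, { 70, 10, 1 },
	};
	int n = sizeof(pebs) / sizeof(pebs[0]);
	struct peb *anchor, *next_anchor;

	/* During pool refill every candidate is back on the free list, so
	 * the two best anchor-area PEBs can be reserved before the pools
	 * grab the remaining free PEBs. PEB 70 lies outside the anchor
	 * area and is never considered. */
	anchor = pick_anchor(pebs, n);
	next_anchor = pick_anchor(pebs, n);

	printf("fm_anchor:      PEB %d (ec=%d)\n", anchor->pnum, anchor->ec);
	printf("fm_next_anchor: PEB %d (ec=%d)\n",
	       next_anchor->pnum, next_anchor->ec);
	return 0;
}

If a better PEB shows up at a later refill, the previously reserved
candidate is simply returned to the free pool, which is what the
wl_tree_add() calls at the top of ubi_refill_pools() do in this patch.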

Signed-off-by: Arne Edholm <arne.edholm@axis.com>
Signed-off-by: Richard Weinberger <richard@nod.at>
parent 3d77e6a8
@@ -116,6 +116,21 @@ void ubi_refill_pools(struct ubi_device *ubi)
 	wl_pool->size = 0;
 	pool->size = 0;
 
+	if (ubi->fm_anchor) {
+		wl_tree_add(ubi->fm_anchor, &ubi->free);
+		ubi->free_count++;
+	}
+	if (ubi->fm_next_anchor) {
+		wl_tree_add(ubi->fm_next_anchor, &ubi->free);
+		ubi->free_count++;
+	}
+
+	/* All available PEBs are in ubi->free, now is the time to get
+	 * the best anchor PEBs.
+	 */
+	ubi->fm_anchor = ubi_wl_get_fm_peb(ubi, 1);
+	ubi->fm_next_anchor = ubi_wl_get_fm_peb(ubi, 1);
+
 	for (;;) {
 		enough = 0;
 		if (pool->size < pool->max_size) {
@@ -271,26 +286,20 @@ static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
 int ubi_ensure_anchor_pebs(struct ubi_device *ubi)
 {
 	struct ubi_work *wrk;
-	struct ubi_wl_entry *anchor;
 
 	spin_lock(&ubi->wl_lock);
 
-	/* Do we already have an anchor? */
-	if (ubi->fm_anchor) {
-		spin_unlock(&ubi->wl_lock);
-		return 0;
-	}
-
-	/* See if we can find an anchor PEB on the list of free PEBs */
-	anchor = ubi_wl_get_fm_peb(ubi, 1);
-	if (anchor) {
-		ubi->fm_anchor = anchor;
-		spin_unlock(&ubi->wl_lock);
-		return 0;
+	/* Do we have a next anchor? */
+	if (!ubi->fm_next_anchor) {
+		ubi->fm_next_anchor = ubi_wl_get_fm_peb(ubi, 1);
+		if (!ubi->fm_next_anchor)
+			/* Tell wear leveling to produce a new anchor PEB */
+			ubi->fm_do_produce_anchor = 1;
 	}
 
-	/* No luck, trigger wear leveling to produce a new anchor PEB */
-	ubi->fm_do_produce_anchor = 1;
+	/* Do wear leveling to get a new anchor PEB or check the
+	 * existing next anchor candidate.
+	 */
 	if (ubi->wl_scheduled) {
 		spin_unlock(&ubi->wl_lock);
 		return 0;
...
@@ -1220,6 +1220,17 @@ static int ubi_write_fastmap(struct ubi_device *ubi,
 		fm_pos += sizeof(*fec);
 		ubi_assert(fm_pos <= ubi->fm_size);
 	}
+	if (ubi->fm_next_anchor) {
+		fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
+
+		fec->pnum = cpu_to_be32(ubi->fm_next_anchor->pnum);
+		set_seen(ubi, ubi->fm_next_anchor->pnum, seen_pebs);
+		fec->ec = cpu_to_be32(ubi->fm_next_anchor->ec);
+
+		free_peb_count++;
+		fm_pos += sizeof(*fec);
+		ubi_assert(fm_pos <= ubi->fm_size);
+	}
 	fmh->free_peb_count = cpu_to_be32(free_peb_count);
 
 	ubi_for_each_used_peb(ubi, wl_e, tmp_rb) {
...
@@ -491,7 +491,8 @@ struct ubi_debug_info {
  * @fm_work: fastmap work queue
  * @fm_work_scheduled: non-zero if fastmap work was scheduled
  * @fast_attach: non-zero if UBI was attached by fastmap
- * @fm_anchor: The next anchor PEB to use for fastmap
+ * @fm_anchor: The new anchor PEB used during fastmap update
+ * @fm_next_anchor: An anchor PEB candidate for the next time fastmap is updated
  * @fm_do_produce_anchor: If true produce an anchor PEB in wl
  *
  * @used: RB-tree of used physical eraseblocks
@@ -602,6 +603,7 @@ struct ubi_device {
 	int fm_work_scheduled;
 	int fast_attach;
 	struct ubi_wl_entry *fm_anchor;
+	struct ubi_wl_entry *fm_next_anchor;
 	int fm_do_produce_anchor;
 
 	/* Wear-leveling sub-system's stuff */
...
@@ -687,20 +687,27 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
 	}
 
 #ifdef CONFIG_MTD_UBI_FASTMAP
+	e1 = find_anchor_wl_entry(&ubi->used);
+	if (e1 && ubi->fm_next_anchor &&
+	    (ubi->fm_next_anchor->ec - e1->ec >= UBI_WL_THRESHOLD)) {
+		ubi->fm_do_produce_anchor = 1;
+		/* fm_next_anchor is no longer considered a good anchor
+		 * candidate.
+		 * NULL assignment also prevents multiple wear level checks
+		 * of this PEB.
+		 */
+		wl_tree_add(ubi->fm_next_anchor, &ubi->free);
+		ubi->fm_next_anchor = NULL;
+		ubi->free_count++;
+	}
+
 	if (ubi->fm_do_produce_anchor) {
-		e1 = find_anchor_wl_entry(&ubi->used);
 		if (!e1)
 			goto out_cancel;
 		e2 = get_peb_for_wl(ubi);
 		if (!e2)
 			goto out_cancel;
 
-		/*
-		 * Anchor move within the anchor area is useless.
-		 */
-		if (e2->pnum < UBI_FM_MAX_START)
-			goto out_cancel;
-
 		self_check_in_wl_tree(ubi, e1, &ubi->used);
 		rb_erase(&e1->u.rb, &ubi->used);
 		dbg_wl("anchor-move PEB %d to PEB %d", e1->pnum, e2->pnum);
@@ -1079,8 +1086,11 @@ static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk)
 	if (!err) {
 		spin_lock(&ubi->wl_lock);
 
-		if (!ubi->fm_anchor && e->pnum < UBI_FM_MAX_START) {
-			ubi->fm_anchor = e;
+		if (!ubi->fm_next_anchor && e->pnum < UBI_FM_MAX_START) {
+			/* Abort anchor production, if needed it will be
+			 * enabled again in the wear leveling started below.
+			 */
+			ubi->fm_next_anchor = e;
 			ubi->fm_do_produce_anchor = 0;
 		} else {
 			wl_tree_add(e, &ubi->free);
...