Commit e9eca4de authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'upstream-3.7-rc1-fastmap' of git://git.infradead.org/linux-ubi

Pull UBI fastmap changes from Artem Bityutskiy:
 "This pull request contains the UBI fastmap support implemented by
  Richard Weinberger from Linutronix.  Fastmap is designed to address
  UBI's slow scanning issues.  Namely, it introduces a new on-flash
  data-structure called "fastmap", which stores the information about
  logical<->physical eraseblocks mappings.  So now to get this
  information just read the fastmap, instead of doing full scan.  More
  information here can be found in Richard's announcement in LKML
  (Subject: UBI: Fastmap request for inclusion (v19)):

     http://thread.gmane.org/gmane.linux.kernel/1364922/focus=1369109

  One thing I want to say explicitly is that fastmap did not get
  enough linux-next exposure.  That is partially my fault - I did not
  respond quickly enough.  I _really_ apologize for this.  But it had
  good testing and is disabled by default, so I do not expect that
  we'll break anything.

  Fastmap is declared as experimental for now, and it is off by
  default.  We did declare that the on-flash format may change.  The
  reason for this is that no one has used it in real production yet,
  so there is a high risk that something is missing.  Besides, we do
  not have user-space tools supporting fastmap yet.

  Nevertheless, I suggest we merge this feature.  Many people want
  UBI's scanning bottleneck to be fixed, and merging fastmap now
  should accelerate its production use.  The plan is to make it
  bullet-proof, clean it up somewhat, and make it the default for
  UBI.  I do not know how many kernel releases that will take.

  Basically, what I want to do for fastmap is something like what
  Linus did for btrfs a few years ago."

* tag 'upstream-3.7-rc1-fastmap' of git://git.infradead.org/linux-ubi:
  UBI: Wire-up fastmap
  UBI: Add fastmap core
  UBI: Add fastmap support to the WL sub-system
  UBI: Add fastmap stuff to attach.c
  UBI: Wire-up ->fm_sem
  UBI: Add fastmap bits to build.c
  UBI: Add self_check_eba()
  UBI: Export next_sqnum()
  UBI: Add fastmap stuff to ubi.h
  UBI: Add fastmap on-flash data structures
parents 1929041b 76ac66e4
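In short, the new attach path tries the fastmap first and falls back to the
old full scan.  A minimal sketch of that flow, trimmed from the ubi_attach()
hunk further down (the attach-info reallocation after a bad fastmap and the
CONFIG_MTD_UBI_FASTMAP guards are omitted here):

static int attach_sketch(struct ubi_device *ubi, struct ubi_attach_info *ai)
{
	/* Look for a fastmap anchor among the first UBI_FM_MAX_START PEBs. */
	int err = scan_fast(ubi, ai);

	/*
	 * UBI_NO_FASTMAP and UBI_BAD_FASTMAP are positive; fall back to a
	 * full scan.  PEBs below UBI_FM_MAX_START were already added to
	 * @ai by scan_fast(), so the scan continues from there.
	 */
	if (err > 0)
		err = scan_all(ubi, ai, UBI_FM_MAX_START);

	return err;	/* 0 on success, negative error code on failure */
}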
......@@ -7457,6 +7457,12 @@ F: drivers/mtd/ubi/
F: include/linux/mtd/ubi.h
F: include/mtd/ubi-user.h
UNSORTED BLOCK IMAGES (UBI) Fastmap
M: Richard Weinberger <richard@nod.at>
L: linux-mtd@lists.infradead.org
S: Maintained
F: drivers/mtd/ubi/fastmap.c
USB ACM DRIVER
M: Oliver Neukum <oliver@neukum.org>
L: linux-usb@vger.kernel.org
......
......@@ -56,6 +56,27 @@ config MTD_UBI_BEB_LIMIT
Leave the default value if unsure.
config MTD_UBI_FASTMAP
bool "UBI Fastmap (Experimental feature)"
default n
help
Important: this feature is experimental so far and the on-flash
format for fastmap may change in future kernel versions.
Fastmap is a mechanism which allows attaching a UBI device
in nearly constant time. Instead of scanning the whole MTD device it
only has to locate a checkpoint (called fastmap) on the device.
The on-flash fastmap contains all information needed to attach
the device. Using fastmap only makes sense on large devices where
attaching by scanning takes too long. UBI will not automatically install
a fastmap on old images, but you can set the UBI module parameter
fm_autoconvert to 1 if you want it to. Please note that fastmap-enabled
images are still usable with UBI implementations without
fastmap support. On typical flash devices the whole fastmap fits
into one PEB. UBI will reserve PEBs to hold two fastmaps.
If in doubt, say "N".
config MTD_UBI_GLUEBI
tristate "MTD devices emulation driver (gluebi)"
help
......
......@@ -2,5 +2,6 @@ obj-$(CONFIG_MTD_UBI) += ubi.o
ubi-y += vtbl.o vmt.o upd.o build.o cdev.o kapi.o eba.o io.o wl.o attach.o
ubi-y += misc.o debug.o
ubi-$(CONFIG_MTD_UBI_FASTMAP) += fastmap.o
obj-$(CONFIG_MTD_UBI_GLUEBI) += gluebi.o
......@@ -300,7 +300,7 @@ static struct ubi_ainf_volume *add_volume(struct ubi_attach_info *ai,
}
/**
* compare_lebs - find out which logical eraseblock is newer.
* ubi_compare_lebs - find out which logical eraseblock is newer.
* @ubi: UBI device description object
* @aeb: first logical eraseblock to compare
* @pnum: physical eraseblock number of the second logical eraseblock to
......@@ -319,7 +319,7 @@ static struct ubi_ainf_volume *add_volume(struct ubi_attach_info *ai,
* o bit 2 is cleared: the older LEB is not corrupted;
* o bit 2 is set: the older LEB is corrupted.
*/
static int compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
int pnum, const struct ubi_vid_hdr *vid_hdr)
{
void *buf;
......@@ -337,7 +337,7 @@ static int compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
* support these images anymore. Well, those images still work,
* but only if no unclean reboots happened.
*/
ubi_err("unsupported on-flash UBI format\n");
ubi_err("unsupported on-flash UBI format");
return -EINVAL;
}
......@@ -507,7 +507,7 @@ int ubi_add_to_av(struct ubi_device *ubi, struct ubi_attach_info *ai, int pnum,
* sequence numbers. We still can attach these images, unless
* there is a need to distinguish between old and new
* eraseblocks, in which case we'll refuse the image in
* 'compare_lebs()'. In other words, we attach old clean
* 'ubi_compare_lebs()'. In other words, we attach old clean
* images, but refuse attaching old images with duplicated
* logical eraseblocks because there was an unclean reboot.
*/
......@@ -523,7 +523,7 @@ int ubi_add_to_av(struct ubi_device *ubi, struct ubi_attach_info *ai, int pnum,
* Now we have to drop the older one and preserve the newer
* one.
*/
cmp_res = compare_lebs(ubi, aeb, pnum, vid_hdr);
cmp_res = ubi_compare_lebs(ubi, aeb, pnum, vid_hdr);
if (cmp_res < 0)
return cmp_res;
......@@ -748,7 +748,7 @@ struct ubi_ainf_peb *ubi_early_get_peb(struct ubi_device *ubi,
/**
* check_corruption - check the data area of PEB.
* @ubi: UBI device description object
* @vid_hrd: the (corrupted) VID header of this PEB
* @vid_hdr: the (corrupted) VID header of this PEB
* @pnum: the physical eraseblock number to check
*
* This is a helper function which is used to distinguish between VID header
......@@ -810,6 +810,8 @@ static int check_corruption(struct ubi_device *ubi, struct ubi_vid_hdr *vid_hdr,
* @ubi: UBI device description object
* @ai: attaching information
* @pnum: the physical eraseblock number
* @vid: The volume ID of the found volume will be stored in this pointer
* @sqnum: The sqnum of the found volume will be stored in this pointer
*
* This function reads UBI headers of PEB @pnum, checks them, and adds
* information about this PEB to the corresponding list or RB-tree in the
......@@ -817,10 +819,10 @@ static int check_corruption(struct ubi_device *ubi, struct ubi_vid_hdr *vid_hdr,
* successfully handled and a negative error code in case of failure.
*/
static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai,
int pnum)
int pnum, int *vid, unsigned long long *sqnum)
{
long long uninitialized_var(ec);
int err, bitflips = 0, vol_id, ec_err = 0;
int err, bitflips = 0, vol_id = -1, ec_err = 0;
dbg_bld("scan PEB %d", pnum);
......@@ -991,14 +993,21 @@ static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai,
}
vol_id = be32_to_cpu(vidh->vol_id);
if (vid)
*vid = vol_id;
if (sqnum)
*sqnum = be64_to_cpu(vidh->sqnum);
if (vol_id > UBI_MAX_VOLUMES && vol_id != UBI_LAYOUT_VOLUME_ID) {
int lnum = be32_to_cpu(vidh->lnum);
/* Unsupported internal volume */
switch (vidh->compat) {
case UBI_COMPAT_DELETE:
ubi_msg("\"delete\" compatible internal volume %d:%d found, will remove it",
vol_id, lnum);
if (vol_id != UBI_FM_SB_VOLUME_ID
&& vol_id != UBI_FM_DATA_VOLUME_ID) {
ubi_msg("\"delete\" compatible internal volume %d:%d found, will remove it",
vol_id, lnum);
}
err = add_to_list(ai, pnum, vol_id, lnum,
ec, 1, &ai->erase);
if (err)
......@@ -1120,52 +1129,127 @@ static int late_analysis(struct ubi_device *ubi, struct ubi_attach_info *ai)
return 0;
}
/**
* destroy_av - free volume attaching information.
* @av: volume attaching information
* @ai: attaching information
*
* This function destroys the volume attaching information.
*/
static void destroy_av(struct ubi_attach_info *ai, struct ubi_ainf_volume *av)
{
struct ubi_ainf_peb *aeb;
struct rb_node *this = av->root.rb_node;
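/*
 * Iterative post-order walk: descend to a leaf, free it, then detach
 * it from its parent so it is never visited again; this way no
 * recursion is needed for a possibly large RB-tree.
 */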
while (this) {
if (this->rb_left)
this = this->rb_left;
else if (this->rb_right)
this = this->rb_right;
else {
aeb = rb_entry(this, struct ubi_ainf_peb, u.rb);
this = rb_parent(this);
if (this) {
if (this->rb_left == &aeb->u.rb)
this->rb_left = NULL;
else
this->rb_right = NULL;
}
kmem_cache_free(ai->aeb_slab_cache, aeb);
}
}
kfree(av);
}
/**
* destroy_ai - destroy attaching information.
* @ai: attaching information
*/
static void destroy_ai(struct ubi_attach_info *ai)
{
struct ubi_ainf_peb *aeb, *aeb_tmp;
struct ubi_ainf_volume *av;
struct rb_node *rb;
list_for_each_entry_safe(aeb, aeb_tmp, &ai->alien, u.list) {
list_del(&aeb->u.list);
kmem_cache_free(ai->aeb_slab_cache, aeb);
}
list_for_each_entry_safe(aeb, aeb_tmp, &ai->erase, u.list) {
list_del(&aeb->u.list);
kmem_cache_free(ai->aeb_slab_cache, aeb);
}
list_for_each_entry_safe(aeb, aeb_tmp, &ai->corr, u.list) {
list_del(&aeb->u.list);
kmem_cache_free(ai->aeb_slab_cache, aeb);
}
list_for_each_entry_safe(aeb, aeb_tmp, &ai->free, u.list) {
list_del(&aeb->u.list);
kmem_cache_free(ai->aeb_slab_cache, aeb);
}
/* Destroy the volume RB-tree */
rb = ai->volumes.rb_node;
while (rb) {
if (rb->rb_left)
rb = rb->rb_left;
else if (rb->rb_right)
rb = rb->rb_right;
else {
av = rb_entry(rb, struct ubi_ainf_volume, rb);
rb = rb_parent(rb);
if (rb) {
if (rb->rb_left == &av->rb)
rb->rb_left = NULL;
else
rb->rb_right = NULL;
}
destroy_av(ai, av);
}
}
if (ai->aeb_slab_cache)
kmem_cache_destroy(ai->aeb_slab_cache);
kfree(ai);
}
/**
* scan_all - scan entire MTD device.
* @ubi: UBI device description object
* @ai: attach info object
* @start: start scanning at this PEB
*
* This function does full scanning of an MTD device and fills @ai with
* complete information about it. Returns zero in case of success and a
* negative error code in case of failure.
*/
static struct ubi_attach_info *scan_all(struct ubi_device *ubi)
static int scan_all(struct ubi_device *ubi, struct ubi_attach_info *ai,
int start)
{
int err, pnum;
struct rb_node *rb1, *rb2;
struct ubi_ainf_volume *av;
struct ubi_ainf_peb *aeb;
struct ubi_attach_info *ai;
ai = kzalloc(sizeof(struct ubi_attach_info), GFP_KERNEL);
if (!ai)
return ERR_PTR(-ENOMEM);
INIT_LIST_HEAD(&ai->corr);
INIT_LIST_HEAD(&ai->free);
INIT_LIST_HEAD(&ai->erase);
INIT_LIST_HEAD(&ai->alien);
ai->volumes = RB_ROOT;
err = -ENOMEM;
ai->aeb_slab_cache = kmem_cache_create("ubi_aeb_slab_cache",
sizeof(struct ubi_ainf_peb),
0, 0, NULL);
if (!ai->aeb_slab_cache)
goto out_ai;
ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
if (!ech)
goto out_ai;
return err;
vidh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
if (!vidh)
goto out_ech;
for (pnum = 0; pnum < ubi->peb_count; pnum++) {
for (pnum = start; pnum < ubi->peb_count; pnum++) {
cond_resched();
dbg_gen("process PEB %d", pnum);
err = scan_peb(ubi, ai, pnum);
err = scan_peb(ubi, ai, pnum, NULL, NULL);
if (err < 0)
goto out_vidh;
}
......@@ -1210,32 +1294,144 @@ static struct ubi_attach_info *scan_all(struct ubi_device *ubi)
ubi_free_vid_hdr(ubi, vidh);
kfree(ech);
return ai;
return 0;
out_vidh:
ubi_free_vid_hdr(ubi, vidh);
out_ech:
kfree(ech);
out_ai:
ubi_destroy_ai(ai);
return ERR_PTR(err);
return err;
}
#ifdef CONFIG_MTD_UBI_FASTMAP
/**
* scan_fast - try to find a fastmap and attach from it.
* @ubi: UBI device description object
* @ai: attach info object
*
* Returns 0 on success, negative return values indicate an internal
* error.
* UBI_NO_FASTMAP denotes that no fastmap was found.
* UBI_BAD_FASTMAP denotes that the found fastmap was invalid.
*/
static int scan_fast(struct ubi_device *ubi, struct ubi_attach_info *ai)
{
int err, pnum, fm_anchor = -1;
unsigned long long max_sqnum = 0;
err = -ENOMEM;
ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
if (!ech)
goto out;
vidh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
if (!vidh)
goto out_ech;
for (pnum = 0; pnum < UBI_FM_MAX_START; pnum++) {
int vol_id = -1;
unsigned long long sqnum = -1;
cond_resched();
dbg_gen("process PEB %d", pnum);
err = scan_peb(ubi, ai, pnum, &vol_id, &sqnum);
if (err < 0)
goto out_vidh;
if (vol_id == UBI_FM_SB_VOLUME_ID && sqnum > max_sqnum) {
max_sqnum = sqnum;
fm_anchor = pnum;
}
}
ubi_free_vid_hdr(ubi, vidh);
kfree(ech);
if (fm_anchor < 0)
return UBI_NO_FASTMAP;
return ubi_scan_fastmap(ubi, ai, fm_anchor);
out_vidh:
ubi_free_vid_hdr(ubi, vidh);
out_ech:
kfree(ech);
out:
return err;
}
#endif
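/**
 * alloc_ai - allocate and initialize an attach info object.
 * @slab_name: name to use for the AEB slab cache
 *
 * Returns the new object in case of success and %NULL if out of memory.
 */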
static struct ubi_attach_info *alloc_ai(const char *slab_name)
{
struct ubi_attach_info *ai;
ai = kzalloc(sizeof(struct ubi_attach_info), GFP_KERNEL);
if (!ai)
return ai;
INIT_LIST_HEAD(&ai->corr);
INIT_LIST_HEAD(&ai->free);
INIT_LIST_HEAD(&ai->erase);
INIT_LIST_HEAD(&ai->alien);
ai->volumes = RB_ROOT;
ai->aeb_slab_cache = kmem_cache_create(slab_name,
sizeof(struct ubi_ainf_peb),
0, 0, NULL);
if (!ai->aeb_slab_cache) {
kfree(ai);
ai = NULL;
}
return ai;
}
/**
* ubi_attach - attach an MTD device.
* @ubi: UBI device descriptor
* @force_scan: if set to non-zero attach by scanning
*
* This function returns zero in case of success and a negative error code in
* case of failure.
*/
int ubi_attach(struct ubi_device *ubi)
int ubi_attach(struct ubi_device *ubi, int force_scan)
{
int err;
struct ubi_attach_info *ai;
ai = scan_all(ubi);
if (IS_ERR(ai))
return PTR_ERR(ai);
ai = alloc_ai("ubi_aeb_slab_cache");
if (!ai)
return -ENOMEM;
#ifdef CONFIG_MTD_UBI_FASTMAP
/* On small flash devices we disable fastmap in any case. */
if ((int)mtd_div_by_eb(ubi->mtd->size, ubi->mtd) <= UBI_FM_MAX_START) {
ubi->fm_disabled = 1;
force_scan = 1;
}
if (force_scan)
err = scan_all(ubi, ai, 0);
else {
err = scan_fast(ubi, ai);
if (err > 0) {
if (err != UBI_NO_FASTMAP) {
destroy_ai(ai);
ai = alloc_ai("ubi_aeb_slab_cache2");
if (!ai)
return -ENOMEM;
}
err = scan_all(ubi, ai, UBI_FM_MAX_START);
}
}
#else
err = scan_all(ubi, ai, 0);
#endif
if (err)
goto out_ai;
ubi->bad_peb_count = ai->bad_peb_count;
ubi->good_peb_count = ubi->peb_count - ubi->bad_peb_count;
......@@ -1256,7 +1452,29 @@ int ubi_attach(struct ubi_device *ubi)
if (err)
goto out_wl;
ubi_destroy_ai(ai);
#ifdef CONFIG_MTD_UBI_FASTMAP
if (ubi->fm && ubi->dbg->chk_gen) {
struct ubi_attach_info *scan_ai;
scan_ai = alloc_ai("ubi_ckh_aeb_slab_cache");
if (!scan_ai)
goto out_wl;
err = scan_all(ubi, scan_ai, 0);
if (err) {
destroy_ai(scan_ai);
goto out_wl;
}
err = self_check_eba(ubi, ai, scan_ai);
destroy_ai(scan_ai);
if (err)
goto out_wl;
}
#endif
destroy_ai(ai);
return 0;
out_wl:
......@@ -1265,98 +1483,10 @@ int ubi_attach(struct ubi_device *ubi)
ubi_free_internal_volumes(ubi);
vfree(ubi->vtbl);
out_ai:
ubi_destroy_ai(ai);
destroy_ai(ai);
return err;
}
/**
* destroy_av - free volume attaching information.
* @av: volume attaching information
* @ai: attaching information
*
* This function destroys the volume attaching information.
*/
static void destroy_av(struct ubi_attach_info *ai, struct ubi_ainf_volume *av)
{
struct ubi_ainf_peb *aeb;
struct rb_node *this = av->root.rb_node;
while (this) {
if (this->rb_left)
this = this->rb_left;
else if (this->rb_right)
this = this->rb_right;
else {
aeb = rb_entry(this, struct ubi_ainf_peb, u.rb);
this = rb_parent(this);
if (this) {
if (this->rb_left == &aeb->u.rb)
this->rb_left = NULL;
else
this->rb_right = NULL;
}
kmem_cache_free(ai->aeb_slab_cache, aeb);
}
}
kfree(av);
}
/**
* ubi_destroy_ai - destroy attaching information.
* @ai: attaching information
*/
void ubi_destroy_ai(struct ubi_attach_info *ai)
{
struct ubi_ainf_peb *aeb, *aeb_tmp;
struct ubi_ainf_volume *av;
struct rb_node *rb;
list_for_each_entry_safe(aeb, aeb_tmp, &ai->alien, u.list) {
list_del(&aeb->u.list);
kmem_cache_free(ai->aeb_slab_cache, aeb);
}
list_for_each_entry_safe(aeb, aeb_tmp, &ai->erase, u.list) {
list_del(&aeb->u.list);
kmem_cache_free(ai->aeb_slab_cache, aeb);
}
list_for_each_entry_safe(aeb, aeb_tmp, &ai->corr, u.list) {
list_del(&aeb->u.list);
kmem_cache_free(ai->aeb_slab_cache, aeb);
}
list_for_each_entry_safe(aeb, aeb_tmp, &ai->free, u.list) {
list_del(&aeb->u.list);
kmem_cache_free(ai->aeb_slab_cache, aeb);
}
/* Destroy the volume RB-tree */
rb = ai->volumes.rb_node;
while (rb) {
if (rb->rb_left)
rb = rb->rb_left;
else if (rb->rb_right)
rb = rb->rb_right;
else {
av = rb_entry(rb, struct ubi_ainf_volume, rb);
rb = rb_parent(rb);
if (rb) {
if (rb->rb_left == &av->rb)
rb->rb_left = NULL;
else
rb->rb_right = NULL;
}
destroy_av(ai, av);
}
}
if (ai->aeb_slab_cache)
kmem_cache_destroy(ai->aeb_slab_cache);
kfree(ai);
}
/**
* self_check_ai - check the attaching information.
* @ubi: UBI device description object
......
......@@ -76,7 +76,10 @@ static int __initdata mtd_devs;
/* MTD devices specification parameters */
static struct mtd_dev_param __initdata mtd_dev_param[UBI_MAX_DEVICES];
#ifdef CONFIG_MTD_UBI_FASTMAP
/* UBI module parameter to enable fastmap automatically on non-fastmap images */
static bool fm_autoconvert;
#endif
/* Root UBI "class" object (corresponds to '/<sysfs>/class/ubi/') */
struct class *ubi_class;
......@@ -153,6 +156,19 @@ int ubi_volume_notify(struct ubi_device *ubi, struct ubi_volume *vol, int ntype)
ubi_do_get_device_info(ubi, &nt.di);
ubi_do_get_volume_info(ubi, vol, &nt.vi);
#ifdef CONFIG_MTD_UBI_FASTMAP
switch (ntype) {
case UBI_VOLUME_ADDED:
case UBI_VOLUME_REMOVED:
case UBI_VOLUME_RESIZED:
case UBI_VOLUME_RENAMED:
if (ubi_update_fastmap(ubi)) {
ubi_err("Unable to update fastmap!");
ubi_ro_mode(ubi);
}
}
#endif
return blocking_notifier_call_chain(&ubi_notifiers, ntype, &nt);
}
......@@ -918,10 +934,40 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
ubi->vid_hdr_offset = vid_hdr_offset;
ubi->autoresize_vol_id = -1;
#ifdef CONFIG_MTD_UBI_FASTMAP
ubi->fm_pool.used = ubi->fm_pool.size = 0;
ubi->fm_wl_pool.used = ubi->fm_wl_pool.size = 0;
/*
* fm_pool.max_size is 5% of the total number of PEBs but it's also
* clamped between UBI_FM_MIN_POOL_SIZE and UBI_FM_MAX_POOL_SIZE.
*/
ubi->fm_pool.max_size = min(((int)mtd_div_by_eb(ubi->mtd->size,
ubi->mtd) / 100) * 5, UBI_FM_MAX_POOL_SIZE);
if (ubi->fm_pool.max_size < UBI_FM_MIN_POOL_SIZE)
ubi->fm_pool.max_size = UBI_FM_MIN_POOL_SIZE;
ubi->fm_wl_pool.max_size = UBI_FM_WL_POOL_SIZE;
ubi->fm_disabled = !fm_autoconvert;
if (!ubi->fm_disabled && (int)mtd_div_by_eb(ubi->mtd->size, ubi->mtd)
<= UBI_FM_MAX_START) {
ubi_err("More than %i PEBs are needed for fastmap, sorry.",
UBI_FM_MAX_START);
ubi->fm_disabled = 1;
}
ubi_msg("default fastmap pool size: %d", ubi->fm_pool.max_size);
ubi_msg("default fastmap WL pool size: %d", ubi->fm_wl_pool.max_size);
#else
ubi->fm_disabled = 1;
#endif
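/*
 * Worked example (illustrative numbers): a 1 GiB NAND with 128 KiB PEBs
 * has 8192 PEBs, so the 5% rule above asks for 405 pool slots, which
 * min() caps at UBI_FM_MAX_POOL_SIZE; a 16 MiB device has only 128 PEBs,
 * 5% of that is 5, which the clamp raises to UBI_FM_MIN_POOL_SIZE.
 */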
mutex_init(&ubi->buf_mutex);
mutex_init(&ubi->ckvol_mutex);
mutex_init(&ubi->device_mutex);
spin_lock_init(&ubi->volumes_lock);
mutex_init(&ubi->fm_mutex);
init_rwsem(&ubi->fm_sem);
ubi_msg("attaching mtd%d to ubi%d", mtd->index, ubi_num);
......@@ -934,11 +980,17 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
if (!ubi->peb_buf)
goto out_free;
#ifdef CONFIG_MTD_UBI_FASTMAP
ubi->fm_size = ubi_calc_fm_size(ubi);
ubi->fm_buf = vzalloc(ubi->fm_size);
if (!ubi->fm_buf)
goto out_free;
#endif
err = ubi_debugging_init_dev(ubi);
if (err)
goto out_free;
err = ubi_attach(ubi);
err = ubi_attach(ubi, 0);
if (err) {
ubi_err("failed to attach mtd%d, error %d", mtd->index, err);
goto out_debugging;
......@@ -1012,6 +1064,7 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num,
ubi_debugging_exit_dev(ubi);
out_free:
vfree(ubi->peb_buf);
vfree(ubi->fm_buf);
if (ref)
put_device(&ubi->dev);
else
......@@ -1061,7 +1114,11 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway)
ubi_assert(ubi_num == ubi->ubi_num);
ubi_notify_all(ubi, UBI_VOLUME_REMOVED, NULL);
ubi_msg("detaching mtd%d from ubi%d", ubi->mtd->index, ubi_num);
#ifdef CONFIG_MTD_UBI_FASTMAP
/*
 * If we don't write a new fastmap at detach time we lose all
 * EC updates that have been made since the last written fastmap.
 */
ubi_update_fastmap(ubi);
#endif
/*
* Before freeing anything, we have to stop the background thread to
* prevent it from doing anything on this device while we are freeing.
......@@ -1077,12 +1134,14 @@ int ubi_detach_mtd_dev(int ubi_num, int anyway)
ubi_debugfs_exit_dev(ubi);
uif_close(ubi);
ubi_wl_close(ubi);
ubi_free_internal_volumes(ubi);
vfree(ubi->vtbl);
put_mtd_device(ubi->mtd);
ubi_debugging_exit_dev(ubi);
vfree(ubi->peb_buf);
vfree(ubi->fm_buf);
ubi_msg("mtd%d is detached from ubi%d", ubi->mtd->index, ubi->ubi_num);
put_device(&ubi->dev);
return 0;
......@@ -1404,7 +1463,10 @@ MODULE_PARM_DESC(mtd, "MTD devices to attach. Parameter format: mtd=<name|num|pa
"Example 2: mtd=content,1984 mtd=4 - attach MTD device with name \"content\" using VID header offset 1984, and MTD device number 4 with default VID header offset.\n"
"Example 3: mtd=/dev/mtd1,0,25 - attach MTD device /dev/mtd1 using default VID header offset and reserve 25*nand_size_in_blocks/1024 erase blocks for bad block handling.\n"
"\t(e.g. if the NAND *chipset* has 4096 PEB, 100 will be reserved for this UBI device).");
#ifdef CONFIG_MTD_UBI_FASTMAP
module_param(fm_autoconvert, bool, 0644);
MODULE_PARM_DESC(fm_autoconvert, "Set this parameter to enable fastmap automatically on images without a fastmap.");
#endif
MODULE_VERSION(__stringify(UBI_VERSION));
MODULE_DESCRIPTION("UBI - Unsorted Block Images");
MODULE_AUTHOR("Artem Bityutskiy");
......
......@@ -57,7 +57,7 @@
* global sequence counter value. It also increases the global sequence
* counter.
*/
static unsigned long long next_sqnum(struct ubi_device *ubi)
unsigned long long ubi_next_sqnum(struct ubi_device *ubi)
{
unsigned long long sqnum;
......@@ -340,7 +340,9 @@ int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol,
dbg_eba("erase LEB %d:%d, PEB %d", vol_id, lnum, pnum);
down_read(&ubi->fm_sem);
vol->eba_tbl[lnum] = UBI_LEB_UNMAPPED;
up_read(&ubi->fm_sem);
err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 0);
out_unlock:
......@@ -521,7 +523,7 @@ static int recover_peb(struct ubi_device *ubi, int pnum, int vol_id, int lnum,
goto out_put;
}
vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
err = ubi_io_write_vid_hdr(ubi, new_pnum, vid_hdr);
if (err)
goto write_error;
......@@ -548,7 +550,9 @@ static int recover_peb(struct ubi_device *ubi, int pnum, int vol_id, int lnum,
mutex_unlock(&ubi->buf_mutex);
ubi_free_vid_hdr(ubi, vid_hdr);
down_read(&ubi->fm_sem);
vol->eba_tbl[lnum] = new_pnum;
up_read(&ubi->fm_sem);
ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
ubi_msg("data was successfully recovered");
......@@ -632,7 +636,7 @@ int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
}
vid_hdr->vol_type = UBI_VID_DYNAMIC;
vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
vid_hdr->vol_id = cpu_to_be32(vol_id);
vid_hdr->lnum = cpu_to_be32(lnum);
vid_hdr->compat = ubi_get_compat(ubi, vol_id);
......@@ -665,7 +669,9 @@ int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
}
}
down_read(&ubi->fm_sem);
vol->eba_tbl[lnum] = pnum;
up_read(&ubi->fm_sem);
leb_write_unlock(ubi, vol_id, lnum);
ubi_free_vid_hdr(ubi, vid_hdr);
......@@ -692,7 +698,7 @@ int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
return err;
}
vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
ubi_msg("try another PEB");
goto retry;
}
......@@ -745,7 +751,7 @@ int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol,
return err;
}
vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
vid_hdr->vol_id = cpu_to_be32(vol_id);
vid_hdr->lnum = cpu_to_be32(lnum);
vid_hdr->compat = ubi_get_compat(ubi, vol_id);
......@@ -783,7 +789,9 @@ int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol,
}
ubi_assert(vol->eba_tbl[lnum] < 0);
down_read(&ubi->fm_sem);
vol->eba_tbl[lnum] = pnum;
up_read(&ubi->fm_sem);
leb_write_unlock(ubi, vol_id, lnum);
ubi_free_vid_hdr(ubi, vid_hdr);
......@@ -810,7 +818,7 @@ int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol,
return err;
}
vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
ubi_msg("try another PEB");
goto retry;
}
......@@ -862,7 +870,7 @@ int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
if (err)
goto out_mutex;
vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
vid_hdr->vol_id = cpu_to_be32(vol_id);
vid_hdr->lnum = cpu_to_be32(lnum);
vid_hdr->compat = ubi_get_compat(ubi, vol_id);
......@@ -904,7 +912,9 @@ int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
goto out_leb_unlock;
}
down_read(&ubi->fm_sem);
vol->eba_tbl[lnum] = pnum;
up_read(&ubi->fm_sem);
out_leb_unlock:
leb_write_unlock(ubi, vol_id, lnum);
......@@ -930,7 +940,7 @@ int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
goto out_leb_unlock;
}
vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
ubi_msg("try another PEB");
goto retry;
}
......@@ -1089,7 +1099,7 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
vid_hdr->data_size = cpu_to_be32(data_size);
vid_hdr->data_crc = cpu_to_be32(crc);
}
vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
err = ubi_io_write_vid_hdr(ubi, to, vid_hdr);
if (err) {
......@@ -1151,7 +1161,9 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
}
ubi_assert(vol->eba_tbl[lnum] == from);
down_read(&ubi->fm_sem);
vol->eba_tbl[lnum] = to;
up_read(&ubi->fm_sem);
out_unlock_buf:
mutex_unlock(&ubi->buf_mutex);
......@@ -1201,6 +1213,102 @@ static void print_rsvd_warning(struct ubi_device *ubi,
ubi->corr_peb_count);
}
/**
* self_check_eba - run a self check on the EBA table constructed by fastmap.
* @ubi: UBI device description object
* @ai_fastmap: UBI attach info object created by fastmap
* @ai_scan: UBI attach info object created by scanning
*
* Returns < 0 in case of an internal error, 0 otherwise.
* If a bad EBA table entry is found, it is printed out and
* ubi_assert() triggers.
*/
int self_check_eba(struct ubi_device *ubi, struct ubi_attach_info *ai_fastmap,
struct ubi_attach_info *ai_scan)
{
int i, j, num_volumes, ret = 0;
int **scan_eba, **fm_eba;
struct ubi_ainf_volume *av;
struct ubi_volume *vol;
struct ubi_ainf_peb *aeb;
struct rb_node *rb;
num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT;
scan_eba = kmalloc(sizeof(*scan_eba) * num_volumes, GFP_KERNEL);
if (!scan_eba)
return -ENOMEM;
fm_eba = kmalloc(sizeof(*fm_eba) * num_volumes, GFP_KERNEL);
if (!fm_eba) {
kfree(scan_eba);
return -ENOMEM;
}
for (i = 0; i < num_volumes; i++) {
vol = ubi->volumes[i];
if (!vol)
continue;
scan_eba[i] = kmalloc(vol->reserved_pebs * sizeof(**scan_eba),
GFP_KERNEL);
if (!scan_eba[i]) {
ret = -ENOMEM;
goto out_free;
}
fm_eba[i] = kmalloc(vol->reserved_pebs * sizeof(**fm_eba),
GFP_KERNEL);
if (!fm_eba[i]) {
ret = -ENOMEM;
goto out_free;
}
for (j = 0; j < vol->reserved_pebs; j++)
scan_eba[i][j] = fm_eba[i][j] = UBI_LEB_UNMAPPED;
av = ubi_find_av(ai_scan, idx2vol_id(ubi, i));
if (!av)
continue;
ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb)
scan_eba[i][aeb->lnum] = aeb->pnum;
av = ubi_find_av(ai_fastmap, idx2vol_id(ubi, i));
if (!av)
continue;
ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb)
fm_eba[i][aeb->lnum] = aeb->pnum;
for (j = 0; j < vol->reserved_pebs; j++) {
if (scan_eba[i][j] != fm_eba[i][j]) {
if (scan_eba[i][j] == UBI_LEB_UNMAPPED ||
fm_eba[i][j] == UBI_LEB_UNMAPPED)
continue;
ubi_err("LEB:%i:%i is PEB:%i instead of %i!",
vol->vol_id, i, fm_eba[i][j],
scan_eba[i][j]);
ubi_assert(0);
}
}
}
out_free:
for (i = 0; i < num_volumes; i++) {
if (!ubi->volumes[i])
continue;
kfree(scan_eba[i]);
kfree(fm_eba[i]);
}
kfree(scan_eba);
kfree(fm_eba);
return ret;
}
/**
* ubi_eba_init - initialize the EBA sub-system using attaching information.
* @ubi: UBI device description object
......
/*
* Copyright (c) 2012 Linutronix GmbH
* Author: Richard Weinberger <richard@nod.at>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
* the GNU General Public License for more details.
*
*/
#include <linux/crc32.h>
#include "ubi.h"
/**
* ubi_calc_fm_size - calculates the fastmap size in bytes for an UBI device.
* @ubi: UBI device description object
*/
size_t ubi_calc_fm_size(struct ubi_device *ubi)
{
size_t size;
size = sizeof(struct ubi_fm_sb) +
sizeof(struct ubi_fm_hdr) +
sizeof(struct ubi_fm_scan_pool) +
sizeof(struct ubi_fm_scan_pool) +
(ubi->peb_count * sizeof(struct ubi_fm_ec)) +
(sizeof(struct ubi_fm_eba) +
(ubi->peb_count * sizeof(__be32))) +
sizeof(struct ubi_fm_volhdr) * UBI_MAX_VOLUMES;
return roundup(size, ubi->leb_size);
}
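For a sense of scale, a back-of-the-envelope example (assuming the on-flash
structs from ubi-media.h, where struct ubi_fm_ec is two __be32 fields):

/*
 * Per tracked PEB the fastmap stores one struct ubi_fm_ec (8 bytes) plus
 * one __be32 EBA slot (4 bytes), roughly 12 bytes in total.  A single
 * 128 KiB eraseblock therefore holds the map data for about 10000 PEBs,
 * which is why the Kconfig help above says the whole fastmap typically
 * fits into one PEB.
 */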
/**
* new_fm_vhdr - allocate a new volume header for fastmap usage.
* @ubi: UBI device description object
* @vol_id: the VID of the new header
*
* Returns a new struct ubi_vid_hdr on success.
* NULL indicates out of memory.
*/
static struct ubi_vid_hdr *new_fm_vhdr(struct ubi_device *ubi, int vol_id)
{
struct ubi_vid_hdr *new;
new = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
if (!new)
goto out;
new->vol_type = UBI_VID_DYNAMIC;
new->vol_id = cpu_to_be32(vol_id);
/* UBI implementations without fastmap support have to delete the
* fastmap.
*/
new->compat = UBI_COMPAT_DELETE;
out:
return new;
}
/**
* add_aeb - create and add an attach erase block to a given list.
* @ai: UBI attach info object
* @list: the target list
* @pnum: PEB number of the new attach erase block
* @ec: erase counter of the new LEB
* @scrub: scrub this PEB after attaching
*
* Returns 0 on success, < 0 indicates an internal error.
*/
static int add_aeb(struct ubi_attach_info *ai, struct list_head *list,
int pnum, int ec, int scrub)
{
struct ubi_ainf_peb *aeb;
aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL);
if (!aeb)
return -ENOMEM;
aeb->pnum = pnum;
aeb->ec = ec;
aeb->lnum = -1;
aeb->scrub = scrub;
aeb->copy_flag = aeb->sqnum = 0;
ai->ec_sum += aeb->ec;
ai->ec_count++;
if (ai->max_ec < aeb->ec)
ai->max_ec = aeb->ec;
if (ai->min_ec > aeb->ec)
ai->min_ec = aeb->ec;
list_add_tail(&aeb->u.list, list);
return 0;
}
/**
* add_vol - create and add a new volume to ubi_attach_info.
* @ai: ubi_attach_info object
* @vol_id: VID of the new volume
* @used_ebs: number of used EBs
* @data_pad: data padding value of the new volume
* @vol_type: volume type
* @last_eb_bytes: number of bytes in the last LEB
*
* Returns the new struct ubi_ainf_volume on success.
* NULL indicates an error.
*/
static struct ubi_ainf_volume *add_vol(struct ubi_attach_info *ai, int vol_id,
int used_ebs, int data_pad, u8 vol_type,
int last_eb_bytes)
{
struct ubi_ainf_volume *av;
struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
while (*p) {
parent = *p;
av = rb_entry(parent, struct ubi_ainf_volume, rb);
if (vol_id > av->vol_id)
p = &(*p)->rb_left;
else if (vol_id < av->vol_id)
p = &(*p)->rb_right;
}
av = kmalloc(sizeof(struct ubi_ainf_volume), GFP_KERNEL);
if (!av)
goto out;
av->highest_lnum = av->leb_count = 0;
av->vol_id = vol_id;
av->used_ebs = used_ebs;
av->data_pad = data_pad;
av->last_data_size = last_eb_bytes;
av->compat = 0;
av->vol_type = vol_type;
av->root = RB_ROOT;
dbg_bld("found volume (ID %i)", vol_id);
rb_link_node(&av->rb, parent, p);
rb_insert_color(&av->rb, &ai->volumes);
out:
return av;
}
/**
* assign_aeb_to_av - assigns a SEB to a given ainf_volume and removes it
* from its original list.
* @ai: ubi_attach_info object
* @aeb: the SEB to be assigned
* @av: target scan volume
*/
static void assign_aeb_to_av(struct ubi_attach_info *ai,
struct ubi_ainf_peb *aeb,
struct ubi_ainf_volume *av)
{
struct ubi_ainf_peb *tmp_aeb;
struct rb_node **p = &av->root.rb_node, *parent = NULL;
while (*p) {
parent = *p;
tmp_aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
if (aeb->lnum != tmp_aeb->lnum) {
if (aeb->lnum < tmp_aeb->lnum)
p = &(*p)->rb_left;
else
p = &(*p)->rb_right;
continue;
} else
break;
}
list_del(&aeb->u.list);
av->leb_count++;
rb_link_node(&aeb->u.rb, parent, p);
rb_insert_color(&aeb->u.rb, &av->root);
}
/**
* update_vol - inserts or updates a LEB which was found in a pool.
* @ubi: the UBI device object
* @ai: attach info object
* @av: the volume this LEB belongs to
* @new_vh: the volume header derived from new_aeb
* @new_aeb: the AEB to be examined
*
* Returns 0 on success, < 0 indicates an internal error.
*/
static int update_vol(struct ubi_device *ubi, struct ubi_attach_info *ai,
struct ubi_ainf_volume *av, struct ubi_vid_hdr *new_vh,
struct ubi_ainf_peb *new_aeb)
{
struct rb_node **p = &av->root.rb_node, *parent = NULL;
struct ubi_ainf_peb *aeb, *victim;
int cmp_res;
while (*p) {
parent = *p;
aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
if (be32_to_cpu(new_vh->lnum) != aeb->lnum) {
if (be32_to_cpu(new_vh->lnum) < aeb->lnum)
p = &(*p)->rb_left;
else
p = &(*p)->rb_right;
continue;
}
/* This case can happen if the fastmap gets written
* because of a volume change (creation, deletion, ..).
* Then a PEB can be within the persistent EBA and the pool.
*/
if (aeb->pnum == new_aeb->pnum) {
ubi_assert(aeb->lnum == new_aeb->lnum);
kmem_cache_free(ai->aeb_slab_cache, new_aeb);
return 0;
}
cmp_res = ubi_compare_lebs(ubi, aeb, new_aeb->pnum, new_vh);
if (cmp_res < 0)
return cmp_res;
/* new_aeb is newer */
if (cmp_res & 1) {
victim = kmem_cache_alloc(ai->aeb_slab_cache,
GFP_KERNEL);
if (!victim)
return -ENOMEM;
victim->ec = aeb->ec;
victim->pnum = aeb->pnum;
list_add_tail(&victim->u.list, &ai->erase);
if (av->highest_lnum == be32_to_cpu(new_vh->lnum))
av->last_data_size = \
be32_to_cpu(new_vh->data_size);
dbg_bld("vol %i: AEB %i's PEB %i is the newer",
av->vol_id, aeb->lnum, new_aeb->pnum);
aeb->ec = new_aeb->ec;
aeb->pnum = new_aeb->pnum;
aeb->copy_flag = new_vh->copy_flag;
aeb->scrub = new_aeb->scrub;
kmem_cache_free(ai->aeb_slab_cache, new_aeb);
/* new_aeb is older */
} else {
dbg_bld("vol %i: AEB %i's PEB %i is old, dropping it",
av->vol_id, aeb->lnum, new_aeb->pnum);
list_add_tail(&new_aeb->u.list, &ai->erase);
}
return 0;
}
/* This LEB is new, let's add it to the volume */
if (av->highest_lnum <= be32_to_cpu(new_vh->lnum)) {
av->highest_lnum = be32_to_cpu(new_vh->lnum);
av->last_data_size = be32_to_cpu(new_vh->data_size);
}
if (av->vol_type == UBI_STATIC_VOLUME)
av->used_ebs = be32_to_cpu(new_vh->used_ebs);
av->leb_count++;
rb_link_node(&new_aeb->u.rb, parent, p);
rb_insert_color(&new_aeb->u.rb, &av->root);
return 0;
}
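update_vol() above only consumes bit 0 of the ubi_compare_lebs() result.
For reference, a sketch decoding all three result bits, following the
ubi_compare_lebs() kernel-doc in attach.c (bits 0 and 1 are described in
the part of that comment not shown in this hunk):

static void cmp_res_sketch(int cmp_res)
{
	if (cmp_res & 1)	/* bit 0: the second LEB is the newer one */
		pr_debug("second LEB is newer\n");
	if (cmp_res & 2)	/* bit 1: bit-flips in the newer LEB */
		pr_debug("newer LEB had bit-flips\n");
	if (cmp_res & 4)	/* bit 2: the older LEB is corrupted */
		pr_debug("older LEB is corrupted\n");
}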
/**
* process_pool_aeb - process a non-empty PEB found in a pool.
* @ubi: UBI device object
* @ai: attach info object
* @new_vh: the volume header derived from new_aeb
* @new_aeb: the AEB to be examined
*
* Returns 0 on success, < 0 indicates an internal error.
*/
static int process_pool_aeb(struct ubi_device *ubi, struct ubi_attach_info *ai,
struct ubi_vid_hdr *new_vh,
struct ubi_ainf_peb *new_aeb)
{
struct ubi_ainf_volume *av, *tmp_av = NULL;
struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
int found = 0;
if (be32_to_cpu(new_vh->vol_id) == UBI_FM_SB_VOLUME_ID ||
be32_to_cpu(new_vh->vol_id) == UBI_FM_DATA_VOLUME_ID) {
kmem_cache_free(ai->aeb_slab_cache, new_aeb);
return 0;
}
/* Find the volume this SEB belongs to */
while (*p) {
parent = *p;
tmp_av = rb_entry(parent, struct ubi_ainf_volume, rb);
if (be32_to_cpu(new_vh->vol_id) > tmp_av->vol_id)
p = &(*p)->rb_left;
else if (be32_to_cpu(new_vh->vol_id) < tmp_av->vol_id)
p = &(*p)->rb_right;
else {
found = 1;
break;
}
}
if (found)
av = tmp_av;
else {
ubi_err("orphaned volume in fastmap pool!");
return UBI_BAD_FASTMAP;
}
ubi_assert(be32_to_cpu(new_vh->vol_id) == av->vol_id);
return update_vol(ubi, ai, av, new_vh, new_aeb);
}
/**
* unmap_peb - unmap a PEB.
* @ai: UBI attach info object
* @pnum: the PEB to be unmapped
*
* If fastmap detects a free PEB in the pool it has to check whether
* this PEB has been unmapped after writing the fastmap.
*/
static void unmap_peb(struct ubi_attach_info *ai, int pnum)
{
struct ubi_ainf_volume *av;
struct rb_node *node, *node2;
struct ubi_ainf_peb *aeb;
for (node = rb_first(&ai->volumes); node; node = rb_next(node)) {
av = rb_entry(node, struct ubi_ainf_volume, rb);
for (node2 = rb_first(&av->root); node2;
node2 = rb_next(node2)) {
aeb = rb_entry(node2, struct ubi_ainf_peb, u.rb);
if (aeb->pnum == pnum) {
rb_erase(&aeb->u.rb, &av->root);
kmem_cache_free(ai->aeb_slab_cache, aeb);
return;
}
}
}
}
/**
* scan_pool - scans a pool for changed (no longer empty) PEBs.
* @ubi: UBI device object
* @ai: attach info object
* @pebs: an array of all PEB numbers in the pool to be scanned
* @pool_size: size of the pool (number of entries in @pebs)
* @max_sqnum: pointer to the maximal sequence number
* @eba_orphans: list of PEBs which need to be scanned
* @free: list of PEBs which are most likely free (and go into @ai->free)
*
* Returns 0 on success, if the pool is unusable UBI_BAD_FASTMAP is returned.
* < 0 indicates an internal error.
*/
static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
int *pebs, int pool_size, unsigned long long *max_sqnum,
struct list_head *eba_orphans, struct list_head *free)
{
struct ubi_vid_hdr *vh;
struct ubi_ec_hdr *ech;
struct ubi_ainf_peb *new_aeb, *tmp_aeb;
int i, pnum, err, found_orphan, ret = 0;
ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
if (!ech)
return -ENOMEM;
vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
if (!vh) {
kfree(ech);
return -ENOMEM;
}
dbg_bld("scanning fastmap pool: size = %i", pool_size);
/*
* Now scan all PEBs in the pool to find changes which have been made
* after the creation of the fastmap
*/
for (i = 0; i < pool_size; i++) {
int scrub = 0;
pnum = be32_to_cpu(pebs[i]);
if (ubi_io_is_bad(ubi, pnum)) {
ubi_err("bad PEB in fastmap pool!");
ret = UBI_BAD_FASTMAP;
goto out;
}
err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
if (err && err != UBI_IO_BITFLIPS) {
ubi_err("unable to read EC header! PEB:%i err:%i",
pnum, err);
ret = err > 0 ? UBI_BAD_FASTMAP : err;
goto out;
} else if (err == UBI_IO_BITFLIPS)
scrub = 1;
if (be32_to_cpu(ech->image_seq) != ubi->image_seq) {
ubi_err("bad image seq: 0x%x, expected: 0x%x",
be32_to_cpu(ech->image_seq), ubi->image_seq);
ret = UBI_BAD_FASTMAP;
goto out;
}
err = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
if (err == UBI_IO_FF || err == UBI_IO_FF_BITFLIPS) {
unsigned long long ec = be64_to_cpu(ech->ec);
unmap_peb(ai, pnum);
dbg_bld("Adding PEB to free: %i", pnum);
if (err == UBI_IO_FF_BITFLIPS)
add_aeb(ai, free, pnum, ec, 1);
else
add_aeb(ai, free, pnum, ec, 0);
continue;
} else if (err == 0 || err == UBI_IO_BITFLIPS) {
dbg_bld("Found non empty PEB:%i in pool", pnum);
if (err == UBI_IO_BITFLIPS)
scrub = 1;
found_orphan = 0;
list_for_each_entry(tmp_aeb, eba_orphans, u.list) {
if (tmp_aeb->pnum == pnum) {
found_orphan = 1;
break;
}
}
if (found_orphan) {
kmem_cache_free(ai->aeb_slab_cache, tmp_aeb);
list_del(&tmp_aeb->u.list);
}
new_aeb = kmem_cache_alloc(ai->aeb_slab_cache,
GFP_KERNEL);
if (!new_aeb) {
ret = -ENOMEM;
goto out;
}
new_aeb->ec = be64_to_cpu(ech->ec);
new_aeb->pnum = pnum;
new_aeb->lnum = be32_to_cpu(vh->lnum);
new_aeb->sqnum = be64_to_cpu(vh->sqnum);
new_aeb->copy_flag = vh->copy_flag;
new_aeb->scrub = scrub;
if (*max_sqnum < new_aeb->sqnum)
*max_sqnum = new_aeb->sqnum;
err = process_pool_aeb(ubi, ai, vh, new_aeb);
if (err) {
ret = err > 0 ? UBI_BAD_FASTMAP : err;
goto out;
}
} else {
/* We are paranoid and fall back to scanning mode */
ubi_err("fastmap pool PEBs contains damaged PEBs!");
ret = err > 0 ? UBI_BAD_FASTMAP : err;
goto out;
}
}
out:
ubi_free_vid_hdr(ubi, vh);
kfree(ech);
return ret;
}
/**
* count_fastmap_pebs - count the PEBs found by fastmap.
* @ai: The UBI attach info object
*/
static int count_fastmap_pebs(struct ubi_attach_info *ai)
{
struct ubi_ainf_peb *aeb;
struct ubi_ainf_volume *av;
struct rb_node *rb1, *rb2;
int n = 0;
list_for_each_entry(aeb, &ai->erase, u.list)
n++;
list_for_each_entry(aeb, &ai->free, u.list)
n++;
ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb)
ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb)
n++;
return n;
}
/**
* ubi_attach_fastmap - creates ubi_attach_info from a fastmap.
* @ubi: UBI device object
* @ai: UBI attach info object
* @fm: the fastmap to be attached
*
* Returns 0 on success, UBI_BAD_FASTMAP if the found fastmap was unusable.
* < 0 indicates an internal error.
*/
static int ubi_attach_fastmap(struct ubi_device *ubi,
struct ubi_attach_info *ai,
struct ubi_fastmap_layout *fm)
{
struct list_head used, eba_orphans, free;
struct ubi_ainf_volume *av;
struct ubi_ainf_peb *aeb, *tmp_aeb, *_tmp_aeb;
struct ubi_ec_hdr *ech;
struct ubi_fm_sb *fmsb;
struct ubi_fm_hdr *fmhdr;
struct ubi_fm_scan_pool *fmpl1, *fmpl2;
struct ubi_fm_ec *fmec;
struct ubi_fm_volhdr *fmvhdr;
struct ubi_fm_eba *fm_eba;
int ret, i, j, pool_size, wl_pool_size;
size_t fm_pos = 0, fm_size = ubi->fm_size;
unsigned long long max_sqnum = 0;
void *fm_raw = ubi->fm_buf;
INIT_LIST_HEAD(&used);
INIT_LIST_HEAD(&free);
INIT_LIST_HEAD(&eba_orphans);
INIT_LIST_HEAD(&ai->corr);
INIT_LIST_HEAD(&ai->free);
INIT_LIST_HEAD(&ai->erase);
INIT_LIST_HEAD(&ai->alien);
ai->volumes = RB_ROOT;
ai->min_ec = UBI_MAX_ERASECOUNTER;
ai->aeb_slab_cache = kmem_cache_create("ubi_ainf_peb_slab",
sizeof(struct ubi_ainf_peb),
0, 0, NULL);
if (!ai->aeb_slab_cache) {
ret = -ENOMEM;
goto fail;
}
fmsb = (struct ubi_fm_sb *)(fm_raw);
ai->max_sqnum = fmsb->sqnum;
fm_pos += sizeof(struct ubi_fm_sb);
if (fm_pos >= fm_size)
goto fail_bad;
fmhdr = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
fm_pos += sizeof(*fmhdr);
if (fm_pos >= fm_size)
goto fail_bad;
if (be32_to_cpu(fmhdr->magic) != UBI_FM_HDR_MAGIC) {
ubi_err("bad fastmap header magic: 0x%x, expected: 0x%x",
be32_to_cpu(fmhdr->magic), UBI_FM_HDR_MAGIC);
goto fail_bad;
}
fmpl1 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
fm_pos += sizeof(*fmpl1);
if (fm_pos >= fm_size)
goto fail_bad;
if (be32_to_cpu(fmpl1->magic) != UBI_FM_POOL_MAGIC) {
ubi_err("bad fastmap pool magic: 0x%x, expected: 0x%x",
be32_to_cpu(fmpl1->magic), UBI_FM_POOL_MAGIC);
goto fail_bad;
}
fmpl2 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
fm_pos += sizeof(*fmpl2);
if (fm_pos >= fm_size)
goto fail_bad;
if (be32_to_cpu(fmpl2->magic) != UBI_FM_POOL_MAGIC) {
ubi_err("bad fastmap pool magic: 0x%x, expected: 0x%x",
be32_to_cpu(fmpl2->magic), UBI_FM_POOL_MAGIC);
goto fail_bad;
}
pool_size = be16_to_cpu(fmpl1->size);
wl_pool_size = be16_to_cpu(fmpl2->size);
fm->max_pool_size = be16_to_cpu(fmpl1->max_size);
fm->max_wl_pool_size = be16_to_cpu(fmpl2->max_size);
if (pool_size > UBI_FM_MAX_POOL_SIZE || pool_size < 0) {
ubi_err("bad pool size: %i", pool_size);
goto fail_bad;
}
if (wl_pool_size > UBI_FM_MAX_POOL_SIZE || wl_pool_size < 0) {
ubi_err("bad WL pool size: %i", wl_pool_size);
goto fail_bad;
}
if (fm->max_pool_size > UBI_FM_MAX_POOL_SIZE ||
fm->max_pool_size < 0) {
ubi_err("bad maximal pool size: %i", fm->max_pool_size);
goto fail_bad;
}
if (fm->max_wl_pool_size > UBI_FM_MAX_POOL_SIZE ||
fm->max_wl_pool_size < 0) {
ubi_err("bad maximal WL pool size: %i", fm->max_wl_pool_size);
goto fail_bad;
}
/* read EC values from free list */
for (i = 0; i < be32_to_cpu(fmhdr->free_peb_count); i++) {
fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
fm_pos += sizeof(*fmec);
if (fm_pos >= fm_size)
goto fail_bad;
add_aeb(ai, &ai->free, be32_to_cpu(fmec->pnum),
be32_to_cpu(fmec->ec), 0);
}
/* read EC values from used list */
for (i = 0; i < be32_to_cpu(fmhdr->used_peb_count); i++) {
fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
fm_pos += sizeof(*fmec);
if (fm_pos >= fm_size)
goto fail_bad;
add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
be32_to_cpu(fmec->ec), 0);
}
/* read EC values from scrub list */
for (i = 0; i < be32_to_cpu(fmhdr->scrub_peb_count); i++) {
fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
fm_pos += sizeof(*fmec);
if (fm_pos >= fm_size)
goto fail_bad;
add_aeb(ai, &used, be32_to_cpu(fmec->pnum),
be32_to_cpu(fmec->ec), 1);
}
/* read EC values from erase list */
for (i = 0; i < be32_to_cpu(fmhdr->erase_peb_count); i++) {
fmec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
fm_pos += sizeof(*fmec);
if (fm_pos >= fm_size)
goto fail_bad;
add_aeb(ai, &ai->erase, be32_to_cpu(fmec->pnum),
be32_to_cpu(fmec->ec), 1);
}
ai->mean_ec = div_u64(ai->ec_sum, ai->ec_count);
ai->bad_peb_count = be32_to_cpu(fmhdr->bad_peb_count);
/* Iterate over all volumes and read their EBA table */
for (i = 0; i < be32_to_cpu(fmhdr->vol_count); i++) {
fmvhdr = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
fm_pos += sizeof(*fmvhdr);
if (fm_pos >= fm_size)
goto fail_bad;
if (be32_to_cpu(fmvhdr->magic) != UBI_FM_VHDR_MAGIC) {
ubi_err("bad fastmap vol header magic: 0x%x, " \
"expected: 0x%x",
be32_to_cpu(fmvhdr->magic), UBI_FM_VHDR_MAGIC);
goto fail_bad;
}
av = add_vol(ai, be32_to_cpu(fmvhdr->vol_id),
be32_to_cpu(fmvhdr->used_ebs),
be32_to_cpu(fmvhdr->data_pad),
fmvhdr->vol_type,
be32_to_cpu(fmvhdr->last_eb_bytes));
if (!av)
goto fail_bad;
ai->vols_found++;
if (ai->highest_vol_id < be32_to_cpu(fmvhdr->vol_id))
ai->highest_vol_id = be32_to_cpu(fmvhdr->vol_id);
fm_eba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
fm_pos += sizeof(*fm_eba);
fm_pos += (sizeof(__be32) * be32_to_cpu(fm_eba->reserved_pebs));
if (fm_pos >= fm_size)
goto fail_bad;
if (be32_to_cpu(fm_eba->magic) != UBI_FM_EBA_MAGIC) {
ubi_err("bad fastmap EBA header magic: 0x%x, " \
"expected: 0x%x",
be32_to_cpu(fm_eba->magic), UBI_FM_EBA_MAGIC);
goto fail_bad;
}
for (j = 0; j < be32_to_cpu(fm_eba->reserved_pebs); j++) {
int pnum = be32_to_cpu(fm_eba->pnum[j]);
if (pnum < 0)
continue;
aeb = NULL;
list_for_each_entry(tmp_aeb, &used, u.list) {
if (tmp_aeb->pnum == pnum)
aeb = tmp_aeb;
}
/* This can happen if a PEB is already in an EBA known
* by this fastmap but the PEB itself is not in the used
* list.
* In this case the PEB can be within the fastmap pool
* or while writing the fastmap it was in the protection
* queue.
*/
if (!aeb) {
aeb = kmem_cache_alloc(ai->aeb_slab_cache,
GFP_KERNEL);
if (!aeb) {
ret = -ENOMEM;
goto fail;
}
aeb->lnum = j;
aeb->pnum = be32_to_cpu(fm_eba->pnum[j]);
aeb->ec = -1;
aeb->scrub = aeb->copy_flag = aeb->sqnum = 0;
list_add_tail(&aeb->u.list, &eba_orphans);
continue;
}
aeb->lnum = j;
if (av->highest_lnum <= aeb->lnum)
av->highest_lnum = aeb->lnum;
assign_aeb_to_av(ai, aeb, av);
dbg_bld("inserting PEB:%i (LEB %i) to vol %i",
aeb->pnum, aeb->lnum, av->vol_id);
}
ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
if (!ech) {
ret = -ENOMEM;
goto fail;
}
list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &eba_orphans,
u.list) {
int err;
if (ubi_io_is_bad(ubi, tmp_aeb->pnum)) {
ubi_err("bad PEB in fastmap EBA orphan list");
ret = UBI_BAD_FASTMAP;
kfree(ech);
goto fail;
}
err = ubi_io_read_ec_hdr(ubi, tmp_aeb->pnum, ech, 0);
if (err && err != UBI_IO_BITFLIPS) {
ubi_err("unable to read EC header! PEB:%i " \
"err:%i", tmp_aeb->pnum, err);
ret = err > 0 ? UBI_BAD_FASTMAP : err;
kfree(ech);
goto fail;
} else if (err == UBI_IO_BITFLIPS)
tmp_aeb->scrub = 1;
tmp_aeb->ec = be64_to_cpu(ech->ec);
assign_aeb_to_av(ai, tmp_aeb, av);
}
kfree(ech);
}
ret = scan_pool(ubi, ai, fmpl1->pebs, pool_size, &max_sqnum,
&eba_orphans, &free);
if (ret)
goto fail;
ret = scan_pool(ubi, ai, fmpl2->pebs, wl_pool_size, &max_sqnum,
&eba_orphans, &free);
if (ret)
goto fail;
if (max_sqnum > ai->max_sqnum)
ai->max_sqnum = max_sqnum;
list_for_each_entry_safe(tmp_aeb, _tmp_aeb, &free, u.list) {
list_del(&tmp_aeb->u.list);
list_add_tail(&tmp_aeb->u.list, &ai->free);
}
/*
* If fastmap is leaking PEBs (must not happen), raise a
* fat warning and fall back to scanning mode.
* We do this here because in ubi_wl_init() it's too late
* and we cannot fall back to scanning.
*/
if (WARN_ON(count_fastmap_pebs(ai) != ubi->peb_count -
ai->bad_peb_count - fm->used_blocks))
goto fail_bad;
return 0;
fail_bad:
ret = UBI_BAD_FASTMAP;
fail:
return ret;
}
/**
* ubi_scan_fastmap - scan the fastmap.
* @ubi: UBI device object
* @ai: UBI attach info to be filled
* @fm_anchor: The fastmap starts at this PEB
*
* Returns 0 on success, UBI_NO_FASTMAP if no fastmap was found,
* UBI_BAD_FASTMAP if one was found but is not usable.
* < 0 indicates an internal error.
*/
int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
int fm_anchor)
{
struct ubi_fm_sb *fmsb, *fmsb2;
struct ubi_vid_hdr *vh;
struct ubi_ec_hdr *ech;
struct ubi_fastmap_layout *fm;
int i, used_blocks, pnum, ret = 0;
size_t fm_size;
__be32 crc, tmp_crc;
unsigned long long sqnum = 0;
mutex_lock(&ubi->fm_mutex);
memset(ubi->fm_buf, 0, ubi->fm_size);
fmsb = kmalloc(sizeof(*fmsb), GFP_KERNEL);
if (!fmsb) {
ret = -ENOMEM;
goto out;
}
fm = kzalloc(sizeof(*fm), GFP_KERNEL);
if (!fm) {
ret = -ENOMEM;
kfree(fmsb);
goto out;
}
ret = ubi_io_read(ubi, fmsb, fm_anchor, ubi->leb_start, sizeof(*fmsb));
if (ret && ret != UBI_IO_BITFLIPS)
goto free_fm_sb;
else if (ret == UBI_IO_BITFLIPS)
fm->to_be_tortured[0] = 1;
if (be32_to_cpu(fmsb->magic) != UBI_FM_SB_MAGIC) {
ubi_err("bad super block magic: 0x%x, expected: 0x%x",
be32_to_cpu(fmsb->magic), UBI_FM_SB_MAGIC);
ret = UBI_BAD_FASTMAP;
goto free_fm_sb;
}
if (fmsb->version != UBI_FM_FMT_VERSION) {
ubi_err("bad fastmap version: %i, expected: %i",
fmsb->version, UBI_FM_FMT_VERSION);
ret = UBI_BAD_FASTMAP;
goto free_fm_sb;
}
used_blocks = be32_to_cpu(fmsb->used_blocks);
if (used_blocks > UBI_FM_MAX_BLOCKS || used_blocks < 1) {
ubi_err("number of fastmap blocks is invalid: %i", used_blocks);
ret = UBI_BAD_FASTMAP;
goto free_fm_sb;
}
fm_size = ubi->leb_size * used_blocks;
if (fm_size != ubi->fm_size) {
ubi_err("bad fastmap size: %zi, expected: %zi", fm_size,
ubi->fm_size);
ret = UBI_BAD_FASTMAP;
goto free_fm_sb;
}
ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
if (!ech) {
ret = -ENOMEM;
goto free_fm_sb;
}
vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
if (!vh) {
ret = -ENOMEM;
goto free_hdr;
}
for (i = 0; i < used_blocks; i++) {
pnum = be32_to_cpu(fmsb->block_loc[i]);
if (ubi_io_is_bad(ubi, pnum)) {
ret = UBI_BAD_FASTMAP;
goto free_hdr;
}
ret = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
if (ret && ret != UBI_IO_BITFLIPS) {
ubi_err("unable to read fastmap block# %i EC (PEB: %i)",
i, pnum);
if (ret > 0)
ret = UBI_BAD_FASTMAP;
goto free_hdr;
} else if (ret == UBI_IO_BITFLIPS)
fm->to_be_tortured[i] = 1;
if (!ubi->image_seq)
ubi->image_seq = be32_to_cpu(ech->image_seq);
if (be32_to_cpu(ech->image_seq) != ubi->image_seq) {
ret = UBI_BAD_FASTMAP;
goto free_hdr;
}
ret = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
if (ret && ret != UBI_IO_BITFLIPS) {
ubi_err("unable to read fastmap block# %i (PEB: %i)",
i, pnum);
goto free_hdr;
}
if (i == 0) {
if (be32_to_cpu(vh->vol_id) != UBI_FM_SB_VOLUME_ID) {
ubi_err("bad fastmap anchor vol_id: 0x%x," \
" expected: 0x%x",
be32_to_cpu(vh->vol_id),
UBI_FM_SB_VOLUME_ID);
ret = UBI_BAD_FASTMAP;
goto free_hdr;
}
} else {
if (be32_to_cpu(vh->vol_id) != UBI_FM_DATA_VOLUME_ID) {
ubi_err("bad fastmap data vol_id: 0x%x," \
" expected: 0x%x",
be32_to_cpu(vh->vol_id),
UBI_FM_DATA_VOLUME_ID);
ret = UBI_BAD_FASTMAP;
goto free_hdr;
}
}
if (sqnum < be64_to_cpu(vh->sqnum))
sqnum = be64_to_cpu(vh->sqnum);
ret = ubi_io_read(ubi, ubi->fm_buf + (ubi->leb_size * i), pnum,
ubi->leb_start, ubi->leb_size);
if (ret && ret != UBI_IO_BITFLIPS) {
ubi_err("unable to read fastmap block# %i (PEB: %i, " \
"err: %i)", i, pnum, ret);
goto free_hdr;
}
}
kfree(fmsb);
fmsb = NULL;
fmsb2 = (struct ubi_fm_sb *)(ubi->fm_buf);
tmp_crc = be32_to_cpu(fmsb2->data_crc);
fmsb2->data_crc = 0;
crc = crc32(UBI_CRC32_INIT, ubi->fm_buf, fm_size);
if (crc != tmp_crc) {
ubi_err("fastmap data CRC is invalid");
ubi_err("CRC should be: 0x%x, calc: 0x%x", tmp_crc, crc);
ret = UBI_BAD_FASTMAP;
goto free_hdr;
}
fmsb2->sqnum = sqnum;
fm->used_blocks = used_blocks;
ret = ubi_attach_fastmap(ubi, ai, fm);
if (ret) {
if (ret > 0)
ret = UBI_BAD_FASTMAP;
goto free_hdr;
}
for (i = 0; i < used_blocks; i++) {
struct ubi_wl_entry *e;
e = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
if (!e) {
while (i--)
kfree(fm->e[i]);
ret = -ENOMEM;
goto free_hdr;
}
e->pnum = be32_to_cpu(fmsb2->block_loc[i]);
e->ec = be32_to_cpu(fmsb2->block_ec[i]);
fm->e[i] = e;
}
ubi->fm = fm;
ubi->fm_pool.max_size = ubi->fm->max_pool_size;
ubi->fm_wl_pool.max_size = ubi->fm->max_wl_pool_size;
ubi_msg("attached by fastmap");
ubi_msg("fastmap pool size: %d", ubi->fm_pool.max_size);
ubi_msg("fastmap WL pool size: %d", ubi->fm_wl_pool.max_size);
ubi->fm_disabled = 0;
ubi_free_vid_hdr(ubi, vh);
kfree(ech);
out:
mutex_unlock(&ubi->fm_mutex);
if (ret == UBI_BAD_FASTMAP)
ubi_err("Attach by fastmap failed, doing a full scan!");
return ret;
free_hdr:
ubi_free_vid_hdr(ubi, vh);
kfree(ech);
free_fm_sb:
kfree(fmsb);
kfree(fm);
goto out;
}
/**
* ubi_write_fastmap - writes a fastmap.
* @ubi: UBI device object
* @new_fm: the fastmap to be written
*
* Returns 0 on success, < 0 indicates an internal error.
*/
static int ubi_write_fastmap(struct ubi_device *ubi,
struct ubi_fastmap_layout *new_fm)
{
size_t fm_pos = 0;
void *fm_raw;
struct ubi_fm_sb *fmsb;
struct ubi_fm_hdr *fmh;
struct ubi_fm_scan_pool *fmpl1, *fmpl2;
struct ubi_fm_ec *fec;
struct ubi_fm_volhdr *fvh;
struct ubi_fm_eba *feba;
struct rb_node *node;
struct ubi_wl_entry *wl_e;
struct ubi_volume *vol;
struct ubi_vid_hdr *avhdr, *dvhdr;
struct ubi_work *ubi_wrk;
int ret, i, j, free_peb_count, used_peb_count, vol_count;
int scrub_peb_count, erase_peb_count;
fm_raw = ubi->fm_buf;
memset(ubi->fm_buf, 0, ubi->fm_size);
avhdr = new_fm_vhdr(ubi, UBI_FM_SB_VOLUME_ID);
if (!avhdr) {
ret = -ENOMEM;
goto out;
}
dvhdr = new_fm_vhdr(ubi, UBI_FM_DATA_VOLUME_ID);
if (!dvhdr) {
ret = -ENOMEM;
goto out_kfree;
}
spin_lock(&ubi->volumes_lock);
spin_lock(&ubi->wl_lock);
fmsb = (struct ubi_fm_sb *)fm_raw;
fm_pos += sizeof(*fmsb);
ubi_assert(fm_pos <= ubi->fm_size);
fmh = (struct ubi_fm_hdr *)(fm_raw + fm_pos);
fm_pos += sizeof(*fmh);
ubi_assert(fm_pos <= ubi->fm_size);
fmsb->magic = cpu_to_be32(UBI_FM_SB_MAGIC);
fmsb->version = UBI_FM_FMT_VERSION;
fmsb->used_blocks = cpu_to_be32(new_fm->used_blocks);
/* the max sqnum will be filled in while *reading* the fastmap */
fmsb->sqnum = 0;
fmh->magic = cpu_to_be32(UBI_FM_HDR_MAGIC);
free_peb_count = 0;
used_peb_count = 0;
scrub_peb_count = 0;
erase_peb_count = 0;
vol_count = 0;
fmpl1 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
fm_pos += sizeof(*fmpl1);
fmpl1->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
fmpl1->size = cpu_to_be16(ubi->fm_pool.size);
fmpl1->max_size = cpu_to_be16(ubi->fm_pool.max_size);
for (i = 0; i < ubi->fm_pool.size; i++)
fmpl1->pebs[i] = cpu_to_be32(ubi->fm_pool.pebs[i]);
fmpl2 = (struct ubi_fm_scan_pool *)(fm_raw + fm_pos);
fm_pos += sizeof(*fmpl2);
fmpl2->magic = cpu_to_be32(UBI_FM_POOL_MAGIC);
fmpl2->size = cpu_to_be16(ubi->fm_wl_pool.size);
fmpl2->max_size = cpu_to_be16(ubi->fm_wl_pool.max_size);
for (i = 0; i < ubi->fm_wl_pool.size; i++)
fmpl2->pebs[i] = cpu_to_be32(ubi->fm_wl_pool.pebs[i]);
for (node = rb_first(&ubi->free); node; node = rb_next(node)) {
wl_e = rb_entry(node, struct ubi_wl_entry, u.rb);
fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
fec->pnum = cpu_to_be32(wl_e->pnum);
fec->ec = cpu_to_be32(wl_e->ec);
free_peb_count++;
fm_pos += sizeof(*fec);
ubi_assert(fm_pos <= ubi->fm_size);
}
fmh->free_peb_count = cpu_to_be32(free_peb_count);
for (node = rb_first(&ubi->used); node; node = rb_next(node)) {
wl_e = rb_entry(node, struct ubi_wl_entry, u.rb);
fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
fec->pnum = cpu_to_be32(wl_e->pnum);
fec->ec = cpu_to_be32(wl_e->ec);
used_peb_count++;
fm_pos += sizeof(*fec);
ubi_assert(fm_pos <= ubi->fm_size);
}
fmh->used_peb_count = cpu_to_be32(used_peb_count);
for (node = rb_first(&ubi->scrub); node; node = rb_next(node)) {
wl_e = rb_entry(node, struct ubi_wl_entry, u.rb);
fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
fec->pnum = cpu_to_be32(wl_e->pnum);
fec->ec = cpu_to_be32(wl_e->ec);
scrub_peb_count++;
fm_pos += sizeof(*fec);
ubi_assert(fm_pos <= ubi->fm_size);
}
fmh->scrub_peb_count = cpu_to_be32(scrub_peb_count);
list_for_each_entry(ubi_wrk, &ubi->works, list) {
if (ubi_is_erase_work(ubi_wrk)) {
wl_e = ubi_wrk->e;
ubi_assert(wl_e);
fec = (struct ubi_fm_ec *)(fm_raw + fm_pos);
fec->pnum = cpu_to_be32(wl_e->pnum);
fec->ec = cpu_to_be32(wl_e->ec);
erase_peb_count++;
fm_pos += sizeof(*fec);
ubi_assert(fm_pos <= ubi->fm_size);
}
}
fmh->erase_peb_count = cpu_to_be32(erase_peb_count);
for (i = 0; i < UBI_MAX_VOLUMES + UBI_INT_VOL_COUNT; i++) {
vol = ubi->volumes[i];
if (!vol)
continue;
vol_count++;
fvh = (struct ubi_fm_volhdr *)(fm_raw + fm_pos);
fm_pos += sizeof(*fvh);
ubi_assert(fm_pos <= ubi->fm_size);
fvh->magic = cpu_to_be32(UBI_FM_VHDR_MAGIC);
fvh->vol_id = cpu_to_be32(vol->vol_id);
fvh->vol_type = vol->vol_type;
fvh->used_ebs = cpu_to_be32(vol->used_ebs);
fvh->data_pad = cpu_to_be32(vol->data_pad);
fvh->last_eb_bytes = cpu_to_be32(vol->last_eb_bytes);
ubi_assert(vol->vol_type == UBI_DYNAMIC_VOLUME ||
vol->vol_type == UBI_STATIC_VOLUME);
feba = (struct ubi_fm_eba *)(fm_raw + fm_pos);
fm_pos += sizeof(*feba) + (sizeof(__be32) * vol->reserved_pebs);
ubi_assert(fm_pos <= ubi->fm_size);
for (j = 0; j < vol->reserved_pebs; j++)
feba->pnum[j] = cpu_to_be32(vol->eba_tbl[j]);
feba->reserved_pebs = cpu_to_be32(j);
feba->magic = cpu_to_be32(UBI_FM_EBA_MAGIC);
}
fmh->vol_count = cpu_to_be32(vol_count);
fmh->bad_peb_count = cpu_to_be32(ubi->bad_peb_count);
avhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
avhdr->lnum = 0;
spin_unlock(&ubi->wl_lock);
spin_unlock(&ubi->volumes_lock);
dbg_bld("writing fastmap SB to PEB %i", new_fm->e[0]->pnum);
ret = ubi_io_write_vid_hdr(ubi, new_fm->e[0]->pnum, avhdr);
if (ret) {
ubi_err("unable to write vid_hdr to fastmap SB!");
goto out_kfree;
}
for (i = 0; i < new_fm->used_blocks; i++) {
fmsb->block_loc[i] = cpu_to_be32(new_fm->e[i]->pnum);
fmsb->block_ec[i] = cpu_to_be32(new_fm->e[i]->ec);
}
fmsb->data_crc = 0;
fmsb->data_crc = cpu_to_be32(crc32(UBI_CRC32_INIT, fm_raw,
ubi->fm_size));
for (i = 1; i < new_fm->used_blocks; i++) {
dvhdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
dvhdr->lnum = cpu_to_be32(i);
dbg_bld("writing fastmap data to PEB %i sqnum %llu",
new_fm->e[i]->pnum, be64_to_cpu(dvhdr->sqnum));
ret = ubi_io_write_vid_hdr(ubi, new_fm->e[i]->pnum, dvhdr);
if (ret) {
ubi_err("unable to write vid_hdr to PEB %i!",
new_fm->e[i]->pnum);
goto out_kfree;
}
}
for (i = 0; i < new_fm->used_blocks; i++) {
ret = ubi_io_write(ubi, fm_raw + (i * ubi->leb_size),
new_fm->e[i]->pnum, ubi->leb_start, ubi->leb_size);
if (ret) {
ubi_err("unable to write fastmap to PEB %i!",
new_fm->e[i]->pnum);
goto out_kfree;
}
}
ubi_assert(new_fm);
ubi->fm = new_fm;
dbg_bld("fastmap written!");
out_kfree:
ubi_free_vid_hdr(ubi, avhdr);
ubi_free_vid_hdr(ubi, dvhdr);
out:
return ret;
}
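The serialization above fixes the fastmap's on-flash layout: super block, header, the two pools, one EC record per tracked PEB, then per volume a header plus an EBA table. As a hedged sketch (not the kernel's ubi_calc_fm_size(), whose exact accounting may differ), the required buffer size follows directly from those records:
static size_t fm_size_sketch(struct ubi_device *ubi, int vol_count)
{
/* super block, header and both scan pools come first */
size_t size = sizeof(struct ubi_fm_sb) + sizeof(struct ubi_fm_hdr) +
2 * sizeof(struct ubi_fm_scan_pool);
/* one EC record per PEB (free, used, scrub and erase sets together) */
size += ubi->peb_count * sizeof(struct ubi_fm_ec);
/* per volume: a header plus an EBA table, bounded by peb_count slots */
size += vol_count * sizeof(struct ubi_fm_volhdr);
size += sizeof(struct ubi_fm_eba) + ubi->peb_count * sizeof(__be32);
/* the map is written in whole LEBs, see the write loop above */
return roundup(size, ubi->leb_size);
}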
/**
* erase_block - Manually erase a PEB.
* @ubi: UBI device object
* @pnum: PEB to be erased
*
* Returns the new EC value on success, < 0 indicates an internal error.
*/
static int erase_block(struct ubi_device *ubi, int pnum)
{
int ret;
struct ubi_ec_hdr *ec_hdr;
long long ec;
ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
if (!ec_hdr)
return -ENOMEM;
ret = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);
if (ret < 0)
goto out;
else if (ret && ret != UBI_IO_BITFLIPS) {
ret = -EINVAL;
goto out;
}
ret = ubi_io_sync_erase(ubi, pnum, 0);
if (ret < 0)
goto out;
ec = be64_to_cpu(ec_hdr->ec);
ec += ret;
if (ec > UBI_MAX_ERASECOUNTER) {
ret = -EINVAL;
goto out;
}
ec_hdr->ec = cpu_to_be64(ec);
ret = ubi_io_write_ec_hdr(ubi, pnum, ec_hdr);
if (ret < 0)
goto out;
ret = ec;
out:
kfree(ec_hdr);
return ret;
}
/**
* invalidate_fastmap - destroys a fastmap.
* @ubi: UBI device object
* @fm: the fastmap to be destroyed
*
* Returns 0 on success, < 0 indicates an internal error.
*/
static int invalidate_fastmap(struct ubi_device *ubi,
struct ubi_fastmap_layout *fm)
{
int ret, i;
struct ubi_vid_hdr *vh;
ret = erase_block(ubi, fm->e[0]->pnum);
if (ret < 0)
return ret;
vh = new_fm_vhdr(ubi, UBI_FM_SB_VOLUME_ID);
if (!vh)
return -ENOMEM;
/* deleting the current fastmap SB is not enough, an old SB may exist,
* so create a (corrupted) SB such that fastmap will find it and fall
* back to scanning mode in any case */
vh->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
ret = ubi_io_write_vid_hdr(ubi, fm->e[0]->pnum, vh);
for (i = 0; i < fm->used_blocks; i++)
ubi_wl_put_fm_peb(ubi, fm->e[i], i, fm->to_be_tortured[i]);
return ret;
}
/**
* ubi_update_fastmap - will be called by UBI if a volume changes or
* a fastmap pool becomes full.
* @ubi: UBI device object
*
* Returns 0 on success, < 0 indicates an internal error.
*/
int ubi_update_fastmap(struct ubi_device *ubi)
{
int ret, i;
struct ubi_fastmap_layout *new_fm, *old_fm;
struct ubi_wl_entry *tmp_e;
mutex_lock(&ubi->fm_mutex);
ubi_refill_pools(ubi);
if (ubi->ro_mode || ubi->fm_disabled) {
mutex_unlock(&ubi->fm_mutex);
return 0;
}
ret = ubi_ensure_anchor_pebs(ubi);
if (ret) {
mutex_unlock(&ubi->fm_mutex);
return ret;
}
new_fm = kzalloc(sizeof(*new_fm), GFP_KERNEL);
if (!new_fm) {
mutex_unlock(&ubi->fm_mutex);
return -ENOMEM;
}
new_fm->used_blocks = ubi->fm_size / ubi->leb_size;
for (i = 0; i < new_fm->used_blocks; i++) {
new_fm->e[i] = kmem_cache_alloc(ubi_wl_entry_slab, GFP_KERNEL);
if (!new_fm->e[i]) {
while (i--)
kfree(new_fm->e[i]);
kfree(new_fm);
mutex_unlock(&ubi->fm_mutex);
return -ENOMEM;
}
}
old_fm = ubi->fm;
ubi->fm = NULL;
if (new_fm->used_blocks > UBI_FM_MAX_BLOCKS) {
ubi_err("fastmap too large");
ret = -ENOSPC;
goto err;
}
for (i = 1; i < new_fm->used_blocks; i++) {
spin_lock(&ubi->wl_lock);
tmp_e = ubi_wl_get_fm_peb(ubi, 0);
spin_unlock(&ubi->wl_lock);
if (!tmp_e && !old_fm) {
int j;
ubi_err("could not get any free erase block");
for (j = 1; j < i; j++)
ubi_wl_put_fm_peb(ubi, new_fm->e[j], j, 0);
ret = -ENOSPC;
goto err;
} else if (!tmp_e && old_fm) {
ret = erase_block(ubi, old_fm->e[i]->pnum);
if (ret < 0) {
int j;
for (j = 1; j < i; j++)
ubi_wl_put_fm_peb(ubi, new_fm->e[j],
j, 0);
ubi_err("could not erase old fastmap PEB");
goto err;
}
new_fm->e[i]->pnum = old_fm->e[i]->pnum;
new_fm->e[i]->ec = old_fm->e[i]->ec;
} else {
new_fm->e[i]->pnum = tmp_e->pnum;
new_fm->e[i]->ec = tmp_e->ec;
if (old_fm)
ubi_wl_put_fm_peb(ubi, old_fm->e[i], i,
old_fm->to_be_tortured[i]);
}
}
spin_lock(&ubi->wl_lock);
tmp_e = ubi_wl_get_fm_peb(ubi, 1);
spin_unlock(&ubi->wl_lock);
if (old_fm) {
/* no fresh anchor PEB was found, reuse the old one */
if (!tmp_e) {
ret = erase_block(ubi, old_fm->e[0]->pnum);
if (ret < 0) {
int i;
ubi_err("could not erase old anchor PEB");
for (i = 1; i < new_fm->used_blocks; i++)
ubi_wl_put_fm_peb(ubi, new_fm->e[i],
i, 0);
goto err;
}
new_fm->e[0]->pnum = old_fm->e[0]->pnum;
new_fm->e[0]->ec = ret;
} else {
/* we've got a new anchor PEB, return the old one */
ubi_wl_put_fm_peb(ubi, old_fm->e[0], 0,
old_fm->to_be_tortured[0]);
new_fm->e[0]->pnum = tmp_e->pnum;
new_fm->e[0]->ec = tmp_e->ec;
}
} else {
if (!tmp_e) {
int i;
ubi_err("could not find any anchor PEB");
for (i = 1; i < new_fm->used_blocks; i++)
ubi_wl_put_fm_peb(ubi, new_fm->e[i], i, 0);
ret = -ENOSPC;
goto err;
}
new_fm->e[0]->pnum = tmp_e->pnum;
new_fm->e[0]->ec = tmp_e->ec;
}
down_write(&ubi->work_sem);
down_write(&ubi->fm_sem);
ret = ubi_write_fastmap(ubi, new_fm);
up_write(&ubi->fm_sem);
up_write(&ubi->work_sem);
if (ret)
goto err;
out_unlock:
mutex_unlock(&ubi->fm_mutex);
kfree(old_fm);
return ret;
err:
kfree(new_fm);
ubi_warn("Unable to write new fastmap, err=%i", ret);
ret = 0;
if (old_fm) {
ret = invalidate_fastmap(ubi, old_fm);
if (ret < 0)
ubi_err("Unable to invalidiate current fastmap!");
else if (ret)
ret = 0;
}
goto out_unlock;
}
......@@ -375,4 +375,141 @@ struct ubi_vtbl_record {
__be32 crc;
} __packed;
/* UBI fastmap on-flash data structures */
#define UBI_FM_SB_VOLUME_ID (UBI_LAYOUT_VOLUME_ID + 1)
#define UBI_FM_DATA_VOLUME_ID (UBI_LAYOUT_VOLUME_ID + 2)
/* fastmap on-flash data structure format version */
#define UBI_FM_FMT_VERSION 1
#define UBI_FM_SB_MAGIC 0x7B11D69F
#define UBI_FM_HDR_MAGIC 0xD4B82EF7
#define UBI_FM_VHDR_MAGIC 0xFA370ED1
#define UBI_FM_POOL_MAGIC 0x67AF4D08
#define UBI_FM_EBA_MAGIC 0xf0c040a8
/* A fastmap super block can be located between PEB 0 and
* UBI_FM_MAX_START */
#define UBI_FM_MAX_START 64
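One consequence, sketched here as an assumption about the attach side rather than a quote of fastmap.c: an attaching device never has to read more than the first UBI_FM_MAX_START PEBs to decide whether a fastmap exists at all.
/*
 * for (pnum = 0; pnum < UBI_FM_MAX_START; pnum++)
 *         if (peb_holds_fm_sb(ubi, pnum))   <- hypothetical helper
 *                 return pnum;              <- fastmap SB found
 * return UBI_NO_FASTMAP;                    <- fall back to scanning
 */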
/* A fastmap can use up to UBI_FM_MAX_BLOCKS PEBs */
#define UBI_FM_MAX_BLOCKS 32
/* 5% of the total number of PEBs has to be scanned while attaching
* from a fastmap.
* But the size of this pool is limited to be between UBI_FM_MIN_POOL_SIZE and
* UBI_FM_MAX_POOL_SIZE */
#define UBI_FM_MIN_POOL_SIZE 8
#define UBI_FM_MAX_POOL_SIZE 256
#define UBI_FM_WL_POOL_SIZE 25
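Taken together, the sizing rule above amounts to the following clamp; this is an illustrative helper, not a function from this patch set:
static inline int fm_pool_size_sketch(int peb_count)
{
int size = peb_count / 20; /* roughly 5% of all PEBs */
if (size < UBI_FM_MIN_POOL_SIZE)
size = UBI_FM_MIN_POOL_SIZE;
if (size > UBI_FM_MAX_POOL_SIZE)
size = UBI_FM_MAX_POOL_SIZE;
return size;
}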
/**
* struct ubi_fm_sb - UBI fastmap super block
* @magic: fastmap super block magic number (%UBI_FM_SB_MAGIC)
* @version: format version of this fastmap
* @data_crc: CRC over the fastmap data
* @used_blocks: number of PEBs used by this fastmap
* @block_loc: an array containing the location of all PEBs of the fastmap
* @block_ec: the erase counter of each used PEB
* @sqnum: highest sequence number value at the time the fastmap was taken
*
*/
struct ubi_fm_sb {
__be32 magic;
__u8 version;
__u8 padding1[3];
__be32 data_crc;
__be32 used_blocks;
__be32 block_loc[UBI_FM_MAX_BLOCKS];
__be32 block_ec[UBI_FM_MAX_BLOCKS];
__be64 sqnum;
__u8 padding2[32];
} __packed;
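Note that @data_crc covers the whole raw fastmap with the CRC field itself zeroed; ubi_write_fastmap() above clears it before calling crc32(). A reader has to mirror that, along the lines of this sketch (assuming crc32() and UBI_CRC32_INIT are in scope, as in the writer):
static inline int fm_sb_crc_ok(struct ubi_fm_sb *fmsb, void *fm_raw,
size_t fm_size)
{
__be32 stored = fmsb->data_crc;
u32 crc;
/* @fmsb points into @fm_raw, so zero the field before checksumming */
fmsb->data_crc = 0;
crc = crc32(UBI_CRC32_INIT, fm_raw, fm_size);
fmsb->data_crc = stored;
return crc == be32_to_cpu(stored);
}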
/**
* struct ubi_fm_hdr - header of the fastmap data set
* @magic: fastmap header magic number (%UBI_FM_HDR_MAGIC)
* @free_peb_count: number of free PEBs known by this fastmap
* @used_peb_count: number of used PEBs known by this fastmap
* @scrub_peb_count: number of PEBs to be scrubbed known by this fastmap
* @bad_peb_count: number of bad PEBs known by this fastmap
* @erase_peb_count: number of PEBs which have to be erased
* @vol_count: number of UBI volumes known by this fastmap
*/
struct ubi_fm_hdr {
__be32 magic;
__be32 free_peb_count;
__be32 used_peb_count;
__be32 scrub_peb_count;
__be32 bad_peb_count;
__be32 erase_peb_count;
__be32 vol_count;
__u8 padding[4];
} __packed;
/* struct ubi_fm_hdr is followed by two struct ubi_fm_scan_pool */
/**
* struct ubi_fm_scan_pool - Fastmap pool PEBs to be scanned while attaching
* @magic: pool magic number (%UBI_FM_POOL_MAGIC)
* @size: current pool size
* @max_size: maximal pool size
* @pebs: an array containing the location of all PEBs in this pool
*/
struct ubi_fm_scan_pool {
__be32 magic;
__be16 size;
__be16 max_size;
__be32 pebs[UBI_FM_MAX_POOL_SIZE];
__be32 padding[4];
} __packed;
/* ubi_fm_scan_pool is followed by nfree+nused struct ubi_fm_ec records */
/**
* struct ubi_fm_ec - stores the erase counter of a PEB
* @pnum: PEB number
* @ec: ec of this PEB
*/
struct ubi_fm_ec {
__be32 pnum;
__be32 ec;
} __packed;
/**
* struct ubi_fm_volhdr - Fastmap volume header
* it identifies the start of an EBA table
* @magic: Fastmap volume header magic number (%UBI_FM_VHDR_MAGIC)
* @vol_id: volume id of the fastmapped volume
* @vol_type: type of the fastmapped volume
* @data_pad: data_pad value of the fastmapped volume
* @used_ebs: number of used LEBs within this volume
* @last_eb_bytes: number of bytes used in the last LEB
*/
struct ubi_fm_volhdr {
__be32 magic;
__be32 vol_id;
__u8 vol_type;
__u8 padding1[3];
__be32 data_pad;
__be32 used_ebs;
__be32 last_eb_bytes;
__u8 padding2[8];
} __packed;
/* struct ubi_fm_volhdr is followed by one struct ubi_fm_eba record */
/**
* struct ubi_fm_eba - denotes an association between a PEB and LEB
* @magic: EBA table magic number
* @reserved_pebs: number of table entries
* @pnum: PEB number of LEB (LEB is the index)
*/
struct ubi_fm_eba {
__be32 magic;
__be32 reserved_pebs;
__be32 pnum[0];
} __packed;
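Read back, the table inverts the writer loop in ubi_write_fastmap(): the array index is the LEB number, the value the owning PEB. A sketch with illustrative names, not the exact attach code:
static inline void fm_eba_restore_sketch(const struct ubi_fm_eba *feba,
int *eba_tbl)
{
int j;
for (j = 0; j < be32_to_cpu(feba->reserved_pebs); j++)
eba_tbl[j] = be32_to_cpu(feba->pnum[j]); /* LEB j lives in this PEB */
}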
#endif /* !__UBI_MEDIA_H__ */
......@@ -133,6 +133,17 @@ enum {
MOVE_RETRY,
};
/*
* Return codes of the fastmap sub-system
*
* UBI_NO_FASTMAP: No fastmap super block was found
* UBI_BAD_FASTMAP: A fastmap was found but it's unusable
*/
enum {
UBI_NO_FASTMAP = 1,
UBI_BAD_FASTMAP,
};
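Both codes are soft failures by design: the caller is expected to degrade to a full scan, roughly as follows ("scan_all" stands in for whatever performs the full-media scan; the exact call site lives in attach.c):
/*
 * err = ubi_scan_fastmap(ubi, ai, fm_anchor);
 * if (err == UBI_NO_FASTMAP || err == UBI_BAD_FASTMAP)
 *         err = scan_all(ubi, ai);
 */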
/**
* struct ubi_wl_entry - wear-leveling entry.
* @u.rb: link in the corresponding (free/used) RB-tree
......@@ -198,6 +209,41 @@ struct ubi_rename_entry {
struct ubi_volume_desc;
/**
* struct ubi_fastmap_layout - in-memory fastmap data structure.
* @e: PEBs used by the current fastmap
* @to_be_tortured: if non-zero, this PEB has to be tortured
* @used_blocks: number of used PEBs
* @max_pool_size: maximal size of the user pool
* @max_wl_pool_size: maximal size of the pool used by the WL sub-system
*/
struct ubi_fastmap_layout {
struct ubi_wl_entry *e[UBI_FM_MAX_BLOCKS];
int to_be_tortured[UBI_FM_MAX_BLOCKS];
int used_blocks;
int max_pool_size;
int max_wl_pool_size;
};
/**
* struct ubi_fm_pool - in-memory fastmap pool
* @pebs: PEBs in this pool
* @used: number of used PEBs
* @size: total number of PEBs in this pool
* @max_size: maximal size of the pool
*
* A pool gets filled with up to max_size PEBs.
* If all PEBs within the pool are used, a new fastmap will be written
* to the flash and the pool gets refilled with empty PEBs.
*
*/
struct ubi_fm_pool {
int pebs[UBI_FM_MAX_POOL_SIZE];
int used;
int size;
int max_size;
};
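The pool discipline described above boils down to a few lines; the following is an illustration of the contract with locking omitted, not the exact ubi_wl_get_peb() code:
/*
 * if (pool->used == pool->size)
 *         ubi_update_fastmap(ubi);     <- writes a fastmap, refills pools
 * if (!pool->size)
 *         return -ENOSPC;              <- not a single free PEB left
 * return pool->pebs[pool->used++];
 */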
/**
* struct ubi_volume - UBI volume description data structure.
* @dev: device object to make use of the Linux device model
......@@ -333,9 +379,21 @@ struct ubi_wl_entry;
* @ltree: the lock tree
* @alc_mutex: serializes "atomic LEB change" operations
*
* @fm_disabled: non-zero if fastmap is disabled (default)
* @fm: in-memory data structure of the currently used fastmap
* @fm_pool: in-memory data structure of the fastmap pool
* @fm_wl_pool: in-memory data structure of the fastmap pool used by the WL
* sub-system
* @fm_mutex: serializes ubi_update_fastmap() and protects @fm_buf
* @fm_buf: vmalloc()'d buffer which holds the raw fastmap
* @fm_size: fastmap size in bytes
* @fm_sem: allows ubi_update_fastmap() to block EBA table changes
* @fm_work: fastmap work queue
*
* @used: RB-tree of used physical eraseblocks
* @erroneous: RB-tree of erroneous used physical eraseblocks
* @free: RB-tree of free physical eraseblocks
* @free_count: Contains the number of elements in @free
* @scrub: RB-tree of physical eraseblocks which need scrubbing
* @pq: protection queue (contain physical eraseblocks which are temporarily
* protected from the wear-leveling worker)
......@@ -426,10 +484,22 @@ struct ubi_device {
struct rb_root ltree;
struct mutex alc_mutex;
/* Fastmap stuff */
int fm_disabled;
struct ubi_fastmap_layout *fm;
struct ubi_fm_pool fm_pool;
struct ubi_fm_pool fm_wl_pool;
struct rw_semaphore fm_sem;
struct mutex fm_mutex;
void *fm_buf;
size_t fm_size;
struct work_struct fm_work;
/* Wear-leveling sub-system's stuff */
struct rb_root used;
struct rb_root erroneous;
struct rb_root free;
int free_count;
struct rb_root scrub;
struct list_head pq[UBI_PROT_QUEUE_LEN];
int pq_head;
......@@ -596,6 +666,32 @@ struct ubi_attach_info {
struct kmem_cache *aeb_slab_cache;
};
/**
* struct ubi_work - UBI work description data structure.
* @list: a link in the list of pending works
* @func: worker function
* @e: physical eraseblock to erase
* @vol_id: the volume ID on which this erasure is being performed
* @lnum: the logical eraseblock number
* @torture: if the physical eraseblock has to be tortured
* @anchor: produce an anchor PEB to be used by fastmap
*
* The @func pointer points to the worker function. If the @cancel argument is
* not zero, the worker has to free the resources and exit immediately. The
* worker has to return zero in case of success and a negative error code in
* case of failure.
*/
struct ubi_work {
struct list_head list;
int (*func)(struct ubi_device *ubi, struct ubi_work *wrk, int cancel);
/* The below fields are only relevant to erasure works */
struct ubi_wl_entry *e;
int vol_id;
int lnum;
int torture;
int anchor;
};
#include "debug.h"
extern struct kmem_cache *ubi_wl_entry_slab;
......@@ -606,7 +702,7 @@ extern struct class *ubi_class;
extern struct mutex ubi_devices_mutex;
extern struct blocking_notifier_head ubi_notifiers;
/* attach.c */
int ubi_add_to_av(struct ubi_device *ubi, struct ubi_attach_info *ai, int pnum,
int ec, const struct ubi_vid_hdr *vid_hdr, int bitflips);
struct ubi_ainf_volume *ubi_find_av(const struct ubi_attach_info *ai,
......@@ -614,7 +710,7 @@ struct ubi_ainf_volume *ubi_find_av(const struct ubi_attach_info *ai,
void ubi_remove_av(struct ubi_attach_info *ai, struct ubi_ainf_volume *av);
struct ubi_ainf_peb *ubi_early_get_peb(struct ubi_device *ubi,
struct ubi_attach_info *ai);
int ubi_attach(struct ubi_device *ubi, int force_scan);
void ubi_destroy_ai(struct ubi_attach_info *ai);
/* vtbl.c */
......@@ -664,6 +760,9 @@ int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
struct ubi_vid_hdr *vid_hdr);
int ubi_eba_init(struct ubi_device *ubi, struct ubi_attach_info *ai);
unsigned long long ubi_next_sqnum(struct ubi_device *ubi);
int self_check_eba(struct ubi_device *ubi, struct ubi_attach_info *ai_fastmap,
struct ubi_attach_info *ai_scan);
/* wl.c */
int ubi_wl_get_peb(struct ubi_device *ubi);
......@@ -674,6 +773,12 @@ int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum);
int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai);
void ubi_wl_close(struct ubi_device *ubi);
int ubi_thread(void *u);
struct ubi_wl_entry *ubi_wl_get_fm_peb(struct ubi_device *ubi, int anchor);
int ubi_wl_put_fm_peb(struct ubi_device *ubi, struct ubi_wl_entry *used_e,
int lnum, int torture);
int ubi_is_erase_work(struct ubi_work *wrk);
void ubi_refill_pools(struct ubi_device *ubi);
int ubi_ensure_anchor_pebs(struct ubi_device *ubi);
/* io.c */
int ubi_io_read(const struct ubi_device *ubi, void *buf, int pnum, int offset,
......@@ -711,6 +816,15 @@ void ubi_free_internal_volumes(struct ubi_device *ubi);
void ubi_do_get_device_info(struct ubi_device *ubi, struct ubi_device_info *di);
void ubi_do_get_volume_info(struct ubi_device *ubi, struct ubi_volume *vol,
struct ubi_volume_info *vi);
/* scan.c */
int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
int pnum, const struct ubi_vid_hdr *vid_hdr);
/* fastmap.c */
size_t ubi_calc_fm_size(struct ubi_device *ubi);
int ubi_update_fastmap(struct ubi_device *ubi);
int ubi_scan_fastmap(struct ubi_device *ubi, struct ubi_attach_info *ai,
int fm_anchor);
/*
* ubi_rb_for_each_entry - walk an RB-tree.
......
......@@ -135,36 +135,48 @@
*/
#define WL_MAX_FAILURES 32
static int self_check_ec(struct ubi_device *ubi, int pnum, int ec);
static int self_check_in_wl_tree(const struct ubi_device *ubi,
struct ubi_wl_entry *e, struct rb_root *root);
static int self_check_in_pq(const struct ubi_device *ubi,
struct ubi_wl_entry *e);
#ifdef CONFIG_MTD_UBI_FASTMAP
/**
* update_fastmap_work_fn - calls ubi_update_fastmap from a work queue
* @wrk: the work description object
*/
static void update_fastmap_work_fn(struct work_struct *wrk)
{
struct ubi_device *ubi = container_of(wrk, struct ubi_device, fm_work);
ubi_update_fastmap(ubi);
}
/**
* ubi_is_fm_block - returns 1 if a PEB is currently used in a fastmap.
* @ubi: UBI device description object
* @pnum: the PEB to be checked
*/
static int ubi_is_fm_block(struct ubi_device *ubi, int pnum)
{
int i;
if (!ubi->fm)
return 0;
for (i = 0; i < ubi->fm->used_blocks; i++)
if (ubi->fm->e[i]->pnum == pnum)
return 1;
return 0;
}
#else
static int ubi_is_fm_block(struct ubi_device *ubi, int pnum)
{
return 0;
}
#endif
/**
* wl_tree_add - add a wear-leveling entry to a WL RB-tree.
* @e: the wear-leveling entry to add
......@@ -261,18 +273,16 @@ static int produce_free_peb(struct ubi_device *ubi)
{
int err;
spin_lock(&ubi->wl_lock);
while (!ubi->free.rb_node) {
spin_unlock(&ubi->wl_lock);
dbg_wl("do one work synchronously");
err = do_work(ubi);
spin_lock(&ubi->wl_lock);
if (err)
return err;
}
spin_unlock(&ubi->wl_lock);
return 0;
}
......@@ -339,16 +349,18 @@ static void prot_queue_add(struct ubi_device *ubi, struct ubi_wl_entry *e)
/**
* find_wl_entry - find wear-leveling entry closest to certain erase counter.
* @ubi: UBI device description object
* @root: the RB-tree to search
* @diff: maximum possible difference from the smallest erase counter
*
* This function looks for a wear leveling entry with erase counter closest to
* min + @diff, where min is the smallest erase counter.
*/
static struct ubi_wl_entry *find_wl_entry(struct ubi_device *ubi,
struct rb_root *root, int diff)
{
struct rb_node *p;
struct ubi_wl_entry *e, *prev_e = NULL;
int max;
e = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb);
......@@ -363,35 +375,143 @@ static struct ubi_wl_entry *find_wl_entry(struct rb_root *root, int diff)
p = p->rb_left;
else {
p = p->rb_right;
prev_e = e;
e = e1;
}
}
/* If no fastmap has been written and this WL entry can be used
* as anchor PEB, hold it back and return the second best WL entry
* such that fastmap can use the anchor PEB later. */
if (prev_e && !ubi->fm_disabled &&
!ubi->fm && e->pnum < UBI_FM_MAX_START)
return prev_e;
return e;
}
/**
* find_mean_wl_entry - find wear-leveling entry with medium erase counter.
* @ubi: UBI device description object
* @root: the RB-tree to search
*
* This function looks for a wear-leveling entry with a medium erase
* counter, but no greater than the lowest erase counter plus
* %WL_FREE_MAX_DIFF/2.
*/
static struct ubi_wl_entry *find_mean_wl_entry(struct ubi_device *ubi,
struct rb_root *root)
{
struct ubi_wl_entry *e, *first, *last;
first = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb);
last = rb_entry(rb_last(root), struct ubi_wl_entry, u.rb);
if (last->ec - first->ec < WL_FREE_MAX_DIFF) {
e = rb_entry(root->rb_node, struct ubi_wl_entry, u.rb);
#ifdef CONFIG_MTD_UBI_FASTMAP
/* If no fastmap has been written and this WL entry can be used
* as anchor PEB, hold it back and return the second best
* WL entry such that fastmap can use the anchor PEB later. */
if (e && !ubi->fm_disabled && !ubi->fm &&
e->pnum < UBI_FM_MAX_START)
e = rb_entry(rb_next(root->rb_node),
struct ubi_wl_entry, u.rb);
#endif
} else
e = find_wl_entry(ubi, root, WL_FREE_MAX_DIFF/2);
return e;
}
#ifdef CONFIG_MTD_UBI_FASTMAP
/**
* find_anchor_wl_entry - find wear-leveling entry to be used as anchor PEB.
* @root: the RB-tree to search
*/
static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root)
{
struct rb_node *p;
struct ubi_wl_entry *e, *victim = NULL;
int max_ec = UBI_MAX_ERASECOUNTER;
ubi_rb_for_each_entry(p, e, root, u.rb) {
if (e->pnum < UBI_FM_MAX_START && e->ec < max_ec) {
victim = e;
max_ec = e->ec;
}
}
return victim;
}
static int anchor_pebs_available(struct rb_root *root)
{
struct rb_node *p;
struct ubi_wl_entry *e;
ubi_rb_for_each_entry(p, e, root, u.rb)
if (e->pnum < UBI_FM_MAX_START)
return 1;
return 0;
}
/**
* ubi_wl_get_fm_peb - find a free physical eraseblock for fastmap.
* @ubi: UBI device description object
* @anchor: if non-zero, an anchor PEB (below UBI_FM_MAX_START) is returned
*
* The function returns a free physical eraseblock suitable for fastmap
* and removes it from the WL sub-system.
* Must be called with the wl_lock held!
*/
struct ubi_wl_entry *ubi_wl_get_fm_peb(struct ubi_device *ubi, int anchor)
{
struct ubi_wl_entry *e = NULL;
if (!ubi->free.rb_node || (ubi->free_count - ubi->beb_rsvd_pebs < 1))
goto out;
if (anchor)
e = find_anchor_wl_entry(&ubi->free);
else
e = find_mean_wl_entry(ubi, &ubi->free);
if (!e)
goto out;
self_check_in_wl_tree(ubi, e, &ubi->free);
/* remove it from the free list,
* the wl subsystem no longer knows this erase block */
rb_erase(&e->u.rb, &ubi->free);
ubi->free_count--;
out:
return e;
}
#endif
/**
* __wl_get_peb - get a physical eraseblock.
* @ubi: UBI device description object
*
* This function returns a physical eraseblock in case of success and a
* negative error code in case of failure. Might sleep.
*/
static int __wl_get_peb(struct ubi_device *ubi)
{
int err;
struct ubi_wl_entry *e;
retry:
spin_lock(&ubi->wl_lock);
if (!ubi->free.rb_node) {
if (ubi->works_count == 0) {
ubi_assert(list_empty(&ubi->works));
ubi_err("no free eraseblocks");
spin_unlock(&ubi->wl_lock);
ubi_assert(list_empty(&ubi->works));
return -ENOSPC;
}
spin_unlock(&ubi->wl_lock);
err = produce_free_peb(ubi);
if (err < 0)
......@@ -399,13 +519,11 @@ int ubi_wl_get_peb(struct ubi_device *ubi)
goto retry;
}
e = find_mean_wl_entry(ubi, &ubi->free);
if (!e) {
ubi_err("no free eraseblocks");
return -ENOSPC;
}
self_check_in_wl_tree(ubi, e, &ubi->free);
......@@ -414,10 +532,14 @@ int ubi_wl_get_peb(struct ubi_device *ubi)
* be protected from being moved for some time.
*/
rb_erase(&e->u.rb, &ubi->free);
ubi->free_count--;
dbg_wl("PEB %d EC %d", e->pnum, e->ec);
#ifndef CONFIG_MTD_UBI_FASTMAP
/* We have to enqueue e only if fastmap is disabled,
* if fastmap is enabled prot_queue_add() will be called by
* ubi_wl_get_peb() after removing e from the pool. */
prot_queue_add(ubi, e);
spin_unlock(&ubi->wl_lock);
#endif
err = ubi_self_check_all_ff(ubi, e->pnum, ubi->vid_hdr_aloffset,
ubi->peb_size - ubi->vid_hdr_aloffset);
if (err) {
......@@ -428,6 +550,150 @@ int ubi_wl_get_peb(struct ubi_device *ubi)
return e->pnum;
}
#ifdef CONFIG_MTD_UBI_FASTMAP
/**
* return_unused_pool_pebs - returns unused PEBs to the free tree.
* @ubi: UBI device description object
* @pool: fastmap pool description object
*/
static void return_unused_pool_pebs(struct ubi_device *ubi,
struct ubi_fm_pool *pool)
{
int i;
struct ubi_wl_entry *e;
for (i = pool->used; i < pool->size; i++) {
e = ubi->lookuptbl[pool->pebs[i]];
wl_tree_add(e, &ubi->free);
ubi->free_count++;
}
}
/**
* refill_wl_pool - refills the fastmap pool used by the
* WL sub-system.
* @ubi: UBI device description object
*/
static void refill_wl_pool(struct ubi_device *ubi)
{
struct ubi_wl_entry *e;
struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
return_unused_pool_pebs(ubi, pool);
for (pool->size = 0; pool->size < pool->max_size; pool->size++) {
if (!ubi->free.rb_node ||
(ubi->free_count - ubi->beb_rsvd_pebs < 5))
break;
e = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
self_check_in_wl_tree(ubi, e, &ubi->free);
rb_erase(&e->u.rb, &ubi->free);
ubi->free_count--;
pool->pebs[pool->size] = e->pnum;
}
pool->used = 0;
}
/**
* refill_wl_user_pool - refills the fastmap pool used by ubi_wl_get_peb().
* @ubi: UBI device description object
*/
static void refill_wl_user_pool(struct ubi_device *ubi)
{
struct ubi_fm_pool *pool = &ubi->fm_pool;
return_unused_pool_pebs(ubi, pool);
for (pool->size = 0; pool->size < pool->max_size; pool->size++) {
if (!ubi->free.rb_node ||
(ubi->free_count - ubi->beb_rsvd_pebs < 1))
break;
pool->pebs[pool->size] = __wl_get_peb(ubi);
if (pool->pebs[pool->size] < 0)
break;
}
pool->used = 0;
}
/**
* ubi_refill_pools - refills all fastmap PEB pools.
* @ubi: UBI device description object
*/
void ubi_refill_pools(struct ubi_device *ubi)
{
spin_lock(&ubi->wl_lock);
refill_wl_pool(ubi);
refill_wl_user_pool(ubi);
spin_unlock(&ubi->wl_lock);
}
/* ubi_wl_get_peb - works exactly like __wl_get_peb but keeps track of
* the fastmap pool.
*/
int ubi_wl_get_peb(struct ubi_device *ubi)
{
int ret;
struct ubi_fm_pool *pool = &ubi->fm_pool;
struct ubi_fm_pool *wl_pool = &ubi->fm_wl_pool;
if (!pool->size || !wl_pool->size || pool->used == pool->size ||
wl_pool->used == wl_pool->size)
ubi_update_fastmap(ubi);
/* we did not get a single free PEB */
if (!pool->size)
ret = -ENOSPC;
else {
spin_lock(&ubi->wl_lock);
ret = pool->pebs[pool->used++];
prot_queue_add(ubi, ubi->lookuptbl[ret]);
spin_unlock(&ubi->wl_lock);
}
return ret;
}
/* get_peb_for_wl - returns a PEB to be used internally by the WL sub-system.
*
* @ubi: UBI device description object
*/
static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
{
struct ubi_fm_pool *pool = &ubi->fm_wl_pool;
int pnum;
if (pool->used == pool->size || !pool->size) {
/* We cannot update the fastmap here because this
* function is called in atomic context.
* Let's fail here and refill/update it as soon as possible. */
schedule_work(&ubi->fm_work);
return NULL;
} else {
pnum = pool->pebs[pool->used++];
return ubi->lookuptbl[pnum];
}
}
#else
static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
{
return find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
}
int ubi_wl_get_peb(struct ubi_device *ubi)
{
int peb;
spin_lock(&ubi->wl_lock);
peb = __wl_get_peb(ubi);
spin_unlock(&ubi->wl_lock);
return peb;
}
#endif
/**
* prot_queue_del - remove a physical eraseblock from the protection queue.
* @ubi: UBI device description object
......@@ -558,14 +824,14 @@ static void serve_prot_queue(struct ubi_device *ubi)
}
/**
* __schedule_ubi_work - schedule a work.
* @ubi: UBI device description object
* @wrk: the work to schedule
*
* This function adds a work defined by @wrk to the tail of the pending works
* list. Can only be used if ubi->work_sem is already held in read mode!
*/
static void __schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
{
spin_lock(&ubi->wl_lock);
list_add_tail(&wrk->list, &ubi->works);
......@@ -576,9 +842,35 @@ static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
spin_unlock(&ubi->wl_lock);
}
/**
* schedule_ubi_work - schedule a work.
* @ubi: UBI device description object
* @wrk: the work to schedule
*
* This function adds a work defined by @wrk to the tail of the pending works
* list.
*/
static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
{
down_read(&ubi->work_sem);
__schedule_ubi_work(ubi, wrk);
up_read(&ubi->work_sem);
}
static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
int cancel);
#ifdef CONFIG_MTD_UBI_FASTMAP
/**
* ubi_is_erase_work - checks whether a work is erase work.
* @wrk: The work object to be checked
*/
int ubi_is_erase_work(struct ubi_work *wrk)
{
return wrk->func == erase_worker;
}
#endif
/**
* schedule_erase - schedule an erase work.
* @ubi: UBI device description object
......@@ -595,6 +887,9 @@ static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
{
struct ubi_work *wl_wrk;
ubi_assert(e);
ubi_assert(!ubi_is_fm_block(ubi, e->pnum));
dbg_wl("schedule erasure of PEB %d, EC %d, torture %d",
e->pnum, e->ec, torture);
......@@ -612,6 +907,79 @@ static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
return 0;
}
/**
* do_sync_erase - run the erase worker synchronously.
* @ubi: UBI device description object
* @e: the WL entry of the physical eraseblock to erase
* @vol_id: the volume ID that last used this PEB
* @lnum: the last used logical eraseblock number for the PEB
* @torture: if the physical eraseblock has to be tortured
*
*/
static int do_sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
int vol_id, int lnum, int torture)
{
struct ubi_work *wl_wrk;
dbg_wl("sync erase of PEB %i", e->pnum);
wl_wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
if (!wl_wrk)
return -ENOMEM;
wl_wrk->e = e;
wl_wrk->vol_id = vol_id;
wl_wrk->lnum = lnum;
wl_wrk->torture = torture;
return erase_worker(ubi, wl_wrk, 0);
}
#ifdef CONFIG_MTD_UBI_FASTMAP
/**
* ubi_wl_put_fm_peb - returns a PEB used in a fastmap to the wear-leveling
* sub-system.
* see: ubi_wl_put_peb()
*
* @ubi: UBI device description object
* @fm_e: physical eraseblock to return
* @lnum: the last used logical eraseblock number for the PEB
* @torture: if this physical eraseblock has to be tortured
*/
int ubi_wl_put_fm_peb(struct ubi_device *ubi, struct ubi_wl_entry *fm_e,
int lnum, int torture)
{
struct ubi_wl_entry *e;
int vol_id, pnum = fm_e->pnum;
dbg_wl("PEB %d", pnum);
ubi_assert(pnum >= 0);
ubi_assert(pnum < ubi->peb_count);
spin_lock(&ubi->wl_lock);
e = ubi->lookuptbl[pnum];
/* This can happen if we recovered from a fastmap the very
* first time and are now writing a new one. In this case the WL
* sub-system has never seen any PEB used by the original fastmap.
*/
if (!e) {
e = fm_e;
ubi_assert(e->ec >= 0);
ubi->lookuptbl[pnum] = e;
} else {
e->ec = fm_e->ec;
kfree(fm_e);
}
spin_unlock(&ubi->wl_lock);
vol_id = lnum ? UBI_FM_DATA_VOLUME_ID : UBI_FM_SB_VOLUME_ID;
return schedule_erase(ubi, e, vol_id, lnum, torture);
}
#endif
/**
* wear_leveling_worker - wear-leveling worker function.
* @ubi: UBI device description object
......@@ -627,6 +995,9 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
{
int err, scrubbing = 0, torture = 0, protect = 0, erroneous = 0;
int vol_id = -1, uninitialized_var(lnum);
#ifdef CONFIG_MTD_UBI_FASTMAP
int anchor = wrk->anchor;
#endif
struct ubi_wl_entry *e1, *e2;
struct ubi_vid_hdr *vid_hdr;
......@@ -660,14 +1031,35 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
goto out_cancel;
}
#ifdef CONFIG_MTD_UBI_FASTMAP
/* Check whether we need to produce an anchor PEB */
if (!anchor)
anchor = !anchor_pebs_available(&ubi->free);
if (anchor) {
e1 = find_anchor_wl_entry(&ubi->used);
if (!e1)
goto out_cancel;
e2 = get_peb_for_wl(ubi);
if (!e2)
goto out_cancel;
self_check_in_wl_tree(ubi, e1, &ubi->used);
rb_erase(&e1->u.rb, &ubi->used);
dbg_wl("anchor-move PEB %d to PEB %d", e1->pnum, e2->pnum);
} else if (!ubi->scrub.rb_node) {
#else
if (!ubi->scrub.rb_node) {
#endif
/*
* Now pick the least worn-out used physical eraseblock and a
* highly worn-out free physical eraseblock. If the erase
* counters differ much enough, start wear-leveling.
*/
e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
e2 = get_peb_for_wl(ubi);
if (!e2)
goto out_cancel;
if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) {
dbg_wl("no WL needed: min used EC %d, max free EC %d",
......@@ -682,14 +1074,15 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
/* Perform scrubbing */
scrubbing = 1;
e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, u.rb);
e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
e2 = get_peb_for_wl(ubi);
if (!e2)
goto out_cancel;
self_check_in_wl_tree(ubi, e1, &ubi->scrub);
rb_erase(&e1->u.rb, &ubi->scrub);
dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum);
}
self_check_in_wl_tree(ubi, e2, &ubi->free);
rb_erase(&e2->u.rb, &ubi->free);
ubi->move_from = e1;
ubi->move_to = e2;
spin_unlock(&ubi->wl_lock);
......@@ -806,7 +1199,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
ubi->move_to_put = ubi->wl_scheduled = 0;
spin_unlock(&ubi->wl_lock);
err = do_sync_erase(ubi, e1, vol_id, lnum, 0);
if (err) {
kmem_cache_free(ubi_wl_entry_slab, e1);
if (e2)
......@@ -821,7 +1214,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
*/
dbg_wl("PEB %d (LEB %d:%d) was put meanwhile, erase",
e2->pnum, vol_id, lnum);
err = do_sync_erase(ubi, e2, vol_id, lnum, 0);
if (err) {
kmem_cache_free(ubi_wl_entry_slab, e2);
goto out_ro;
......@@ -860,7 +1253,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
spin_unlock(&ubi->wl_lock);
ubi_free_vid_hdr(ubi, vid_hdr);
err = do_sync_erase(ubi, e2, vol_id, lnum, torture);
if (err) {
kmem_cache_free(ubi_wl_entry_slab, e2);
goto out_ro;
......@@ -901,12 +1294,13 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
/**
* ensure_wear_leveling - schedule wear-leveling if it is needed.
* @ubi: UBI device description object
* @nested: set to non-zero if this function is called from UBI worker
*
* This function checks if it is time to start wear-leveling and schedules it
* if yes. This function returns zero in case of success and a negative error
* code in case of failure.
*/
static int ensure_wear_leveling(struct ubi_device *ubi, int nested)
{
int err = 0;
struct ubi_wl_entry *e1;
......@@ -934,7 +1328,7 @@ static int ensure_wear_leveling(struct ubi_device *ubi)
* %UBI_WL_THRESHOLD.
*/
e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
e2 = find_wl_entry(ubi, &ubi->free, WL_FREE_MAX_DIFF);
if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD))
goto out_unlock;
......@@ -951,8 +1345,12 @@ static int ensure_wear_leveling(struct ubi_device *ubi)
goto out_cancel;
}
wrk->anchor = 0;
wrk->func = &wear_leveling_worker;
if (nested)
__schedule_ubi_work(ubi, wrk);
else
schedule_ubi_work(ubi, wrk);
return err;
out_cancel:
......@@ -963,6 +1361,38 @@ static int ensure_wear_leveling(struct ubi_device *ubi)
return err;
}
#ifdef CONFIG_MTD_UBI_FASTMAP
/**
* ubi_ensure_anchor_pebs - schedule wear-leveling to produce an anchor PEB.
* @ubi: UBI device description object
*/
int ubi_ensure_anchor_pebs(struct ubi_device *ubi)
{
struct ubi_work *wrk;
spin_lock(&ubi->wl_lock);
if (ubi->wl_scheduled) {
spin_unlock(&ubi->wl_lock);
return 0;
}
ubi->wl_scheduled = 1;
spin_unlock(&ubi->wl_lock);
wrk = kmalloc(sizeof(struct ubi_work), GFP_NOFS);
if (!wrk) {
spin_lock(&ubi->wl_lock);
ubi->wl_scheduled = 0;
spin_unlock(&ubi->wl_lock);
return -ENOMEM;
}
wrk->anchor = 1;
wrk->func = &wear_leveling_worker;
schedule_ubi_work(ubi, wrk);
return 0;
}
#endif
/**
* erase_worker - physical eraseblock erase worker function.
* @ubi: UBI device description object
......@@ -993,6 +1423,8 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
dbg_wl("erase PEB %d EC %d LEB %d:%d",
pnum, e->ec, wl_wrk->vol_id, wl_wrk->lnum);
ubi_assert(!ubi_is_fm_block(ubi, e->pnum));
err = sync_erase(ubi, e, wl_wrk->torture);
if (!err) {
/* Fine, we've erased it successfully */
......@@ -1000,6 +1432,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
spin_lock(&ubi->wl_lock);
wl_tree_add(e, &ubi->free);
ubi->free_count++;
spin_unlock(&ubi->wl_lock);
/*
......@@ -1009,7 +1442,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
serve_prot_queue(ubi);
/* And take care about wear-leveling */
err = ensure_wear_leveling(ubi, 1);
return err;
}
......@@ -1247,7 +1680,7 @@ int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum)
* Technically scrubbing is the same as wear-leveling, so it is done
* by the WL worker.
*/
return ensure_wear_leveling(ubi, 0);
}
/**
......@@ -1428,7 +1861,7 @@ static void cancel_pending(struct ubi_device *ubi)
*/
int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
{
int err, i, reserved_pebs, found_pebs = 0;
struct rb_node *rb1, *rb2;
struct ubi_ainf_volume *av;
struct ubi_ainf_peb *aeb, *tmp;
......@@ -1440,6 +1873,9 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
init_rwsem(&ubi->work_sem);
ubi->max_ec = ai->max_ec;
INIT_LIST_HEAD(&ubi->works);
#ifdef CONFIG_MTD_UBI_FASTMAP
INIT_WORK(&ubi->fm_work, update_fastmap_work_fn);
#endif
sprintf(ubi->bgt_name, UBI_BGT_NAME_PATTERN, ubi->ubi_num);
......@@ -1461,13 +1897,17 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
e->pnum = aeb->pnum;
e->ec = aeb->ec;
ubi_assert(!ubi_is_fm_block(ubi, e->pnum));
ubi->lookuptbl[e->pnum] = e;
if (schedule_erase(ubi, e, aeb->vol_id, aeb->lnum, 0)) {
kmem_cache_free(ubi_wl_entry_slab, e);
goto out_free;
}
found_pebs++;
}
ubi->free_count = 0;
list_for_each_entry(aeb, &ai->free, u.list) {
cond_resched();
......@@ -1478,8 +1918,14 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
e->pnum = aeb->pnum;
e->ec = aeb->ec;
ubi_assert(e->ec >= 0);
ubi_assert(!ubi_is_fm_block(ubi, e->pnum));
wl_tree_add(e, &ubi->free);
ubi->free_count++;
ubi->lookuptbl[e->pnum] = e;
found_pebs++;
}
ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) {
......@@ -1493,6 +1939,7 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
e->pnum = aeb->pnum;
e->ec = aeb->ec;
ubi->lookuptbl[e->pnum] = e;
if (!aeb->scrub) {
dbg_wl("add PEB %d EC %d to the used tree",
e->pnum, e->ec);
......@@ -1502,22 +1949,38 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
e->pnum, e->ec);
wl_tree_add(e, &ubi->scrub);
}
found_pebs++;
}
}
dbg_wl("found %i PEBs", found_pebs);
if (ubi->fm)
ubi_assert(ubi->good_peb_count ==
found_pebs + ubi->fm->used_blocks);
else
ubi_assert(ubi->good_peb_count == found_pebs);
reserved_pebs = WL_RESERVED_PEBS;
#ifdef CONFIG_MTD_UBI_FASTMAP
/* Reserve enough LEBs to store two fastmaps. */
reserved_pebs += (ubi->fm_size / ubi->leb_size) * 2;
#endif
if (ubi->avail_pebs < reserved_pebs) {
ubi_err("no enough physical eraseblocks (%d, need %d)",
ubi->avail_pebs, WL_RESERVED_PEBS);
ubi->avail_pebs, reserved_pebs);
if (ubi->corr_peb_count)
ubi_err("%d PEBs are corrupted and not used",
ubi->corr_peb_count);
goto out_free;
}
ubi->avail_pebs -= reserved_pebs;
ubi->rsvd_pebs += reserved_pebs;
/* Schedule wear-leveling if needed */
err = ensure_wear_leveling(ubi, 0);
if (err)
goto out_free;
......@@ -1596,7 +2059,7 @@ static int self_check_ec(struct ubi_device *ubi, int pnum, int ec)
}
read_ec = be64_to_cpu(ec_hdr->ec);
if (ec != read_ec && read_ec - ec > 1) {
ubi_err("self-check failed for PEB %d", pnum);
ubi_err("read EC is %lld, should be %d", read_ec, ec);
dump_stack();
......