Commit 574c3fda authored by Linus Torvalds

Merge branch 'linux-next' of git://git.infradead.org/~dedekind/ubi-2.6

* 'linux-next' of git://git.infradead.org/~dedekind/ubi-2.6:
  UBI: fix checkpatch.pl warnings
  UBI: simplify PEB protection code
  UBI: prepare for protection tree improvements
  UBI: return -ENOMEM upon failing vmalloc
  UBI: document UBI ioctls
  UBI: handle write errors in WL worker
  UBI: fix error path
  UBI: some code re-structuring
  UBI: fix deadlock
  UBI: fix warnings when debugging is enabled
parents 56635f7e f2863c54
--- a/Documentation/ioctl/ioctl-number.txt
+++ b/Documentation/ioctl/ioctl-number.txt
@@ -97,6 +97,7 @@ Code  Seq#   Include File             Comments
                      <http://linux01.gwdg.de/~alatham/ppdd.html>
 'M'   all    linux/soundcard.h
 'N'   00-1F  drivers/usb/scanner.h
+'O'   00-02  include/mtd/ubi-user.h   UBI
 'P'   all    linux/soundcard.h
 'Q'   all    linux/soundcard.h
 'R'   00-1F  linux/random.h
@@ -142,6 +143,9 @@ Code  Seq#   Include File             Comments
 'n'   00-7F  linux/ncp_fs.h
 'n'   E0-FF  video/matrox.h           matroxfb
 'o'   00-1F  fs/ocfs2/ocfs2_fs.h      OCFS2
+'o'   00-03  include/mtd/ubi-user.h   conflict! (OCFS2 and UBI overlaps)
+'o'   40-41  include/mtd/ubi-user.h   UBI
+'o'   01-A1  include/linux/dvb/*.h    DVB
 'p'   00-0F  linux/phantom.h          conflict! (OpenHaptics needs this)
 'p'   00-3F  linux/mc146818rtc.h      conflict!
 'p'   40-7F  linux/nvram.h
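For orientation, the new 'o' 0x00-0x03 range covers the per-device volume-management ioctls and 0x40-0x41 the attach/detach control ioctls. Below is a minimal user-space sketch of driving one of them; it assumes the UBI_IOCMKVOL request and the struct ubi_mkvol_req field names from include/mtd/ubi-user.h of this kernel generation, so treat the details as assumptions to verify against the installed header.

/* Hedged sketch: create a 1 MiB dynamic UBI volume through the 'o' 0x00
 * ioctl (UBI_IOCMKVOL). The request macro and struct fields are assumed
 * from include/mtd/ubi-user.h and must be checked against your kernel. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <mtd/ubi-user.h>

int main(void)
{
    struct ubi_mkvol_req req;
    int fd = open("/dev/ubi0", O_RDWR);   /* UBI device node */

    if (fd < 0) {
        perror("open /dev/ubi0");
        return 1;
    }

    memset(&req, 0, sizeof(req));
    req.vol_id = UBI_VOL_NUM_AUTO;        /* let UBI pick the volume ID */
    req.alignment = 1;
    req.bytes = 1024 * 1024;              /* 1 MiB volume */
    req.vol_type = UBI_DYNAMIC_VOLUME;
    strcpy(req.name, "test");
    req.name_len = strlen(req.name);

    if (ioctl(fd, UBI_IOCMKVOL, &req) < 0)
        perror("UBI_IOCMKVOL");
    close(fd);
    return 0;
}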
--- a/drivers/mtd/ubi/build.c
+++ b/drivers/mtd/ubi/build.c
@@ -815,19 +815,20 @@ int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset)
     if (err)
         goto out_free;

+    err = -ENOMEM;
     ubi->peb_buf1 = vmalloc(ubi->peb_size);
     if (!ubi->peb_buf1)
         goto out_free;

     ubi->peb_buf2 = vmalloc(ubi->peb_size);
     if (!ubi->peb_buf2)
         goto out_free;

 #ifdef CONFIG_MTD_UBI_DEBUG
     mutex_init(&ubi->dbg_buf_mutex);
     ubi->dbg_peb_buf = vmalloc(ubi->peb_size);
     if (!ubi->dbg_peb_buf)
         goto out_free;
 #endif

     err = attach_by_scanning(ubi);
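The hunk above is the "UBI: return -ENOMEM upon failing vmalloc" fix: err still held 0 from the preceding call, so a failed vmalloc() jumped to out_free while reporting success. Presetting err once covers every allocation in the chain. A freestanding sketch of the idiom, with hypothetical names rather than UBI code:

#include <errno.h>
#include <stdlib.h>

struct ctx { void *buf1, *buf2; };

/* Hypothetical illustration of the pattern used above: preset the error
 * code once, then let every failed allocation share one cleanup label
 * without re-assigning the code on each path. */
static int ctx_init(struct ctx *c)
{
    int err = 0;

    /* ... earlier steps may leave err == 0 here ... */

    err = -ENOMEM;
    c->buf1 = malloc(4096);
    if (!c->buf1)
        goto out_free;

    c->buf2 = malloc(4096);
    if (!c->buf2)
        goto out_free;

    return 0;

out_free:
    free(c->buf1);   /* free(NULL) is a no-op, so partial init is fine */
    free(c->buf2);
    return err;      /* correctly -ENOMEM, never a stale 0 */
}

int main(void)
{
    struct ctx c = { 0 };
    return ctx_init(&c) ? 1 : 0;
}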
--- a/drivers/mtd/ubi/cdev.c
+++ b/drivers/mtd/ubi/cdev.c
@@ -721,7 +721,8 @@ static int rename_volumes(struct ubi_device *ubi,
              * It seems we need to remove volume with name @re->new_name,
              * if it exists.
              */
-            desc = ubi_open_volume_nm(ubi->ubi_num, re->new_name, UBI_EXCLUSIVE);
+            desc = ubi_open_volume_nm(ubi->ubi_num, re->new_name,
+                                      UBI_EXCLUSIVE);
             if (IS_ERR(desc)) {
                 err = PTR_ERR(desc);
                 if (err == -ENODEV)
--- a/drivers/mtd/ubi/debug.h
+++ b/drivers/mtd/ubi/debug.h
@@ -27,11 +27,11 @@
 #define dbg_err(fmt, ...) ubi_err(fmt, ##__VA_ARGS__)

 #define ubi_assert(expr)  do {                                               \
     if (unlikely(!(expr))) {                                                 \
         printk(KERN_CRIT "UBI assert failed in %s at %u (pid %d)\n",         \
                __func__, __LINE__, current->pid);                            \
         ubi_dbg_dump_stack();                                                \
     }                                                                        \
 } while (0)

 #define dbg_msg(fmt, ...) \
--- a/drivers/mtd/ubi/eba.c
+++ b/drivers/mtd/ubi/eba.c
@@ -504,12 +504,9 @@ static int recover_peb(struct ubi_device *ubi, int pnum, int vol_id, int lnum,
     if (!vid_hdr)
         return -ENOMEM;

-    mutex_lock(&ubi->buf_mutex);
-
 retry:
     new_pnum = ubi_wl_get_peb(ubi, UBI_UNKNOWN);
     if (new_pnum < 0) {
-        mutex_unlock(&ubi->buf_mutex);
         ubi_free_vid_hdr(ubi, vid_hdr);
         return new_pnum;
     }
@@ -529,20 +526,23 @@ static int recover_peb(struct ubi_device *ubi, int pnum, int vol_id, int lnum,
         goto write_error;

     data_size = offset + len;
+    mutex_lock(&ubi->buf_mutex);
     memset(ubi->peb_buf1 + offset, 0xFF, len);

     /* Read everything before the area where the write failure happened */
     if (offset > 0) {
         err = ubi_io_read_data(ubi, ubi->peb_buf1, pnum, 0, offset);
         if (err && err != UBI_IO_BITFLIPS)
-            goto out_put;
+            goto out_unlock;
     }

     memcpy(ubi->peb_buf1 + offset, buf, len);

     err = ubi_io_write_data(ubi, ubi->peb_buf1, new_pnum, 0, data_size);
-    if (err)
+    if (err) {
+        mutex_unlock(&ubi->buf_mutex);
         goto write_error;
+    }

     mutex_unlock(&ubi->buf_mutex);
     ubi_free_vid_hdr(ubi, vid_hdr);
@@ -553,8 +553,9 @@ static int recover_peb(struct ubi_device *ubi, int pnum, int vol_id, int lnum,
     ubi_msg("data was successfully recovered");
     return 0;

-out_put:
+out_unlock:
     mutex_unlock(&ubi->buf_mutex);
+out_put:
     ubi_wl_put_peb(ubi, new_pnum, 1);
     ubi_free_vid_hdr(ubi, vid_hdr);
     return err;
@@ -567,7 +568,6 @@ static int recover_peb(struct ubi_device *ubi, int pnum, int vol_id, int lnum,
     ubi_warn("failed to write to PEB %d", new_pnum);
     ubi_wl_put_peb(ubi, new_pnum, 1);
     if (++tries > UBI_IO_RETRIES) {
-        mutex_unlock(&ubi->buf_mutex);
         ubi_free_vid_hdr(ubi, vid_hdr);
         return err;
     }
@@ -949,10 +949,14 @@ int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
  * This function copies logical eraseblock from physical eraseblock @from to
  * physical eraseblock @to. The @vid_hdr buffer may be changed by this
  * function. Returns:
  *   o %0 in case of success;
- *   o %1 if the operation was canceled and should be tried later (e.g.,
- *     because a bit-flip was detected at the target PEB);
- *   o %2 if the volume is being deleted and this LEB should not be moved.
+ *   o %1 if the operation was canceled because the volume is being deleted
+ *     or because the PEB was put meanwhile;
+ *   o %2 if the operation was canceled because there was a write error to the
+ *     target PEB;
+ *   o %-EAGAIN if the operation was canceled because a bit-flip was detected
+ *     in the target PEB;
+ *   o a negative error code in case of failure.
  */
 int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
                      struct ubi_vid_hdr *vid_hdr)
@@ -978,7 +982,7 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
     /*
      * Note, we may race with volume deletion, which means that the volume
      * this logical eraseblock belongs to might be being deleted. Since the
-     * volume deletion unmaps all the volume's logical eraseblocks, it will
+     * volume deletion un-maps all the volume's logical eraseblocks, it will
      * be locked in 'ubi_wl_put_peb()' and wait for the WL worker to finish.
      */
     vol = ubi->volumes[idx];
@@ -986,7 +990,7 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
         /* No need to do further work, cancel */
         dbg_eba("volume %d is being removed, cancel", vol_id);
         spin_unlock(&ubi->volumes_lock);
-        return 2;
+        return 1;
     }
     spin_unlock(&ubi->volumes_lock);
@@ -1023,7 +1027,7 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
     /*
      * OK, now the LEB is locked and we can safely start moving it. Since
-     * this function utilizes thie @ubi->peb1_buf buffer which is shared
+     * this function utilizes the @ubi->peb1_buf buffer which is shared
      * with some other functions, so lock the buffer by taking the
      * @ubi->buf_mutex.
      */
@@ -1068,8 +1072,11 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
     vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));

     err = ubi_io_write_vid_hdr(ubi, to, vid_hdr);
-    if (err)
+    if (err) {
+        if (err == -EIO)
+            err = 2;
         goto out_unlock_buf;
+    }

     cond_resched();
@@ -1079,14 +1086,17 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
         if (err != UBI_IO_BITFLIPS)
             ubi_warn("cannot read VID header back from PEB %d", to);
         else
-            err = 1;
+            err = -EAGAIN;
         goto out_unlock_buf;
     }

     if (data_size > 0) {
         err = ubi_io_write_data(ubi, ubi->peb_buf1, to, 0, aldata_size);
-        if (err)
+        if (err) {
+            if (err == -EIO)
+                err = 2;
             goto out_unlock_buf;
+        }

         cond_resched();
@@ -1101,15 +1111,16 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
                 ubi_warn("cannot read data back from PEB %d",
                          to);
             else
-                err = 1;
+                err = -EAGAIN;
             goto out_unlock_buf;
         }

         cond_resched();

         if (memcmp(ubi->peb_buf1, ubi->peb_buf2, aldata_size)) {
-            ubi_warn("read data back from PEB %d - it is different",
-                     to);
+            ubi_warn("read data back from PEB %d and it is "
+                     "different", to);
+            err = -EINVAL;
             goto out_unlock_buf;
         }
     }
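The reworked return contract of ubi_eba_copy_leb() is what lets the WL worker decide between retrying the move, torturing the target PEB, or simply re-protecting the source. A freestanding model of that dispatch is shown below (hypothetical code; the authoritative caller is wear_leveling_worker() in wl.c further down):

#include <errno.h>
#include <stdio.h>

/* Stand-alone model of the new ubi_eba_copy_leb() return contract
 * documented above: 0 = moved, 1 = canceled (volume deleted or PEB put),
 * 2 = write error on the target PEB, -EAGAIN = bit-flip on the target,
 * other negatives = fatal. Not UBI code; just the dispatch shape. */
static const char *classify(int err)
{
    if (err == 0)
        return "moved";
    if (err == -EAGAIN)
        return "canceled: bit-flip, retry the move later";
    if (err < 0)
        return "fatal error";
    if (err == 1)
        return "canceled: volume deleted or PEB put, re-protect the PEB";
    if (err == 2)
        return "canceled: write error, torture the target PEB";
    return "unexpected";
}

int main(void)
{
    int codes[] = { 0, 1, 2, -EAGAIN, -EINVAL };

    for (unsigned i = 0; i < sizeof(codes) / sizeof(codes[0]); i++)
        printf("%d -> %s\n", codes[i], classify(codes[i]));
    return 0;
}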
--- a/drivers/mtd/ubi/io.c
+++ b/drivers/mtd/ubi/io.c
@@ -637,8 +637,6 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
     dbg_io("read EC header from PEB %d", pnum);
     ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
-    if (UBI_IO_DEBUG)
-        verbose = 1;

     err = ubi_io_read(ubi, ec_hdr, pnum, 0, UBI_EC_HDR_SIZE);
     if (err) {
@@ -685,6 +683,9 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
             if (verbose)
                 ubi_warn("no EC header found at PEB %d, "
                          "only 0xFF bytes", pnum);
+            else if (UBI_IO_DEBUG)
+                dbg_msg("no EC header found at PEB %d, "
+                        "only 0xFF bytes", pnum);
             return UBI_IO_PEB_EMPTY;
         }
@@ -696,7 +697,9 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
             ubi_warn("bad magic number at PEB %d: %08x instead of "
                      "%08x", pnum, magic, UBI_EC_HDR_MAGIC);
             ubi_dbg_dump_ec_hdr(ec_hdr);
-        }
+        } else if (UBI_IO_DEBUG)
+            dbg_msg("bad magic number at PEB %d: %08x instead of "
+                    "%08x", pnum, magic, UBI_EC_HDR_MAGIC);
         return UBI_IO_BAD_EC_HDR;
     }
@@ -708,7 +711,9 @@ int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
             ubi_warn("bad EC header CRC at PEB %d, calculated "
                      "%#08x, read %#08x", pnum, crc, hdr_crc);
             ubi_dbg_dump_ec_hdr(ec_hdr);
-        }
+        } else if (UBI_IO_DEBUG)
+            dbg_msg("bad EC header CRC at PEB %d, calculated "
+                    "%#08x, read %#08x", pnum, crc, hdr_crc);
         return UBI_IO_BAD_EC_HDR;
     }
@@ -912,8 +917,6 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
     dbg_io("read VID header from PEB %d", pnum);
     ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
-    if (UBI_IO_DEBUG)
-        verbose = 1;

     p = (char *)vid_hdr - ubi->vid_hdr_shift;
     err = ubi_io_read(ubi, p, pnum, ubi->vid_hdr_aloffset,
@@ -960,6 +963,9 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
             if (verbose)
                 ubi_warn("no VID header found at PEB %d, "
                          "only 0xFF bytes", pnum);
+            else if (UBI_IO_DEBUG)
+                dbg_msg("no VID header found at PEB %d, "
+                        "only 0xFF bytes", pnum);
             return UBI_IO_PEB_FREE;
         }
@@ -971,7 +977,9 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
             ubi_warn("bad magic number at PEB %d: %08x instead of "
                      "%08x", pnum, magic, UBI_VID_HDR_MAGIC);
             ubi_dbg_dump_vid_hdr(vid_hdr);
-        }
+        } else if (UBI_IO_DEBUG)
+            dbg_msg("bad magic number at PEB %d: %08x instead of "
+                    "%08x", pnum, magic, UBI_VID_HDR_MAGIC);
         return UBI_IO_BAD_VID_HDR;
     }
@@ -983,7 +991,9 @@ int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
             ubi_warn("bad CRC at PEB %d, calculated %#08x, "
                      "read %#08x", pnum, crc, hdr_crc);
             ubi_dbg_dump_vid_hdr(vid_hdr);
-        }
+        } else if (UBI_IO_DEBUG)
+            dbg_msg("bad CRC at PEB %d, calculated %#08x, "
+                    "read %#08x", pnum, crc, hdr_crc);
         return UBI_IO_BAD_VID_HDR;
     }
@@ -1024,7 +1034,7 @@ int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum,
     err = paranoid_check_peb_ec_hdr(ubi, pnum);
     if (err)
-        return err > 0 ? -EINVAL: err;
+        return err > 0 ? -EINVAL : err;

     vid_hdr->magic = cpu_to_be32(UBI_VID_HDR_MAGIC);
     vid_hdr->version = UBI_VERSION;
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -73,6 +73,13 @@
  */
 #define UBI_IO_RETRIES 3

+/*
+ * Length of the protection queue. The length is effectively equivalent to the
+ * number of (global) erase cycles PEBs are protected from the wear-leveling
+ * worker.
+ */
+#define UBI_PROT_QUEUE_LEN 10
+
 /*
  * Error codes returned by the I/O sub-system.
  *
@@ -95,7 +102,8 @@ enum {
 /**
  * struct ubi_wl_entry - wear-leveling entry.
- * @rb: link in the corresponding RB-tree
+ * @u.rb: link in the corresponding (free/used) RB-tree
+ * @u.list: link in the protection queue
  * @ec: erase counter
  * @pnum: physical eraseblock number
  *
@@ -104,7 +112,10 @@ enum {
  * RB-trees. See WL sub-system for details.
  */
 struct ubi_wl_entry {
-    struct rb_node rb;
+    union {
+        struct rb_node rb;
+        struct list_head list;
+    } u;
     int ec;
     int pnum;
 };
@@ -288,7 +299,7 @@ struct ubi_wl_entry;
  * @beb_rsvd_level: normal level of PEBs reserved for bad PEB handling
  *
  * @autoresize_vol_id: ID of the volume which has to be auto-resized at the end
- *                     of UBI ititializetion
+ *                     of UBI initialization
  * @vtbl_slots: how many slots are available in the volume table
  * @vtbl_size: size of the volume table in bytes
  * @vtbl: in-RAM volume table copy
@@ -306,18 +317,17 @@ struct ubi_wl_entry;
  * @used: RB-tree of used physical eraseblocks
  * @free: RB-tree of free physical eraseblocks
  * @scrub: RB-tree of physical eraseblocks which need scrubbing
- * @prot: protection trees
- * @prot.pnum: protection tree indexed by physical eraseblock numbers
- * @prot.aec: protection tree indexed by absolute erase counter value
- * @wl_lock: protects the @used, @free, @prot, @lookuptbl, @abs_ec, @move_from,
- *           @move_to, @move_to_put @erase_pending, @wl_scheduled, and @works
+ * @pq: protection queue (contains physical eraseblocks which are temporarily
+ *      protected from the wear-leveling worker)
+ * @pq_head: protection queue head
+ * @wl_lock: protects the @used, @free, @pq, @pq_head, @lookuptbl, @move_from,
+ *           @move_to, @move_to_put @erase_pending, @wl_scheduled and @works
  *           fields
  * @move_mutex: serializes eraseblock moves
- * @work_sem: sycnhronizes the WL worker with use tasks
+ * @work_sem: synchronizes the WL worker with use tasks
  * @wl_scheduled: non-zero if the wear-leveling was scheduled
  * @lookuptbl: a table to quickly find a &struct ubi_wl_entry object for any
  *             physical eraseblock
- * @abs_ec: absolute erase counter
  * @move_from: physical eraseblock from where the data is being moved
  * @move_to: physical eraseblock where the data is being moved to
  * @move_to_put: if the "to" PEB was put
@@ -351,11 +361,11 @@ struct ubi_wl_entry;
  *
  * @peb_buf1: a buffer of PEB size used for different purposes
  * @peb_buf2: another buffer of PEB size used for different purposes
- * @buf_mutex: proptects @peb_buf1 and @peb_buf2
+ * @buf_mutex: protects @peb_buf1 and @peb_buf2
  * @ckvol_mutex: serializes static volume checking when opening
- * @mult_mutex: serializes operations on multiple volumes, like re-nameing
+ * @mult_mutex: serializes operations on multiple volumes, like re-naming
  * @dbg_peb_buf: buffer of PEB size used for debugging
- * @dbg_buf_mutex: proptects @dbg_peb_buf
+ * @dbg_buf_mutex: protects @dbg_peb_buf
  */
 struct ubi_device {
     struct cdev cdev;
@@ -392,16 +402,13 @@ struct ubi_device {
     struct rb_root used;
     struct rb_root free;
     struct rb_root scrub;
-    struct {
-        struct rb_root pnum;
-        struct rb_root aec;
-    } prot;
+    struct list_head pq[UBI_PROT_QUEUE_LEN];
+    int pq_head;
     spinlock_t wl_lock;
     struct mutex move_mutex;
     struct rw_semaphore work_sem;
     int wl_scheduled;
     struct ubi_wl_entry **lookuptbl;
-    unsigned long long abs_ec;
     struct ubi_wl_entry *move_from;
     struct ubi_wl_entry *move_to;
     int move_to_put;
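UBI_PROT_QUEUE_LEN together with the new @pq[]/@pq_head pair forms a circular queue of list heads: a PEB is enqueued in the bucket just behind the head, and each erase cycle releases the head bucket and advances it, so an entry survives exactly UBI_PROT_QUEUE_LEN cycles. A freestanding user-space model of that mechanism (hypothetical code, not the kernel implementation, which follows in wl.c):

#include <stdio.h>

/* Model of UBI's protection queue: pq[] is a ring of UBI_PROT_QUEUE_LEN
 * buckets, pq_head is the bucket "served" on the next erase cycle. A PEB
 * added at the tail (pq_head - 1) therefore survives UBI_PROT_QUEUE_LEN
 * serves before its bucket comes up and it is released to the used tree. */
#define UBI_PROT_QUEUE_LEN 10
#define MAX_PEBS 8

static int pq[UBI_PROT_QUEUE_LEN][MAX_PEBS]; /* bucket -> protected PEBs */
static int pq_len[UBI_PROT_QUEUE_LEN];
static int pq_head;

static void prot_queue_add(int pnum)
{
    int tail = pq_head - 1;

    if (tail < 0)
        tail = UBI_PROT_QUEUE_LEN - 1;
    pq[tail][pq_len[tail]++] = pnum;
    printf("protect PEB %d in bucket %d\n", pnum, tail);
}

/* Called once per erase cycle: release the head bucket, advance the head. */
static void serve_prot_queue(void)
{
    for (int i = 0; i < pq_len[pq_head]; i++)
        printf("  PEB %d protection over, move to used tree\n",
               pq[pq_head][i]);
    pq_len[pq_head] = 0;
    if (++pq_head == UBI_PROT_QUEUE_LEN)
        pq_head = 0;
}

int main(void)
{
    prot_queue_add(42);
    for (int cycle = 1; cycle <= UBI_PROT_QUEUE_LEN; cycle++) {
        printf("erase cycle %d:\n", cycle);
        serve_prot_queue();   /* PEB 42 is released on the 10th cycle */
    }
    return 0;
}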
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -22,7 +22,7 @@
  * UBI wear-leveling sub-system.
  *
  * This sub-system is responsible for wear-leveling. It works in terms of
- * physical* eraseblocks and erase counters and knows nothing about logical
+ * physical eraseblocks and erase counters and knows nothing about logical
  * eraseblocks, volumes, etc. From this sub-system's perspective all physical
  * eraseblocks are of two types - used and free. Used physical eraseblocks are
  * those that were "get" by the 'ubi_wl_get_peb()' function, and free physical
@@ -55,8 +55,39 @@
  *
  * As it was said, for the UBI sub-system all physical eraseblocks are either
  * "free" or "used". Free eraseblock are kept in the @wl->free RB-tree, while
- * used eraseblocks are kept in a set of different RB-trees: @wl->used,
- * @wl->prot.pnum, @wl->prot.aec, and @wl->scrub.
+ * used eraseblocks are kept in @wl->used or @wl->scrub RB-trees, or
+ * (temporarily) in the @wl->pq queue.
+ *
+ * When the WL sub-system returns a physical eraseblock, the physical
+ * eraseblock is protected from being moved for some "time". For this reason,
+ * the physical eraseblock is not directly moved from the @wl->free tree to the
+ * @wl->used tree. There is a protection queue in between where this
+ * physical eraseblock is temporarily stored (@wl->pq).
+ *
+ * All this protection stuff is needed because:
+ *  o we don't want to move physical eraseblocks just after we have given them
+ *    to the user; instead, we first want to let users fill them up with data;
+ *
+ *  o there is a chance that the user will put the physical eraseblock very
+ *    soon, so it makes sense not to move it for some time, but wait; this is
+ *    especially important in case of "short term" physical eraseblocks.
+ *
+ * Physical eraseblocks stay protected only for limited time. But the "time" is
+ * measured in erase cycles in this case. This is implemented with help of the
+ * protection queue. Eraseblocks are put to the tail of this queue when they
+ * are returned by the 'ubi_wl_get_peb()', and eraseblocks are removed from the
+ * head of the queue on each erase operation (for any eraseblock). So the
+ * length of the queue defines how many (global) erase cycles PEBs are
+ * protected.
+ *
+ * To put it differently, each physical eraseblock has 2 main states: free and
+ * used. The former state corresponds to the @wl->free tree. The latter state
+ * is split up on several sub-states:
+ *  o the WL movement is allowed (@wl->used tree);
+ *  o the WL movement is temporarily prohibited (@wl->pq queue);
+ *  o scrubbing is needed (@wl->scrub tree).
+ *
+ * Depending on the sub-state, wear-leveling entries of the used physical
+ * eraseblocks may be kept in one of those structures.
  *
  * Note, in this implementation, we keep a small in-RAM object for each physical
  * eraseblock. This is surely not a scalable solution. But it appears to be good
  * enough for moderately large flashes and it is simple. In future, one may
@@ -70,9 +101,6 @@
  * target PEB, we pick a PEB with the highest EC if our PEB is "old" and we
  * pick target PEB with an average EC if our PEB is not very "old". This is a
  * room for future re-works of the WL sub-system.
- *
- * Note: the stuff with protection trees looks too complex and is difficult to
- * understand. Should be fixed.
  */

 #include <linux/slab.h>
@@ -84,14 +112,6 @@
 /* Number of physical eraseblocks reserved for wear-leveling purposes */
 #define WL_RESERVED_PEBS 1

-/*
- * How many erase cycles are short term, unknown, and long term physical
- * eraseblocks protected.
- */
-#define ST_PROTECTION 16
-#define U_PROTECTION 10
-#define LT_PROTECTION 4
-
 /*
  * Maximum difference between two erase counters. If this threshold is
  * exceeded, the WL sub-system starts moving data from used physical
@@ -119,65 +139,10 @@
  */
 #define WL_MAX_FAILURES 32

-/**
- * struct ubi_wl_prot_entry - PEB protection entry.
- * @rb_pnum: link in the @wl->prot.pnum RB-tree
- * @rb_aec: link in the @wl->prot.aec RB-tree
- * @abs_ec: the absolute erase counter value when the protection ends
- * @e: the wear-leveling entry of the physical eraseblock under protection
- *
- * When the WL sub-system returns a physical eraseblock, the physical
- * eraseblock is protected from being moved for some "time". For this reason,
- * the physical eraseblock is not directly moved from the @wl->free tree to the
- * @wl->used tree. There is one more tree in between where this physical
- * eraseblock is temporarily stored (@wl->prot).
- *
- * All this protection stuff is needed because:
- *  o we don't want to move physical eraseblocks just after we have given them
- *    to the user; instead, we first want to let users fill them up with data;
- *
- *  o there is a chance that the user will put the physical eraseblock very
- *    soon, so it makes sense not to move it for some time, but wait; this is
- *    especially important in case of "short term" physical eraseblocks.
- *
- * Physical eraseblocks stay protected only for limited time. But the "time" is
- * measured in erase cycles in this case. This is implemented with help of the
- * absolute erase counter (@wl->abs_ec). When it reaches certain value, the
- * physical eraseblocks are moved from the protection trees (@wl->prot.*) to
- * the @wl->used tree.
- *
- * Protected physical eraseblocks are searched by physical eraseblock number
- * (when they are put) and by the absolute erase counter (to check if it is
- * time to move them to the @wl->used tree). So there are actually 2 RB-trees
- * storing the protected physical eraseblocks: @wl->prot.pnum and
- * @wl->prot.aec. They are referred to as the "protection" trees. The
- * first one is indexed by the physical eraseblock number. The second one is
- * indexed by the absolute erase counter. Both trees store
- * &struct ubi_wl_prot_entry objects.
- *
- * Each physical eraseblock has 2 main states: free and used. The former state
- * corresponds to the @wl->free tree. The latter state is split up on several
- * sub-states:
- *  o the WL movement is allowed (@wl->used tree);
- *  o the WL movement is temporarily prohibited (@wl->prot.pnum and
- *    @wl->prot.aec trees);
- *  o scrubbing is needed (@wl->scrub tree).
- *
- * Depending on the sub-state, wear-leveling entries of the used physical
- * eraseblocks may be kept in one of those trees.
- */
-struct ubi_wl_prot_entry {
-    struct rb_node rb_pnum;
-    struct rb_node rb_aec;
-    unsigned long long abs_ec;
-    struct ubi_wl_entry *e;
-};
-
 /**
  * struct ubi_work - UBI work description data structure.
  * @list: a link in the list of pending works
  * @func: worker function
- * @priv: private data of the worker function
  * @e: physical eraseblock to erase
  * @torture: if the physical eraseblock has to be tortured
  *
@@ -198,9 +163,11 @@ struct ubi_work {
 static int paranoid_check_ec(struct ubi_device *ubi, int pnum, int ec);
 static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e,
                                      struct rb_root *root);
+static int paranoid_check_in_pq(struct ubi_device *ubi, struct ubi_wl_entry *e);
 #else
 #define paranoid_check_ec(ubi, pnum, ec) 0
 #define paranoid_check_in_wl_tree(e, root)
+#define paranoid_check_in_pq(ubi, e) 0
 #endif

 /**
@@ -220,7 +187,7 @@ static void wl_tree_add(struct ubi_wl_entry *e, struct rb_root *root)
         struct ubi_wl_entry *e1;

         parent = *p;
-        e1 = rb_entry(parent, struct ubi_wl_entry, rb);
+        e1 = rb_entry(parent, struct ubi_wl_entry, u.rb);

         if (e->ec < e1->ec)
             p = &(*p)->rb_left;
@@ -235,8 +202,8 @@ static void wl_tree_add(struct ubi_wl_entry *e, struct rb_root *root)
         }
     }

-    rb_link_node(&e->rb, parent, p);
-    rb_insert_color(&e->rb, root);
+    rb_link_node(&e->u.rb, parent, p);
+    rb_insert_color(&e->u.rb, root);
 }

 /**
@@ -331,7 +298,7 @@ static int in_wl_tree(struct ubi_wl_entry *e, struct rb_root *root)
     while (p) {
         struct ubi_wl_entry *e1;

-        e1 = rb_entry(p, struct ubi_wl_entry, rb);
+        e1 = rb_entry(p, struct ubi_wl_entry, u.rb);

         if (e->pnum == e1->pnum) {
             ubi_assert(e == e1);
@@ -355,50 +322,24 @@ static int in_wl_tree(struct ubi_wl_entry *e, struct rb_root *root)
 }

 /**
- * prot_tree_add - add physical eraseblock to protection trees.
+ * prot_queue_add - add physical eraseblock to the protection queue.
  * @ubi: UBI device description object
  * @e: the physical eraseblock to add
- * @pe: protection entry object to use
- * @abs_ec: absolute erase counter value when this physical eraseblock has
- *          to be removed from the protection trees.
  *
- * @wl->lock has to be locked.
+ * This function adds @e to the tail of the protection queue @ubi->pq, where
+ * @e will stay for %UBI_PROT_QUEUE_LEN erase operations and will be
+ * temporarily protected from the wear-leveling worker. Note, @wl->lock has to
+ * be locked.
  */
-static void prot_tree_add(struct ubi_device *ubi, struct ubi_wl_entry *e,
-                          struct ubi_wl_prot_entry *pe, int abs_ec)
+static void prot_queue_add(struct ubi_device *ubi, struct ubi_wl_entry *e)
 {
-    struct rb_node **p, *parent = NULL;
-    struct ubi_wl_prot_entry *pe1;
+    int pq_tail = ubi->pq_head - 1;

-    pe->e = e;
-    pe->abs_ec = ubi->abs_ec + abs_ec;
-
-    p = &ubi->prot.pnum.rb_node;
-    while (*p) {
-        parent = *p;
-        pe1 = rb_entry(parent, struct ubi_wl_prot_entry, rb_pnum);
-
-        if (e->pnum < pe1->e->pnum)
-            p = &(*p)->rb_left;
-        else
-            p = &(*p)->rb_right;
-    }
-    rb_link_node(&pe->rb_pnum, parent, p);
-    rb_insert_color(&pe->rb_pnum, &ubi->prot.pnum);
-
-    p = &ubi->prot.aec.rb_node;
-    parent = NULL;
-    while (*p) {
-        parent = *p;
-        pe1 = rb_entry(parent, struct ubi_wl_prot_entry, rb_aec);
-
-        if (pe->abs_ec < pe1->abs_ec)
-            p = &(*p)->rb_left;
-        else
-            p = &(*p)->rb_right;
-    }
-    rb_link_node(&pe->rb_aec, parent, p);
-    rb_insert_color(&pe->rb_aec, &ubi->prot.aec);
+    if (pq_tail < 0)
+        pq_tail = UBI_PROT_QUEUE_LEN - 1;
+    ubi_assert(pq_tail >= 0 && pq_tail < UBI_PROT_QUEUE_LEN);
+    list_add_tail(&e->u.list, &ubi->pq[pq_tail]);
+    dbg_wl("added PEB %d EC %d to the protection queue", e->pnum, e->ec);
 }

 /**
@@ -414,14 +355,14 @@ static struct ubi_wl_entry *find_wl_entry(struct rb_root *root, int max)
     struct rb_node *p;
     struct ubi_wl_entry *e;

-    e = rb_entry(rb_first(root), struct ubi_wl_entry, rb);
+    e = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb);
     max += e->ec;

     p = root->rb_node;
     while (p) {
         struct ubi_wl_entry *e1;

-        e1 = rb_entry(p, struct ubi_wl_entry, rb);
+        e1 = rb_entry(p, struct ubi_wl_entry, u.rb);
         if (e1->ec >= max)
             p = p->rb_left;
         else {
@@ -443,17 +384,12 @@ static struct ubi_wl_entry *find_wl_entry(struct rb_root *root, int max)
  */
 int ubi_wl_get_peb(struct ubi_device *ubi, int dtype)
 {
-    int err, protect, medium_ec;
+    int err, medium_ec;
     struct ubi_wl_entry *e, *first, *last;
-    struct ubi_wl_prot_entry *pe;

     ubi_assert(dtype == UBI_LONGTERM || dtype == UBI_SHORTTERM ||
                dtype == UBI_UNKNOWN);

-    pe = kmalloc(sizeof(struct ubi_wl_prot_entry), GFP_NOFS);
-    if (!pe)
-        return -ENOMEM;
-
 retry:
     spin_lock(&ubi->wl_lock);
     if (!ubi->free.rb_node) {
@@ -461,16 +397,13 @@ int ubi_wl_get_peb(struct ubi_device *ubi, int dtype)
             ubi_assert(list_empty(&ubi->works));
             ubi_err("no free eraseblocks");
             spin_unlock(&ubi->wl_lock);
-            kfree(pe);
             return -ENOSPC;
         }
         spin_unlock(&ubi->wl_lock);

         err = produce_free_peb(ubi);
-        if (err < 0) {
-            kfree(pe);
+        if (err < 0)
             return err;
-        }
         goto retry;
     }
@@ -483,7 +416,6 @@ int ubi_wl_get_peb(struct ubi_device *ubi, int dtype)
          * %WL_FREE_MAX_DIFF.
          */
         e = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
-        protect = LT_PROTECTION;
         break;
     case UBI_UNKNOWN:
         /*
@@ -492,81 +424,63 @@ int ubi_wl_get_peb(struct ubi_device *ubi, int dtype)
          * eraseblock with erase counter greater or equivalent than the
          * lowest erase counter plus %WL_FREE_MAX_DIFF.
          */
-        first = rb_entry(rb_first(&ubi->free), struct ubi_wl_entry, rb);
-        last = rb_entry(rb_last(&ubi->free), struct ubi_wl_entry, rb);
+        first = rb_entry(rb_first(&ubi->free), struct ubi_wl_entry,
+                         u.rb);
+        last = rb_entry(rb_last(&ubi->free), struct ubi_wl_entry, u.rb);

         if (last->ec - first->ec < WL_FREE_MAX_DIFF)
             e = rb_entry(ubi->free.rb_node,
-                         struct ubi_wl_entry, rb);
+                         struct ubi_wl_entry, u.rb);
         else {
             medium_ec = (first->ec + WL_FREE_MAX_DIFF)/2;
             e = find_wl_entry(&ubi->free, medium_ec);
         }
-        protect = U_PROTECTION;
         break;
     case UBI_SHORTTERM:
         /*
          * For short term data we pick a physical eraseblock with the
          * lowest erase counter as we expect it will be erased soon.
          */
-        e = rb_entry(rb_first(&ubi->free), struct ubi_wl_entry, rb);
-        protect = ST_PROTECTION;
+        e = rb_entry(rb_first(&ubi->free), struct ubi_wl_entry, u.rb);
         break;
     default:
-        protect = 0;
-        e = NULL;
         BUG();
     }

+    paranoid_check_in_wl_tree(e, &ubi->free);
+
     /*
-     * Move the physical eraseblock to the protection trees where it will
+     * Move the physical eraseblock to the protection queue where it will
      * be protected from being moved for some time.
      */
-    paranoid_check_in_wl_tree(e, &ubi->free);
-    rb_erase(&e->rb, &ubi->free);
-    prot_tree_add(ubi, e, pe, protect);
-    dbg_wl("PEB %d EC %d, protection %d", e->pnum, e->ec, protect);
+    rb_erase(&e->u.rb, &ubi->free);
+    dbg_wl("PEB %d EC %d", e->pnum, e->ec);
+    prot_queue_add(ubi, e);
     spin_unlock(&ubi->wl_lock);
     return e->pnum;
 }

 /**
- * prot_tree_del - remove a physical eraseblock from the protection trees
+ * prot_queue_del - remove a physical eraseblock from the protection queue.
  * @ubi: UBI device description object
  * @pnum: the physical eraseblock to remove
  *
- * This function returns PEB @pnum from the protection trees and returns zero
- * in case of success and %-ENODEV if the PEB was not found in the protection
- * trees.
+ * This function deletes PEB @pnum from the protection queue and returns zero
+ * in case of success and %-ENODEV if the PEB was not found.
  */
-static int prot_tree_del(struct ubi_device *ubi, int pnum)
+static int prot_queue_del(struct ubi_device *ubi, int pnum)
 {
-    struct rb_node *p;
-    struct ubi_wl_prot_entry *pe = NULL;
-
-    p = ubi->prot.pnum.rb_node;
-    while (p) {
-        pe = rb_entry(p, struct ubi_wl_prot_entry, rb_pnum);
-
-        if (pnum == pe->e->pnum)
-            goto found;
+    struct ubi_wl_entry *e;

-        if (pnum < pe->e->pnum)
-            p = p->rb_left;
-        else
-            p = p->rb_right;
-    }
+    e = ubi->lookuptbl[pnum];
+    if (!e)
+        return -ENODEV;

-    return -ENODEV;
+    if (paranoid_check_in_pq(ubi, e))
+        return -ENODEV;

-found:
-    ubi_assert(pe->e->pnum == pnum);
-    rb_erase(&pe->rb_aec, &ubi->prot.aec);
-    rb_erase(&pe->rb_pnum, &ubi->prot.pnum);
-    kfree(pe);
+    list_del(&e->u.list);
+    dbg_wl("deleted PEB %d from the protection queue", e->pnum);
     return 0;
 }
@@ -632,47 +546,47 @@ static int sync_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
 }

 /**
- * check_protection_over - check if it is time to stop protecting some PEBs.
+ * serve_prot_queue - check if it is time to stop protecting PEBs.
  * @ubi: UBI device description object
  *
- * This function is called after each erase operation, when the absolute erase
- * counter is incremented, to check if some physical eraseblock have not to be
- * protected any longer. These physical eraseblocks are moved from the
- * protection trees to the used tree.
+ * This function is called after each erase operation and removes PEBs from the
+ * tail of the protection queue. These PEBs have been protected for long enough
+ * and should be moved to the used tree.
  */
-static void check_protection_over(struct ubi_device *ubi)
+static void serve_prot_queue(struct ubi_device *ubi)
 {
-    struct ubi_wl_prot_entry *pe;
+    struct ubi_wl_entry *e, *tmp;
+    int count;

     /*
      * There may be several protected physical eraseblock to remove,
      * process them all.
      */
-    while (1) {
-        spin_lock(&ubi->wl_lock);
-        if (!ubi->prot.aec.rb_node) {
-            spin_unlock(&ubi->wl_lock);
-            break;
-        }
-
-        pe = rb_entry(rb_first(&ubi->prot.aec),
-                      struct ubi_wl_prot_entry, rb_aec);
-
-        if (pe->abs_ec > ubi->abs_ec) {
-            spin_unlock(&ubi->wl_lock);
-            break;
-        }
+repeat:
+    count = 0;
+    spin_lock(&ubi->wl_lock);
+    list_for_each_entry_safe(e, tmp, &ubi->pq[ubi->pq_head], u.list) {
+        dbg_wl("PEB %d EC %d protection over, move to used tree",
+               e->pnum, e->ec);

-        dbg_wl("PEB %d protection over, abs_ec %llu, PEB abs_ec %llu",
-               pe->e->pnum, ubi->abs_ec, pe->abs_ec);
-        rb_erase(&pe->rb_aec, &ubi->prot.aec);
-        rb_erase(&pe->rb_pnum, &ubi->prot.pnum);
-        wl_tree_add(pe->e, &ubi->used);
-        spin_unlock(&ubi->wl_lock);
-
-        kfree(pe);
-        cond_resched();
+        list_del(&e->u.list);
+        wl_tree_add(e, &ubi->used);
+        if (count++ > 32) {
+            /*
+             * Let's be nice and avoid holding the spinlock for
+             * too long.
+             */
+            spin_unlock(&ubi->wl_lock);
+            cond_resched();
+            goto repeat;
+        }
     }
+
+    ubi->pq_head += 1;
+    if (ubi->pq_head == UBI_PROT_QUEUE_LEN)
+        ubi->pq_head = 0;
+    ubi_assert(ubi->pq_head >= 0 && ubi->pq_head < UBI_PROT_QUEUE_LEN);
+    spin_unlock(&ubi->wl_lock);
 }

 /**
@@ -680,8 +594,8 @@ static void check_protection_over(struct ubi_device *ubi)
  * @ubi: UBI device description object
  * @wrk: the work to schedule
  *
- * This function enqueues a work defined by @wrk to the tail of the pending
- * works list.
+ * This function adds a work defined by @wrk to the tail of the pending works
+ * list.
  */
 static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
 {
@@ -739,13 +653,11 @@ static int schedule_erase(struct ubi_device *ubi, struct ubi_wl_entry *e,
 static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
                                 int cancel)
 {
-    int err, put = 0, scrubbing = 0, protect = 0;
-    struct ubi_wl_prot_entry *uninitialized_var(pe);
+    int err, scrubbing = 0, torture = 0;
     struct ubi_wl_entry *e1, *e2;
     struct ubi_vid_hdr *vid_hdr;

     kfree(wrk);
-
     if (cancel)
         return 0;
@@ -781,7 +693,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
          * highly worn-out free physical eraseblock. If the erase
          * counters differ much enough, start wear-leveling.
          */
-        e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, rb);
+        e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
         e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);

         if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD)) {
@@ -790,21 +702,21 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
             goto out_cancel;
         }
         paranoid_check_in_wl_tree(e1, &ubi->used);
-        rb_erase(&e1->rb, &ubi->used);
+        rb_erase(&e1->u.rb, &ubi->used);
         dbg_wl("move PEB %d EC %d to PEB %d EC %d",
                e1->pnum, e1->ec, e2->pnum, e2->ec);
     } else {
         /* Perform scrubbing */
         scrubbing = 1;
-        e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, rb);
+        e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, u.rb);
         e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);
         paranoid_check_in_wl_tree(e1, &ubi->scrub);
-        rb_erase(&e1->rb, &ubi->scrub);
+        rb_erase(&e1->u.rb, &ubi->scrub);
         dbg_wl("scrub PEB %d to PEB %d", e1->pnum, e2->pnum);
     }

     paranoid_check_in_wl_tree(e2, &ubi->free);
-    rb_erase(&e2->rb, &ubi->free);
+    rb_erase(&e2->u.rb, &ubi->free);
     ubi->move_from = e1;
     ubi->move_to = e2;
     spin_unlock(&ubi->wl_lock);
@@ -844,46 +756,67 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
     err = ubi_eba_copy_leb(ubi, e1->pnum, e2->pnum, vid_hdr);
     if (err) {
+        if (err == -EAGAIN)
+            goto out_not_moved;
         if (err < 0)
             goto out_error;
-        if (err == 1)
+        if (err == 2) {
+            /* Target PEB write error, torture it */
+            torture = 1;
             goto out_not_moved;
+        }

         /*
-         * For some reason the LEB was not moved - it might be because
-         * the volume is being deleted. We should prevent this PEB from
-         * being selected for wear-levelling movement for some "time",
-         * so put it to the protection tree.
+         * The LEB has not been moved because the volume is being
+         * deleted or the PEB has been put meanwhile. We should prevent
+         * this PEB from being selected for wear-leveling movement
+         * again, so put it to the protection queue.
         */

-        dbg_wl("cancelled moving PEB %d", e1->pnum);
-        pe = kmalloc(sizeof(struct ubi_wl_prot_entry), GFP_NOFS);
-        if (!pe) {
-            err = -ENOMEM;
-            goto out_error;
-        }
+        dbg_wl("canceled moving PEB %d", e1->pnum);
+        ubi_assert(err == 1);
+
+        ubi_free_vid_hdr(ubi, vid_hdr);
+        vid_hdr = NULL;
+
+        spin_lock(&ubi->wl_lock);
+        prot_queue_add(ubi, e1);
+        ubi_assert(!ubi->move_to_put);
+        ubi->move_from = ubi->move_to = NULL;
+        ubi->wl_scheduled = 0;
+        spin_unlock(&ubi->wl_lock);

-        protect = 1;
+        e1 = NULL;
+        err = schedule_erase(ubi, e2, 0);
+        if (err)
+            goto out_error;
+        mutex_unlock(&ubi->move_mutex);
+        return 0;
     }

+    /* The PEB has been successfully moved */
     ubi_free_vid_hdr(ubi, vid_hdr);
-    if (scrubbing && !protect)
+    vid_hdr = NULL;
+    if (scrubbing)
         ubi_msg("scrubbed PEB %d, data moved to PEB %d",
                 e1->pnum, e2->pnum);

     spin_lock(&ubi->wl_lock);
-    if (protect)
-        prot_tree_add(ubi, e1, pe, protect);
-    if (!ubi->move_to_put)
+    if (!ubi->move_to_put) {
         wl_tree_add(e2, &ubi->used);
-    else
-        put = 1;
+        e2 = NULL;
+    }
     ubi->move_from = ubi->move_to = NULL;
     ubi->move_to_put = ubi->wl_scheduled = 0;
     spin_unlock(&ubi->wl_lock);

-    if (put) {
+    err = schedule_erase(ubi, e1, 0);
+    if (err) {
+        e1 = NULL;
+        goto out_error;
+    }
+
+    if (e2) {
         /*
          * Well, the target PEB was put meanwhile, schedule it for
          * erasure.
@@ -894,13 +827,6 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
             goto out_error;
     }

-    if (!protect) {
-        err = schedule_erase(ubi, e1, 0);
-        if (err)
-            goto out_error;
-    }
-
     dbg_wl("done");
     mutex_unlock(&ubi->move_mutex);
     return 0;
@@ -908,20 +834,24 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
     /*
      * For some reasons the LEB was not moved, might be an error, might be
      * something else. @e1 was not changed, so return it back. @e2 might
-     * be changed, schedule it for erasure.
+     * have been changed, schedule it for erasure.
      */
 out_not_moved:
+    dbg_wl("canceled moving PEB %d", e1->pnum);
     ubi_free_vid_hdr(ubi, vid_hdr);
+    vid_hdr = NULL;
     spin_lock(&ubi->wl_lock);
     if (scrubbing)
         wl_tree_add(e1, &ubi->scrub);
     else
         wl_tree_add(e1, &ubi->used);
+    ubi_assert(!ubi->move_to_put);
     ubi->move_from = ubi->move_to = NULL;
-    ubi->move_to_put = ubi->wl_scheduled = 0;
+    ubi->wl_scheduled = 0;
     spin_unlock(&ubi->wl_lock);

-    err = schedule_erase(ubi, e2, 0);
+    e1 = NULL;
+    err = schedule_erase(ubi, e2, torture);
     if (err)
         goto out_error;
@@ -938,8 +868,10 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
     ubi->move_to_put = ubi->wl_scheduled = 0;
     spin_unlock(&ubi->wl_lock);

-    kmem_cache_free(ubi_wl_entry_slab, e1);
-    kmem_cache_free(ubi_wl_entry_slab, e2);
+    if (e1)
+        kmem_cache_free(ubi_wl_entry_slab, e1);
+    if (e2)
+        kmem_cache_free(ubi_wl_entry_slab, e2);
     ubi_ro_mode(ubi);

     mutex_unlock(&ubi->move_mutex);
@@ -988,7 +920,7 @@ static int ensure_wear_leveling(struct ubi_device *ubi)
      * erase counter of free physical eraseblocks is greater then
      * %UBI_WL_THRESHOLD.
      */
-    e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, rb);
+    e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
     e2 = find_wl_entry(&ubi->free, WL_FREE_MAX_DIFF);

     if (!(e2->ec - e1->ec >= UBI_WL_THRESHOLD))
@@ -1050,7 +982,6 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
         kfree(wl_wrk);

         spin_lock(&ubi->wl_lock);
-        ubi->abs_ec += 1;
         wl_tree_add(e, &ubi->free);
         spin_unlock(&ubi->wl_lock);
@@ -1058,7 +989,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
          * One more erase operation has happened, take care about
          * protected physical eraseblocks.
          */
-        check_protection_over(ubi);
+        serve_prot_queue(ubi);

         /* And take care about wear-leveling */
         err = ensure_wear_leveling(ubi);
@@ -1190,12 +1121,12 @@ int ubi_wl_put_peb(struct ubi_device *ubi, int pnum, int torture)
     } else {
         if (in_wl_tree(e, &ubi->used)) {
             paranoid_check_in_wl_tree(e, &ubi->used);
-            rb_erase(&e->rb, &ubi->used);
+            rb_erase(&e->u.rb, &ubi->used);
         } else if (in_wl_tree(e, &ubi->scrub)) {
             paranoid_check_in_wl_tree(e, &ubi->scrub);
-            rb_erase(&e->rb, &ubi->scrub);
+            rb_erase(&e->u.rb, &ubi->scrub);
         } else {
-            err = prot_tree_del(ubi, e->pnum);
+            err = prot_queue_del(ubi, e->pnum);
             if (err) {
                 ubi_err("PEB %d not found", pnum);
                 ubi_ro_mode(ubi);
@@ -1255,11 +1186,11 @@ int ubi_wl_scrub_peb(struct ubi_device *ubi, int pnum)
     if (in_wl_tree(e, &ubi->used)) {
         paranoid_check_in_wl_tree(e, &ubi->used);
-        rb_erase(&e->rb, &ubi->used);
+        rb_erase(&e->u.rb, &ubi->used);
     } else {
         int err;

-        err = prot_tree_del(ubi, e->pnum);
+        err = prot_queue_del(ubi, e->pnum);
         if (err) {
             ubi_err("PEB %d not found", pnum);
             ubi_ro_mode(ubi);
@@ -1290,7 +1221,7 @@ int ubi_wl_flush(struct ubi_device *ubi)
     int err;

     /*
-     * Erase while the pending works queue is not empty, but not more then
+     * Erase while the pending works queue is not empty, but not more than
      * the number of currently pending works.
      */
     dbg_wl("flush (%d pending works)", ubi->works_count);
@@ -1308,7 +1239,7 @@ int ubi_wl_flush(struct ubi_device *ubi)
     up_write(&ubi->work_sem);

     /*
-     * And in case last was the WL worker and it cancelled the LEB
+     * And in case last was the WL worker and it canceled the LEB
      * movement, flush again.
      */
     while (ubi->works_count) {
@@ -1337,11 +1268,11 @@ static void tree_destroy(struct rb_root *root)
         else if (rb->rb_right)
             rb = rb->rb_right;
         else {
-            e = rb_entry(rb, struct ubi_wl_entry, rb);
+            e = rb_entry(rb, struct ubi_wl_entry, u.rb);

             rb = rb_parent(rb);
             if (rb) {
-                if (rb->rb_left == &e->rb)
+                if (rb->rb_left == &e->u.rb)
                     rb->rb_left = NULL;
                 else
                     rb->rb_right = NULL;
@@ -1436,15 +1367,13 @@ static void cancel_pending(struct ubi_device *ubi)
  */
 int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
 {
-    int err;
+    int err, i;
     struct rb_node *rb1, *rb2;
     struct ubi_scan_volume *sv;
     struct ubi_scan_leb *seb, *tmp;
     struct ubi_wl_entry *e;

     ubi->used = ubi->free = ubi->scrub = RB_ROOT;
-    ubi->prot.pnum = ubi->prot.aec = RB_ROOT;
     spin_lock_init(&ubi->wl_lock);
     mutex_init(&ubi->move_mutex);
     init_rwsem(&ubi->work_sem);
@@ -1458,6 +1387,10 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
     if (!ubi->lookuptbl)
         return err;

+    for (i = 0; i < UBI_PROT_QUEUE_LEN; i++)
+        INIT_LIST_HEAD(&ubi->pq[i]);
+    ubi->pq_head = 0;
+
     list_for_each_entry_safe(seb, tmp, &si->erase, u.list) {
         cond_resched();
@@ -1552,33 +1485,18 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
 }

 /**
- * protection_trees_destroy - destroy the protection RB-trees.
+ * protection_queue_destroy - destroy the protection queue.
  * @ubi: UBI device description object
  */
-static void protection_trees_destroy(struct ubi_device *ubi)
+static void protection_queue_destroy(struct ubi_device *ubi)
 {
-    struct rb_node *rb;
-    struct ubi_wl_prot_entry *pe;
+    int i;
+    struct ubi_wl_entry *e, *tmp;

-    rb = ubi->prot.aec.rb_node;
-    while (rb) {
-        if (rb->rb_left)
-            rb = rb->rb_left;
-        else if (rb->rb_right)
-            rb = rb->rb_right;
-        else {
-            pe = rb_entry(rb, struct ubi_wl_prot_entry, rb_aec);
-
-            rb = rb_parent(rb);
-            if (rb) {
-                if (rb->rb_left == &pe->rb_aec)
-                    rb->rb_left = NULL;
-                else
-                    rb->rb_right = NULL;
-            }
-
-            kmem_cache_free(ubi_wl_entry_slab, pe->e);
-            kfree(pe);
+    for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i) {
+        list_for_each_entry_safe(e, tmp, &ubi->pq[i], u.list) {
+            list_del(&e->u.list);
+            kmem_cache_free(ubi_wl_entry_slab, e);
         }
     }
 }
@@ -1591,7 +1509,7 @@ void ubi_wl_close(struct ubi_device *ubi)
 {
     dbg_wl("close the WL sub-system");
     cancel_pending(ubi);
-    protection_trees_destroy(ubi);
+    protection_queue_destroy(ubi);
     tree_destroy(&ubi->used);
     tree_destroy(&ubi->free);
     tree_destroy(&ubi->scrub);
@@ -1661,4 +1579,27 @@ static int paranoid_check_in_wl_tree(struct ubi_wl_entry *e,
     return 1;
 }
+/**
+ * paranoid_check_in_pq - check if wear-leveling entry is in the protection
+ *                        queue.
+ * @ubi: UBI device description object
+ * @e: the wear-leveling entry to check
+ *
+ * This function returns zero if @e is in @ubi->pq and %1 if it is not.
+ */
+static int paranoid_check_in_pq(struct ubi_device *ubi, struct ubi_wl_entry *e)
+{
+    struct ubi_wl_entry *p;
+    int i;
+
+    for (i = 0; i < UBI_PROT_QUEUE_LEN; ++i)
+        list_for_each_entry(p, &ubi->pq[i], u.list)
+            if (p == e)
+                return 0;
+
+    ubi_err("paranoid check failed for PEB %d, EC %d, Protect queue",
+            e->pnum, e->ec);
+    ubi_dbg_dump_stack();
+    return 1;
+}
 #endif /* CONFIG_MTD_UBI_DEBUG_PARANOID */