Commit f8860802 authored by Brian Norris

mtd: spi-nor: make lock/unlock bounds checks more obvious and robust

There are a few different corner cases in the current logic that seem
undesirable:

* mtd_unlock() with ofs == 0 trips a bounds issue on
  ofs - mtd->erasesize < 0

* mtd_unlock() on the middle of a flash that is already unlocked will
  return -EINVAL

* probably other corner cases

So, let's stop doing "smart" checks like "check the block below us";
let's just do the following:

(a) pass only non-negative offsets/lengths to stm_is_locked_sr()
(b) add a similar stm_is_unlocked_sr() function, so we can check whether
    the *entire* range is unlocked (and not just whether some part of it
    is unlocked); see the sketch below

Then, armed with (b), we can make lock() and unlock() much more
symmetric:

(c) short-circuit the procedure if there is no work to be done, and
(d) check the entire range above us (for lock) or below us (for unlock)

This also aligns well with the structure needed for proper TB
(Top/Bottom) support.
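
As a minimal sketch (standalone C with hypothetical fixed-width types in
place of the driver's loff_t/uint64_t), the two range predicates behind
(a)/(b) reduce to:

	#include <stdbool.h>
	#include <stdint.h>

	/* Is all of [ofs, ofs+len) inside [lock_ofs, lock_ofs+lock_len)? */
	static bool range_locked(uint64_t ofs, uint64_t len,
				 uint64_t lock_ofs, uint64_t lock_len)
	{
		return ofs >= lock_ofs && ofs + len <= lock_ofs + lock_len;
	}

	/* Is all of [ofs, ofs+len) outside [lock_ofs, lock_ofs+lock_len)? */
	static bool range_unlocked(uint64_t ofs, uint64_t len,
				   uint64_t lock_ofs, uint64_t lock_len)
	{
		return ofs >= lock_ofs + lock_len || ofs + len <= lock_ofs;
	}

A range that straddles the lock boundary fails both predicates; that is
exactly the case where lock()/unlock() must actually rewrite the BP bits.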
Signed-off-by: Brian Norris <computersforpeace@gmail.com>
Tested-by: Ezequiel Garcia <ezequiel@vanguardiasur.com.ar>
parent 4c0dba44
@@ -439,17 +439,38 @@ static void stm_get_locked_range(struct spi_nor *nor, u8 sr, loff_t *ofs,
 }
 
 /*
- * Return 1 if the entire region is locked, 0 otherwise
+ * Return 1 if the entire region is locked (if @locked is true) or unlocked (if
+ * @locked is false); 0 otherwise
  */
-static int stm_is_locked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
-			    u8 sr)
+static int stm_check_lock_status_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
+				    u8 sr, bool locked)
 {
 	loff_t lock_offs;
 	uint64_t lock_len;
 
+	if (!len)
+		return 1;
+
 	stm_get_locked_range(nor, sr, &lock_offs, &lock_len);
 
-	return (ofs + len <= lock_offs + lock_len) && (ofs >= lock_offs);
+	if (locked)
+		/* Requested range is a sub-range of locked range */
+		return (ofs + len <= lock_offs + lock_len) && (ofs >= lock_offs);
+	else
+		/* Requested range does not overlap with locked range */
+		return (ofs >= lock_offs + lock_len) || (ofs + len <= lock_offs);
+}
+
+static int stm_is_locked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
+			    u8 sr)
+{
+	return stm_check_lock_status_sr(nor, ofs, len, sr, true);
+}
+
+static int stm_is_unlocked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len,
+			      u8 sr)
+{
+	return stm_check_lock_status_sr(nor, ofs, len, sr, false);
 }
 
 /*
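
To make the semantics concrete: assuming a hypothetical 16 MiB part whose
status register currently protects the top 4 MiB (i.e. stm_get_locked_range()
reports lock_offs = 12 MiB, lock_len = 4 MiB), the new helpers classify
ranges as follows:

	stm_is_locked_sr(nor, 12 << 20, 4 << 20, sr);   /* 1: whole range locked  */
	stm_is_locked_sr(nor, 10 << 20, 4 << 20, sr);   /* 0: straddles boundary  */
	stm_is_unlocked_sr(nor, 0, 12 << 20, sr);       /* 1: entirely below lock */
	stm_is_unlocked_sr(nor, 0, 0, sr);              /* 1: empty range         */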
@@ -481,20 +502,24 @@ static int stm_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
 	int status_old, status_new;
 	u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
 	u8 shift = ffs(mask) - 1, pow, val;
+	loff_t lock_len;
 	int ret;
 
 	status_old = read_sr(nor);
 	if (status_old < 0)
 		return status_old;
 
-	/* SPI NOR always locks to the end */
-	if (ofs + len != mtd->size) {
-		/* Does combined region extend to end? */
-		if (!stm_is_locked_sr(nor, ofs + len, mtd->size - ofs - len,
-				      status_old))
-			return -EINVAL;
-		len = mtd->size - ofs;
-	}
+	/* If nothing in our range is unlocked, we don't need to do anything */
+	if (stm_is_locked_sr(nor, ofs, len, status_old))
+		return 0;
+
+	/* If anything above us is unlocked, we can't use 'top' protection */
+	if (!stm_is_locked_sr(nor, ofs + len, mtd->size - (ofs + len),
+			      status_old))
+		return -EINVAL;
+
+	/* lock_len: length of region that should end up locked */
+	lock_len = mtd->size - ofs;
 
 	/*
 	 * Need smallest pow such that:
@@ -505,7 +530,7 @@ static int stm_lock(struct spi_nor *nor, loff_t ofs, uint64_t len)
 	 *
 	 *   pow = ceil(log2(size / len)) = log2(size) - floor(log2(len))
 	 */
-	pow = ilog2(mtd->size) - ilog2(len);
+	pow = ilog2(mtd->size) - ilog2(lock_len);
 	val = mask - (pow << shift);
 	if (val & ~mask)
 		return -EINVAL;
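
A worked example of the encoding (hypothetical 16 MiB part, so
ilog2(mtd->size) = 24; mask = SR_BP2 | SR_BP1 | SR_BP0 = 0x1c, shift = 2): a
request that makes lock_len = 4 MiB gives pow = 24 - ilog2(4M) = 24 - 22 = 2,
hence val = 0x1c - (2 << 2) = 0x14, i.e. BP2..BP0 = 101, which
stm_get_locked_range() decodes back to mtd->size >> 2 = 4 MiB protected at
the top of the flash.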
@@ -541,17 +566,24 @@ static int stm_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
 	int status_old, status_new;
 	u8 mask = SR_BP2 | SR_BP1 | SR_BP0;
 	u8 shift = ffs(mask) - 1, pow, val;
+	loff_t lock_len;
 	int ret;
 
 	status_old = read_sr(nor);
 	if (status_old < 0)
 		return status_old;
 
-	/* Cannot unlock; would unlock larger region than requested */
-	if (stm_is_locked_sr(nor, ofs - mtd->erasesize, mtd->erasesize,
-			     status_old))
-		return -EINVAL;
+	/* If nothing in our range is locked, we don't need to do anything */
+	if (stm_is_unlocked_sr(nor, ofs, len, status_old))
+		return 0;
+
+	/* If anything below us is locked, we can't use 'top' protection */
+	if (!stm_is_unlocked_sr(nor, 0, ofs, status_old))
+		return -EINVAL;
+
+	/* lock_len: length of region that should remain locked */
+	lock_len = mtd->size - (ofs + len);
 
 	/*
 	 * Need largest pow such that:
 	 *
@@ -561,8 +593,8 @@ static int stm_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len)
 	 *
 	 *   pow = floor(log2(size / len)) = log2(size) - ceil(log2(len))
 	 */
-	pow = ilog2(mtd->size) - order_base_2(mtd->size - (ofs + len));
-	if (ofs + len == mtd->size) {
+	pow = ilog2(mtd->size) - order_base_2(lock_len);
+	if (lock_len == 0) {
 		val = 0; /* fully unlocked */
 	} else {
 		val = mask - (pow << shift);
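
The same hypothetical 16 MiB part on the unlock side: a request that leaves
lock_len = 4 MiB gives pow = 24 - order_base_2(4M) = 2 and val = 0x14, so the
top 4 MiB stays locked; lock_len == 0 takes the val = 0 branch and clears the
BP bits entirely. Because order_base_2() rounds up, a non-power-of-two
lock_len keeps at least lock_len bytes locked, so the power-of-two BP
granularity errs toward unlocking less than requested, never more.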