Commit 5c395ae7 authored by Linus Torvalds's avatar Linus Torvalds

Merge branch 'linux-next' of git://git.infradead.org/ubifs-2.6

* 'linux-next' of git://git.infradead.org/ubifs-2.6:
  UBI: fix use-after-free on error path
  UBI: fix missing scrub when there is a bit-flip
  UBIFS: Use kmemdup rather than duplicating its implementation
parents 49d41bae e57e0d8e
...@@ -1028,12 +1028,14 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to, ...@@ -1028,12 +1028,14 @@ int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
* 'ubi_wl_put_peb()' function on the @ubi->move_mutex. In turn, we are * 'ubi_wl_put_peb()' function on the @ubi->move_mutex. In turn, we are
* holding @ubi->move_mutex and go sleep on the LEB lock. So, if the * holding @ubi->move_mutex and go sleep on the LEB lock. So, if the
* LEB is already locked, we just do not move it and return * LEB is already locked, we just do not move it and return
* %MOVE_CANCEL_RACE, which means that UBI will re-try, but later. * %MOVE_RETRY. Note, we do not return %MOVE_CANCEL_RACE here because
* we do not know the reasons of the contention - it may be just a
* normal I/O on this LEB, so we want to re-try.
*/ */
err = leb_write_trylock(ubi, vol_id, lnum); err = leb_write_trylock(ubi, vol_id, lnum);
if (err) { if (err) {
dbg_wl("contention on LEB %d:%d, cancel", vol_id, lnum); dbg_wl("contention on LEB %d:%d, cancel", vol_id, lnum);
return MOVE_CANCEL_RACE; return MOVE_RETRY;
} }
/* /*
......
...@@ -120,6 +120,7 @@ enum { ...@@ -120,6 +120,7 @@ enum {
* PEB * PEB
* MOVE_CANCEL_BITFLIPS: canceled because a bit-flip was detected in the * MOVE_CANCEL_BITFLIPS: canceled because a bit-flip was detected in the
* target PEB * target PEB
* MOVE_RETRY: retry scrubbing the PEB
*/ */
enum { enum {
MOVE_CANCEL_RACE = 1, MOVE_CANCEL_RACE = 1,
...@@ -127,6 +128,7 @@ enum { ...@@ -127,6 +128,7 @@ enum {
MOVE_TARGET_RD_ERR, MOVE_TARGET_RD_ERR,
MOVE_TARGET_WR_ERR, MOVE_TARGET_WR_ERR,
MOVE_CANCEL_BITFLIPS, MOVE_CANCEL_BITFLIPS,
MOVE_RETRY,
}; };
/** /**
......
...@@ -795,7 +795,10 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk, ...@@ -795,7 +795,10 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
protect = 1; protect = 1;
goto out_not_moved; goto out_not_moved;
} }
if (err == MOVE_RETRY) {
scrubbing = 1;
goto out_not_moved;
}
if (err == MOVE_CANCEL_BITFLIPS || err == MOVE_TARGET_WR_ERR || if (err == MOVE_CANCEL_BITFLIPS || err == MOVE_TARGET_WR_ERR ||
err == MOVE_TARGET_RD_ERR) { err == MOVE_TARGET_RD_ERR) {
/* /*
...@@ -1049,7 +1052,6 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk, ...@@ -1049,7 +1052,6 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
ubi_err("failed to erase PEB %d, error %d", pnum, err); ubi_err("failed to erase PEB %d, error %d", pnum, err);
kfree(wl_wrk); kfree(wl_wrk);
kmem_cache_free(ubi_wl_entry_slab, e);
if (err == -EINTR || err == -ENOMEM || err == -EAGAIN || if (err == -EINTR || err == -ENOMEM || err == -EAGAIN ||
err == -EBUSY) { err == -EBUSY) {
...@@ -1062,14 +1064,16 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk, ...@@ -1062,14 +1064,16 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
goto out_ro; goto out_ro;
} }
return err; return err;
} else if (err != -EIO) { }
kmem_cache_free(ubi_wl_entry_slab, e);
if (err != -EIO)
/* /*
* If this is not %-EIO, we have no idea what to do. Scheduling * If this is not %-EIO, we have no idea what to do. Scheduling
* this physical eraseblock for erasure again would cause * this physical eraseblock for erasure again would cause
* errors again and again. Well, lets switch to R/O mode. * errors again and again. Well, lets switch to R/O mode.
*/ */
goto out_ro; goto out_ro;
}
/* It is %-EIO, the PEB went bad */ /* It is %-EIO, the PEB went bad */
......
...@@ -1986,12 +1986,11 @@ int ubifs_lpt_scan_nolock(struct ubifs_info *c, int start_lnum, int end_lnum, ...@@ -1986,12 +1986,11 @@ int ubifs_lpt_scan_nolock(struct ubifs_info *c, int start_lnum, int end_lnum,
if (path[h].in_tree) if (path[h].in_tree)
continue; continue;
nnode = kmalloc(sz, GFP_NOFS); nnode = kmemdup(&path[h].nnode, sz, GFP_NOFS);
if (!nnode) { if (!nnode) {
err = -ENOMEM; err = -ENOMEM;
goto out; goto out;
} }
memcpy(nnode, &path[h].nnode, sz);
parent = nnode->parent; parent = nnode->parent;
parent->nbranch[nnode->iip].nnode = nnode; parent->nbranch[nnode->iip].nnode = nnode;
path[h].ptr.nnode = nnode; path[h].ptr.nnode = nnode;
...@@ -2004,12 +2003,11 @@ int ubifs_lpt_scan_nolock(struct ubifs_info *c, int start_lnum, int end_lnum, ...@@ -2004,12 +2003,11 @@ int ubifs_lpt_scan_nolock(struct ubifs_info *c, int start_lnum, int end_lnum,
const size_t sz = sizeof(struct ubifs_pnode); const size_t sz = sizeof(struct ubifs_pnode);
struct ubifs_nnode *parent; struct ubifs_nnode *parent;
pnode = kmalloc(sz, GFP_NOFS); pnode = kmemdup(&path[h].pnode, sz, GFP_NOFS);
if (!pnode) { if (!pnode) {
err = -ENOMEM; err = -ENOMEM;
goto out; goto out;
} }
memcpy(pnode, &path[h].pnode, sz);
parent = pnode->parent; parent = pnode->parent;
parent->nbranch[pnode->iip].pnode = pnode; parent->nbranch[pnode->iip].pnode = pnode;
path[h].ptr.pnode = pnode; path[h].ptr.pnode = pnode;
......
...@@ -344,12 +344,11 @@ static int lnc_add(struct ubifs_info *c, struct ubifs_zbranch *zbr, ...@@ -344,12 +344,11 @@ static int lnc_add(struct ubifs_info *c, struct ubifs_zbranch *zbr,
return err; return err;
} }
lnc_node = kmalloc(zbr->len, GFP_NOFS); lnc_node = kmemdup(node, zbr->len, GFP_NOFS);
if (!lnc_node) if (!lnc_node)
/* We don't have to have the cache, so no error */ /* We don't have to have the cache, so no error */
return 0; return 0;
memcpy(lnc_node, node, zbr->len);
zbr->leaf = lnc_node; zbr->leaf = lnc_node;
return 0; return 0;
} }
......
...@@ -138,12 +138,11 @@ static int create_xattr(struct ubifs_info *c, struct inode *host, ...@@ -138,12 +138,11 @@ static int create_xattr(struct ubifs_info *c, struct inode *host,
ui = ubifs_inode(inode); ui = ubifs_inode(inode);
ui->xattr = 1; ui->xattr = 1;
ui->flags |= UBIFS_XATTR_FL; ui->flags |= UBIFS_XATTR_FL;
ui->data = kmalloc(size, GFP_NOFS); ui->data = kmemdup(value, size, GFP_NOFS);
if (!ui->data) { if (!ui->data) {
err = -ENOMEM; err = -ENOMEM;
goto out_free; goto out_free;
} }
memcpy(ui->data, value, size);
inode->i_size = ui->ui_size = size; inode->i_size = ui->ui_size = size;
ui->data_len = size; ui->data_len = size;
...@@ -204,12 +203,11 @@ static int change_xattr(struct ubifs_info *c, struct inode *host, ...@@ -204,12 +203,11 @@ static int change_xattr(struct ubifs_info *c, struct inode *host,
return err; return err;
kfree(ui->data); kfree(ui->data);
ui->data = kmalloc(size, GFP_NOFS); ui->data = kmemdup(value, size, GFP_NOFS);
if (!ui->data) { if (!ui->data) {
err = -ENOMEM; err = -ENOMEM;
goto out_free; goto out_free;
} }
memcpy(ui->data, value, size);
inode->i_size = ui->ui_size = size; inode->i_size = ui->ui_size = size;
ui->data_len = size; ui->data_len = size;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment