Commit b11445f8 authored by Linus Torvalds

Merge tag 'upstream-3.18-rc1-v2' of git://git.infradead.org/linux-ubifs

Pull UBI/UBIFS fixes from Artem Bityutskiy:
 - a fix for a theoretical race condition that could leave UBIFS unable
   to mount a file-system (Hujianyang)
 - a few error-path fixes in the ubiblock subsystem
 - improved handling of volume size changes in the ubiblock subsystem
   (a minimal sketch of the capacity check follows this list)
 - a few fixes and nicifications in the fastmap subsystem
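
For the ubiblock size handling, the block layer counts capacity in 512-byte
sectors while UBI reports volume sizes in bytes, so the driver converts with a
">> 9" shift and must reject sizes that do not fit into sector_t. A minimal,
self-contained sketch of that conversion and truncation check (hypothetical
names, not the kernel code) could look like this:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the kernel's sector_t, which may be 32-bit. */
typedef uint32_t sector_t;

/*
 * Convert a volume size in bytes to 512-byte sectors; fail if the result
 * does not fit into sector_t (the "(sector_t)x != x" pattern used in the
 * ubiblock diff below).
 */
static int capacity_from_bytes(uint64_t used_bytes, sector_t *out)
{
        uint64_t disk_capacity = used_bytes >> 9;   /* bytes -> sectors */

        if ((sector_t)disk_capacity != disk_capacity)
                return -1;                          /* volume too big */

        *out = (sector_t)disk_capacity;
        return 0;
}

int main(void)
{
        sector_t sectors;

        if (capacity_from_bytes(1048576ULL, &sectors) == 0)
                printf("capacity: %u sectors\n", (unsigned)sectors);
        return 0;
}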

* tag 'upstream-3.18-rc1-v2' of git://git.infradead.org/linux-ubifs:
  UBI: Fastmap: Calc fastmap size correctly
  UBIFS: Fix trivial typo in power_cut_emulated()
  UBI: Fix trivial typo in __schedule_ubi_work
  UBI: wl: Rename cancel flag to shutdown
  UBI: ubi_eba_read_leb: Remove in vain variable assignment
  UBIFS: Align the dump messages of SB_NODE
  UBI: Fix livelock in produce_free_peb()
  UBI: return on error in rename_volumes()
  UBI: Improve comment on work_sem
  UBIFS: Remove bogus assert
  UBI: Dispatch update notification if the volume is updated
  UBI: block: Add support for the UBI_VOLUME_UPDATED notification
  UBI: block: Fix block device size setting
  UBI: block: fix dereference on uninitialized dev
  UBI: add missing kmem_cache_free() in process_pool_aeb error path
  UBIFS: fix free log space calculation
  UBIFS: fix a race condition
parents 0ef3a56b 91401a34
@@ -188,8 +188,9 @@ static int ubiblock_read_to_buf(struct ubiblock *dev, char *buffer,
  ret = ubi_read(dev->desc, leb, buffer, offset, len);
  if (ret) {
- ubi_err("%s ubi_read error %d",
- dev->gd->disk_name, ret);
+ ubi_err("%s: error %d while reading from LEB %d (offset %d, "
+ "length %d)", dev->gd->disk_name, ret, leb, offset,
+ len);
  return ret;
  }
  return 0;
@@ -378,7 +379,7 @@ int ubiblock_create(struct ubi_volume_info *vi)
  {
  struct ubiblock *dev;
  struct gendisk *gd;
- u64 disk_capacity = ((u64)vi->size * vi->usable_leb_size) >> 9;
+ u64 disk_capacity = vi->used_bytes >> 9;
  int ret;
  if ((sector_t)disk_capacity != disk_capacity)
@@ -502,13 +503,8 @@ int ubiblock_remove(struct ubi_volume_info *vi)
  static int ubiblock_resize(struct ubi_volume_info *vi)
  {
  struct ubiblock *dev;
- u64 disk_capacity = ((u64)vi->size * vi->usable_leb_size) >> 9;
+ u64 disk_capacity = vi->used_bytes >> 9;
- if ((sector_t)disk_capacity != disk_capacity) {
- ubi_warn("%s: the volume is too big, cannot resize (%d LEBs)",
- dev->gd->disk_name, vi->size);
- return -EFBIG;
- }
  /*
  * Need to lock the device list until we stop using the device,
  * otherwise the device struct might get released in
@@ -520,10 +516,20 @@ static int ubiblock_resize(struct ubi_volume_info *vi)
  mutex_unlock(&devices_mutex);
  return -ENODEV;
  }
+ if ((sector_t)disk_capacity != disk_capacity) {
+ mutex_unlock(&devices_mutex);
+ ubi_warn("%s: the volume is too big (%d LEBs), cannot resize",
+ dev->gd->disk_name, vi->size);
+ return -EFBIG;
+ }
  mutex_lock(&dev->dev_mutex);
- set_capacity(dev->gd, disk_capacity);
- ubi_msg("%s resized to %d LEBs", dev->gd->disk_name, vi->size);
+ if (get_capacity(dev->gd) != disk_capacity) {
+ set_capacity(dev->gd, disk_capacity);
+ ubi_msg("%s resized to %lld bytes", dev->gd->disk_name,
+ vi->used_bytes);
+ }
  mutex_unlock(&dev->dev_mutex);
  mutex_unlock(&devices_mutex);
  return 0;
@@ -547,6 +553,14 @@ static int ubiblock_notify(struct notifier_block *nb,
  case UBI_VOLUME_RESIZED:
  ubiblock_resize(&nt->vi);
  break;
+ case UBI_VOLUME_UPDATED:
+ /*
+ * If the volume is static, a content update might mean the
+ * size (i.e. used_bytes) was also changed.
+ */
+ if (nt->vi.vol_type == UBI_STATIC_VOLUME)
+ ubiblock_resize(&nt->vi);
+ break;
  default:
  break;
  }
......
@@ -425,8 +425,10 @@ static long vol_cdev_ioctl(struct file *file, unsigned int cmd,
  break;
  err = ubi_start_update(ubi, vol, bytes);
- if (bytes == 0)
+ if (bytes == 0) {
+ ubi_volume_notify(ubi, vol, UBI_VOLUME_UPDATED);
  revoke_exclusive(desc, UBI_READWRITE);
+ }
  break;
  }
@@ -699,7 +701,7 @@ static int rename_volumes(struct ubi_device *ubi,
  req->ents[i].name[req->ents[i].name_len] = '\0';
  n = strlen(req->ents[i].name);
  if (n != req->ents[i].name_len)
- err = -EINVAL;
+ return -EINVAL;
  }
  /* Make sure volume IDs and names are unique */
......
@@ -441,10 +441,9 @@ int ubi_eba_read_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
  err = ubi_io_read_data(ubi, buf, pnum, offset, len);
  if (err) {
- if (err == UBI_IO_BITFLIPS) {
+ if (err == UBI_IO_BITFLIPS)
  scrub = 1;
- err = 0;
- } else if (mtd_is_eccerr(err)) {
+ else if (mtd_is_eccerr(err)) {
  if (vol->vol_type == UBI_DYNAMIC_VOLUME)
  goto out_unlock;
  scrub = 1;
......
@@ -24,7 +24,8 @@ size_t ubi_calc_fm_size(struct ubi_device *ubi)
  {
  size_t size;
- size = sizeof(struct ubi_fm_hdr) + \
+ size = sizeof(struct ubi_fm_sb) + \
+ sizeof(struct ubi_fm_hdr) + \
  sizeof(struct ubi_fm_scan_pool) + \
  sizeof(struct ubi_fm_scan_pool) + \
  (ubi->peb_count * sizeof(struct ubi_fm_ec)) + \
@@ -330,6 +331,7 @@ static int process_pool_aeb(struct ubi_device *ubi, struct ubi_attach_info *ai,
  av = tmp_av;
  else {
  ubi_err("orphaned volume in fastmap pool!");
+ kmem_cache_free(ai->aeb_slab_cache, new_aeb);
  return UBI_BAD_FASTMAP;
  }
......
@@ -439,7 +439,8 @@ struct ubi_debug_info {
  * @move_to, @move_to_put @erase_pending, @wl_scheduled, @works,
  * @erroneous, and @erroneous_peb_count fields
  * @move_mutex: serializes eraseblock moves
- * @work_sem: synchronizes the WL worker with use tasks
+ * @work_sem: used to wait for all the scheduled works to finish and prevent
+ * new works from being submitted
  * @wl_scheduled: non-zero if the wear-leveling was scheduled
  * @lookuptbl: a table to quickly find a &struct ubi_wl_entry object for any
  * physical eraseblock
@@ -713,14 +714,15 @@ struct ubi_attach_info {
  * @torture: if the physical eraseblock has to be tortured
  * @anchor: produce a anchor PEB to by used by fastmap
  *
- * The @func pointer points to the worker function. If the @cancel argument is
- * not zero, the worker has to free the resources and exit immediately. The
- * worker has to return zero in case of success and a negative error code in
+ * The @func pointer points to the worker function. If the @shutdown argument is
+ * not zero, the worker has to free the resources and exit immediately as the
+ * WL sub-system is shutting down.
+ * The worker has to return zero in case of success and a negative error code in
  * case of failure.
  */
  struct ubi_work {
  struct list_head list;
- int (*func)(struct ubi_device *ubi, struct ubi_work *wrk, int cancel);
+ int (*func)(struct ubi_device *ubi, struct ubi_work *wrk, int shutdown);
  /* The below fields are only relevant to erasure works */
  struct ubi_wl_entry *e;
  int vol_id;
......
@@ -272,7 +272,7 @@ static int produce_free_peb(struct ubi_device *ubi)
  {
  int err;
- while (!ubi->free.rb_node) {
+ while (!ubi->free.rb_node && ubi->works_count) {
  spin_unlock(&ubi->wl_lock);
  dbg_wl("do one work synchronously");
@@ -835,7 +835,7 @@ static void serve_prot_queue(struct ubi_device *ubi)
  * @wrk: the work to schedule
  *
  * This function adds a work defined by @wrk to the tail of the pending works
- * list. Can only be used of ubi->work_sem is already held in read mode!
+ * list. Can only be used if ubi->work_sem is already held in read mode!
  */
  static void __schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
  {
@@ -864,7 +864,7 @@ static void schedule_ubi_work(struct ubi_device *ubi, struct ubi_work *wrk)
  }
  static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
- int cancel);
+ int shutdown);
  #ifdef CONFIG_MTD_UBI_FASTMAP
  /**
@@ -990,14 +990,15 @@ int ubi_wl_put_fm_peb(struct ubi_device *ubi, struct ubi_wl_entry *fm_e,
  * wear_leveling_worker - wear-leveling worker function.
  * @ubi: UBI device description object
  * @wrk: the work object
- * @cancel: non-zero if the worker has to free memory and exit
+ * @shutdown: non-zero if the worker has to free memory and exit
+ * because the WL-subsystem is shutting down
  *
  * This function copies a more worn out physical eraseblock to a less worn out
  * one. Returns zero in case of success and a negative error code in case of
  * failure.
  */
  static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
- int cancel)
+ int shutdown)
  {
  int err, scrubbing = 0, torture = 0, protect = 0, erroneous = 0;
  int vol_id = -1, uninitialized_var(lnum);
@@ -1008,7 +1009,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
  struct ubi_vid_hdr *vid_hdr;
  kfree(wrk);
- if (cancel)
+ if (shutdown)
  return 0;
  vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
@@ -1407,7 +1408,8 @@ int ubi_ensure_anchor_pebs(struct ubi_device *ubi)
  * erase_worker - physical eraseblock erase worker function.
  * @ubi: UBI device description object
  * @wl_wrk: the work object
- * @cancel: non-zero if the worker has to free memory and exit
+ * @shutdown: non-zero if the worker has to free memory and exit
+ * because the WL sub-system is shutting down
  *
  * This function erases a physical eraseblock and perform torture testing if
  * needed. It also takes care about marking the physical eraseblock bad if
@@ -1415,7 +1417,7 @@ int ubi_ensure_anchor_pebs(struct ubi_device *ubi)
  * failure.
  */
  static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
- int cancel)
+ int shutdown)
  {
  struct ubi_wl_entry *e = wl_wrk->e;
  int pnum = e->pnum;
@@ -1423,7 +1425,7 @@ static int erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk,
  int lnum = wl_wrk->lnum;
  int err, available_consumed = 0;
- if (cancel) {
+ if (shutdown) {
  dbg_wl("cancel erasure of PEB %d EC %d", pnum, e->ec);
  kfree(wl_wrk);
  kmem_cache_free(ubi_wl_entry_slab, e);
@@ -1845,10 +1847,10 @@ int ubi_thread(void *u)
  }
  /**
- * cancel_pending - cancel all pending works.
+ * shutdown_work - shutdown all pending works.
  * @ubi: UBI device description object
  */
- static void cancel_pending(struct ubi_device *ubi)
+ static void shutdown_work(struct ubi_device *ubi)
  {
  while (!list_empty(&ubi->works)) {
  struct ubi_work *wrk;
@@ -1997,7 +1999,7 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
  return 0;
  out_free:
- cancel_pending(ubi);
+ shutdown_work(ubi);
  tree_destroy(&ubi->used);
  tree_destroy(&ubi->free);
  tree_destroy(&ubi->scrub);
@@ -2029,7 +2031,7 @@ static void protection_queue_destroy(struct ubi_device *ubi)
  void ubi_wl_close(struct ubi_device *ubi)
  {
  dbg_wl("close the WL sub-system");
- cancel_pending(ubi);
+ shutdown_work(ubi);
  protection_queue_destroy(ubi);
  tree_destroy(&ubi->used);
  tree_destroy(&ubi->erroneous);
......
@@ -164,10 +164,6 @@ static int do_commit(struct ubifs_info *c)
  if (err)
  goto out;
  err = ubifs_orphan_end_commit(c);
  if (err)
  goto out;
- old_ltail_lnum = c->ltail_lnum;
- err = ubifs_log_end_commit(c, new_ltail_lnum);
- if (err)
- goto out;
  err = dbg_check_old_index(c, &zroot);
@@ -202,7 +198,9 @@ static int do_commit(struct ubifs_info *c)
  c->mst_node->flags |= cpu_to_le32(UBIFS_MST_NO_ORPHS);
  else
  c->mst_node->flags &= ~cpu_to_le32(UBIFS_MST_NO_ORPHS);
- err = ubifs_write_master(c);
+ old_ltail_lnum = c->ltail_lnum;
+ err = ubifs_log_end_commit(c, new_ltail_lnum);
  if (err)
  goto out;
......
@@ -334,9 +334,9 @@ void ubifs_dump_node(const struct ubifs_info *c, const void *node)
  pr_err("\tkey_fmt %d (%s)\n",
  (int)sup->key_fmt, get_key_fmt(sup->key_fmt));
  pr_err("\tflags %#x\n", sup_flags);
- pr_err("\t big_lpt %u\n",
+ pr_err("\tbig_lpt %u\n",
  !!(sup_flags & UBIFS_FLG_BIGLPT));
- pr_err("\t space_fixup %u\n",
+ pr_err("\tspace_fixup %u\n",
  !!(sup_flags & UBIFS_FLG_SPACE_FIXUP));
  pr_err("\tmin_io_size %u\n", le32_to_cpu(sup->min_io_size));
  pr_err("\tleb_size %u\n", le32_to_cpu(sup->leb_size));
@@ -2462,7 +2462,7 @@ static int power_cut_emulated(struct ubifs_info *c, int lnum, int write)
  if (chance(1, 2)) {
  d->pc_delay = 1;
- /* Fail withing 1 minute */
+ /* Fail within 1 minute */
  delay = prandom_u32() % 60000;
  d->pc_timeout = jiffies;
  d->pc_timeout += msecs_to_jiffies(delay);
......
@@ -546,15 +546,14 @@ int ubifs_jnl_update(struct ubifs_info *c, const struct inode *dir,
  int aligned_dlen, aligned_ilen, sync = IS_DIRSYNC(dir);
  int last_reference = !!(deletion && inode->i_nlink == 0);
  struct ubifs_inode *ui = ubifs_inode(inode);
- struct ubifs_inode *dir_ui = ubifs_inode(dir);
+ struct ubifs_inode *host_ui = ubifs_inode(dir);
  struct ubifs_dent_node *dent;
  struct ubifs_ino_node *ino;
  union ubifs_key dent_key, ino_key;
  dbg_jnl("ino %lu, dent '%.*s', data len %d in dir ino %lu",
  inode->i_ino, nm->len, nm->name, ui->data_len, dir->i_ino);
- ubifs_assert(dir_ui->data_len == 0);
- ubifs_assert(mutex_is_locked(&dir_ui->ui_mutex));
+ ubifs_assert(mutex_is_locked(&host_ui->ui_mutex));
  dlen = UBIFS_DENT_NODE_SZ + nm->len + 1;
  ilen = UBIFS_INO_NODE_SZ;
@@ -658,7 +657,7 @@ int ubifs_jnl_update(struct ubifs_info *c, const struct inode *dir,
  ui->synced_i_size = ui->ui_size;
  spin_unlock(&ui->ui_lock);
  mark_inode_clean(c, ui);
- mark_inode_clean(c, dir_ui);
+ mark_inode_clean(c, host_ui);
  return 0;
  out_finish:
......
@@ -106,10 +106,14 @@ static inline long long empty_log_bytes(const struct ubifs_info *c)
  h = (long long)c->lhead_lnum * c->leb_size + c->lhead_offs;
  t = (long long)c->ltail_lnum * c->leb_size;
- if (h >= t)
+ if (h > t)
  return c->log_bytes - h + t;
- else
+ else if (h != t)
  return t - h;
+ else if (c->lhead_lnum != c->ltail_lnum)
+ return 0;
+ else
+ return c->log_bytes;
  }
  /**
@@ -447,9 +451,9 @@ int ubifs_log_start_commit(struct ubifs_info *c, int *ltail_lnum)
  * @ltail_lnum: new log tail LEB number
  *
  * This function is called on when the commit operation was finished. It
- * moves log tail to new position and unmaps LEBs which contain obsolete data.
- * Returns zero in case of success and a negative error code in case of
- * failure.
+ * moves log tail to new position and updates the master node so that it stores
+ * the new log tail LEB number. Returns zero in case of success and a negative
+ * error code in case of failure.
  */
  int ubifs_log_end_commit(struct ubifs_info *c, int ltail_lnum)
  {
@@ -477,7 +481,12 @@ int ubifs_log_end_commit(struct ubifs_info *c, int ltail_lnum)
  spin_unlock(&c->buds_lock);
  err = dbg_check_bud_bytes(c);
+ if (err)
+ goto out;
+ err = ubifs_write_master(c);
+ out:
  mutex_unlock(&c->log_mutex);
  return err;
  }
......
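
The free log space fix above rests on the observation that equal head and tail
byte positions are ambiguous: the log may be completely empty or completely
full, and only the head/tail LEB numbers distinguish the two. A standalone
sketch of that case analysis (illustrative parameter names, not the UBIFS
structures) might look like this:

#include <stdio.h>

/*
 * Toy model of a circular log: leb_size bytes per LEB, log_bytes total.
 * Head and tail are given as (LEB number, byte offset), mirroring the case
 * analysis in the empty_log_bytes() change above.
 */
static long long free_log_bytes(long long log_bytes, long long leb_size,
                                int lhead_lnum, int lhead_offs,
                                int ltail_lnum)
{
        long long h = (long long)lhead_lnum * leb_size + lhead_offs;
        long long t = (long long)ltail_lnum * leb_size;

        if (h > t)                              /* head ahead of tail */
                return log_bytes - h + t;
        else if (h != t)                        /* head behind tail */
                return t - h;
        else if (lhead_lnum != ltail_lnum)      /* h == t: log is full */
                return 0;
        else                                    /* h == t: log is empty */
                return log_bytes;
}

int main(void)
{
        /* Same byte position (h == t) but different LEBs: no space left. */
        printf("full:  %lld bytes free\n", free_log_bytes(128, 64, 0, 64, 1));
        /* Head and tail at the same offset in the same LEB: all space free. */
        printf("empty: %lld bytes free\n", free_log_bytes(128, 64, 0, 0, 0));
        return 0;
}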