Commit e3a251e3 authored by Linus Torvalds

Merge tag 'upstream-5.5-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rw/ubifs

Pull UBI/UBIFS/JFFS2 updates from Richard Weinberger:
 "This pull request contains mostly fixes for UBI, UBIFS and JFFS2:

  UBI:

   - Fix a regression around producing an anchor PEB for fastmap.

     Due to a change in our locking, fastmap was unable to produce fresh
     anchors and re-used the existing one way too often.

  UBIFS:

   - Fixes for endianness. A few places blindly assumed a little-endian host.

   - Fix for a memory leak in the orphan code.

   - Fix for a possible crash during a commit.

   - Revert a wrong bugfix.

  JFFS2:

   - Revert a bad bugfix (false positive from a code checking tool)"

* tag 'upstream-5.5-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rw/ubifs:
  Revert "jffs2: Fix possible null-pointer dereferences in jffs2_add_frag_to_fragtree()"
  ubi: Fix producing anchor PEBs
  ubifs: ubifs_tnc_start_commit: Fix OOB in layout_in_gaps
  ubifs: do_kill_orphans: Fix a memory leak bug
  Revert "ubifs: Fix memory leak bug in alloc_ubifs_info() error path"
  ubifs: Fix type of sup->hash_algo
  ubifs: Fixed missed le64_to_cpu() in journal
  ubifs: Force prandom result to __le32
  ubifs: Remove obsolete TODO from dfs_file_write()
  ubi: Fix warning static is not at beginning of declaration
  ubi: Print skip_check in ubi_dump_vol_info()
parents 97eeb4d9 6e78c01f
@@ -107,6 +107,7 @@ void ubi_dump_vol_info(const struct ubi_volume *vol)
pr_err("\tlast_eb_bytes %d\n", vol->last_eb_bytes);
pr_err("\tcorrupted %d\n", vol->corrupted);
pr_err("\tupd_marker %d\n", vol->upd_marker);
pr_err("\tskip_check %d\n", vol->skip_check);
if (vol->name_len <= UBI_VOL_NAME_MAX &&
strnlen(vol->name, vol->name_len + 1) == vol->name_len) {
......
@@ -57,18 +57,6 @@ static void return_unused_pool_pebs(struct ubi_device *ubi,
}
}
static int anchor_pebs_available(struct rb_root *root)
{
struct rb_node *p;
struct ubi_wl_entry *e;
ubi_rb_for_each_entry(p, e, root, u.rb)
if (e->pnum < UBI_FM_MAX_START)
return 1;
return 0;
}
/**
* ubi_wl_get_fm_peb - find a physical erase block with a given maximal number.
* @ubi: UBI device description object
@@ -277,8 +265,26 @@ static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi)
int ubi_ensure_anchor_pebs(struct ubi_device *ubi)
{
struct ubi_work *wrk;
struct ubi_wl_entry *anchor;
spin_lock(&ubi->wl_lock);
/* Do we already have an anchor? */
if (ubi->fm_anchor) {
spin_unlock(&ubi->wl_lock);
return 0;
}
/* See if we can find an anchor PEB on the list of free PEBs */
anchor = ubi_wl_get_fm_peb(ubi, 1);
if (anchor) {
ubi->fm_anchor = anchor;
spin_unlock(&ubi->wl_lock);
return 0;
}
/* No luck, trigger wear leveling to produce a new anchor PEB */
ubi->fm_do_produce_anchor = 1;
if (ubi->wl_scheduled) {
spin_unlock(&ubi->wl_lock);
return 0;
@@ -294,7 +300,6 @@ int ubi_ensure_anchor_pebs(struct ubi_device *ubi)
return -ENOMEM;
}
wrk->anchor = 1;
wrk->func = &wear_leveling_worker;
__schedule_ubi_work(ubi, wrk);
return 0;
......
@@ -1540,14 +1540,6 @@ int ubi_update_fastmap(struct ubi_device *ubi)
return 0;
}
ret = ubi_ensure_anchor_pebs(ubi);
if (ret) {
up_write(&ubi->fm_eba_sem);
up_write(&ubi->work_sem);
up_write(&ubi->fm_protect);
return ret;
}
new_fm = kzalloc(sizeof(*new_fm), GFP_KERNEL);
if (!new_fm) {
up_write(&ubi->fm_eba_sem);
@@ -1618,7 +1610,8 @@ int ubi_update_fastmap(struct ubi_device *ubi)
}
spin_lock(&ubi->wl_lock);
tmp_e = ubi_wl_get_fm_peb(ubi, 1);
tmp_e = ubi->fm_anchor;
ubi->fm_anchor = NULL;
spin_unlock(&ubi->wl_lock);
if (old_fm) {
@@ -1670,6 +1663,9 @@ int ubi_update_fastmap(struct ubi_device *ubi)
up_write(&ubi->work_sem);
up_write(&ubi->fm_protect);
kfree(old_fm);
ubi_ensure_anchor_pebs(ubi);
return ret;
err:
......
@@ -491,6 +491,8 @@ struct ubi_debug_info {
* @fm_work: fastmap work queue
* @fm_work_scheduled: non-zero if fastmap work was scheduled
* @fast_attach: non-zero if UBI was attached by fastmap
* @fm_anchor: The next anchor PEB to use for fastmap
* @fm_do_produce_anchor: If true produce an anchor PEB in wl
*
* @used: RB-tree of used physical eraseblocks
* @erroneous: RB-tree of erroneous used physical eraseblocks
@@ -599,6 +601,8 @@ struct ubi_device {
struct work_struct fm_work;
int fm_work_scheduled;
int fast_attach;
struct ubi_wl_entry *fm_anchor;
int fm_do_produce_anchor;
/* Wear-leveling sub-system's stuff */
struct rb_root used;
@@ -789,7 +793,6 @@ struct ubi_attach_info {
* @vol_id: the volume ID on which this erasure is being performed
* @lnum: the logical eraseblock number
* @torture: if the physical eraseblock has to be tortured
* @anchor: produce a anchor PEB to by used by fastmap
*
* The @func pointer points to the worker function. If the @shutdown argument is
* not zero, the worker has to free the resources and exit immediately as the
@@ -805,7 +808,6 @@ struct ubi_work {
int vol_id;
int lnum;
int torture;
int anchor;
};
#include "debug.h"
@@ -968,7 +970,7 @@ int ubi_fastmap_init_checkmap(struct ubi_volume *vol, int leb_count);
void ubi_fastmap_destroy_checkmap(struct ubi_volume *vol);
#else
static inline int ubi_update_fastmap(struct ubi_device *ubi) { return 0; }
int static inline ubi_fastmap_init_checkmap(struct ubi_volume *vol, int leb_count) { return 0; }
static inline int ubi_fastmap_init_checkmap(struct ubi_volume *vol, int leb_count) { return 0; }
static inline void ubi_fastmap_destroy_checkmap(struct ubi_volume *vol) {}
#endif
......
@@ -339,13 +339,6 @@ static struct ubi_wl_entry *find_wl_entry(struct ubi_device *ubi,
}
}
/* If no fastmap has been written and this WL entry can be used
* as anchor PEB, hold it back and return the second best WL entry
* such that fastmap can use the anchor PEB later. */
if (prev_e && !ubi->fm_disabled &&
!ubi->fm && e->pnum < UBI_FM_MAX_START)
return prev_e;
return e;
}
@@ -656,9 +649,6 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
{
int err, scrubbing = 0, torture = 0, protect = 0, erroneous = 0;
int erase = 0, keep = 0, vol_id = -1, lnum = -1;
#ifdef CONFIG_MTD_UBI_FASTMAP
int anchor = wrk->anchor;
#endif
struct ubi_wl_entry *e1, *e2;
struct ubi_vid_io_buf *vidb;
struct ubi_vid_hdr *vid_hdr;
@@ -698,11 +688,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
}
#ifdef CONFIG_MTD_UBI_FASTMAP
/* Check whether we need to produce an anchor PEB */
if (!anchor)
anchor = !anchor_pebs_available(&ubi->free);
if (anchor) {
if (ubi->fm_do_produce_anchor) {
e1 = find_anchor_wl_entry(&ubi->used);
if (!e1)
goto out_cancel;
@@ -719,6 +705,7 @@ static int wear_leveling_worker(struct ubi_device *ubi, struct ubi_work *wrk,
self_check_in_wl_tree(ubi, e1, &ubi->used);
rb_erase(&e1->u.rb, &ubi->used);
dbg_wl("anchor-move PEB %d to PEB %d", e1->pnum, e2->pnum);
ubi->fm_do_produce_anchor = 0;
} else if (!ubi->scrub.rb_node) {
#else
if (!ubi->scrub.rb_node) {
@@ -1051,7 +1038,6 @@ static int ensure_wear_leveling(struct ubi_device *ubi, int nested)
goto out_cancel;
}
wrk->anchor = 0;
wrk->func = &wear_leveling_worker;
if (nested)
__schedule_ubi_work(ubi, wrk);
@@ -1093,8 +1079,15 @@ static int __erase_worker(struct ubi_device *ubi, struct ubi_work *wl_wrk)
err = sync_erase(ubi, e, wl_wrk->torture);
if (!err) {
spin_lock(&ubi->wl_lock);
wl_tree_add(e, &ubi->free);
ubi->free_count++;
if (!ubi->fm_anchor && e->pnum < UBI_FM_MAX_START) {
ubi->fm_anchor = e;
ubi->fm_do_produce_anchor = 0;
} else {
wl_tree_add(e, &ubi->free);
ubi->free_count++;
}
spin_unlock(&ubi->wl_lock);
/*
@@ -1882,6 +1875,9 @@ int ubi_wl_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
if (err)
goto out_free;
#ifdef CONFIG_MTD_UBI_FASTMAP
ubi_ensure_anchor_pebs(ubi);
#endif
return 0;
out_free:
......
@@ -2,7 +2,6 @@
#ifndef UBI_WL_H
#define UBI_WL_H
#ifdef CONFIG_MTD_UBI_FASTMAP
static int anchor_pebs_available(struct rb_root *root);
static void update_fastmap_work_fn(struct work_struct *wrk);
static struct ubi_wl_entry *find_anchor_wl_entry(struct rb_root *root);
static struct ubi_wl_entry *get_peb_for_wl(struct ubi_device *ubi);
......
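Taken together, the UBI hunks above replace the old scheme (search the free tree for a low-numbered PEB on every fastmap write) with a single reserved anchor PEB: ubi->fm_anchor is filled ahead of time by ubi_ensure_anchor_pebs(), the wear-leveling and erase workers honour ubi->fm_do_produce_anchor when no candidate is free, and ubi_update_fastmap() simply takes the reserved PEB and re-arms the reservation. The following self-contained C program is only a userspace model of that reservation pattern; the names mirror the diff, but none of it is kernel code.

/*
 * Userspace model of the anchor-PEB reservation introduced above.
 * A sketch of the pattern only; this is not the UBI implementation.
 */
#include <stdbool.h>
#include <stdio.h>

#define UBI_FM_MAX_START 64          /* anchors must be early PEBs */
#define NUM_PEBS         128

static bool peb_free[NUM_PEBS];      /* toy model of the free tree */
static int  fm_anchor = -1;          /* models ubi->fm_anchor */
static bool fm_do_produce_anchor;    /* models ubi->fm_do_produce_anchor */

/* Models ubi_wl_get_fm_peb(ubi, 1): grab a free PEB below the limit. */
static int get_free_anchor_candidate(void)
{
	for (int pnum = 0; pnum < UBI_FM_MAX_START; pnum++) {
		if (peb_free[pnum]) {
			peb_free[pnum] = false;
			return pnum;
		}
	}
	return -1;
}

/* Models ubi_ensure_anchor_pebs(): reserve an anchor ahead of time. */
static void ensure_anchor_peb(void)
{
	if (fm_anchor >= 0)                  /* already reserved */
		return;
	fm_anchor = get_free_anchor_candidate();
	if (fm_anchor < 0)
		fm_do_produce_anchor = true; /* ask wear leveling for one */
}

/* Models the consumer side in ubi_update_fastmap(): take the reserved PEB. */
static int take_anchor_peb(void)
{
	int pnum = fm_anchor;

	fm_anchor = -1;
	ensure_anchor_peb();                 /* immediately re-arm the reserve */
	return pnum;
}

int main(void)
{
	for (int i = 100; i < NUM_PEBS; i++) /* only late PEBs are free... */
		peb_free[i] = true;
	peb_free[3] = true;                  /* ...plus one early PEB */

	ensure_anchor_peb();
	printf("reserved anchor PEB %d\n", take_anchor_peb());
	printf("need wear leveling to produce the next one: %s\n",
	       fm_do_produce_anchor ? "yes" : "no");
	return 0;
}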
@@ -226,7 +226,7 @@ static int jffs2_add_frag_to_fragtree(struct jffs2_sb_info *c, struct rb_root *r
lastend = this->ofs + this->size;
} else {
dbg_fragtree2("lookup gave no frag\n");
return -EINVAL;
lastend = 0;
}
/* See if we ran off the end of the fragtree */
......
@@ -2737,18 +2737,6 @@ static ssize_t dfs_file_write(struct file *file, const char __user *u,
struct dentry *dent = file->f_path.dentry;
int val;
/*
* TODO: this is racy - the file-system might have already been
* unmounted and we'd oops in this case. The plan is to fix it with
* help of 'iterate_supers_type()' which we should have in v3.0: when
* a debugfs opened, we rember FS's UUID in file->private_data. Then
* whenever we access the FS via a debugfs file, we iterate all UBIFS
* superblocks and fine the one with the same UUID, and take the
* locking right.
*
* The other way to go suggested by Al Viro is to create a separate
* 'ubifs-debug' file-system instead.
*/
if (file->f_path.dentry == d->dfs_dump_lprops) {
ubifs_dump_lprops(c);
return count;
......
@@ -503,7 +503,7 @@ static void mark_inode_clean(struct ubifs_info *c, struct ubifs_inode *ui)
static void set_dent_cookie(struct ubifs_info *c, struct ubifs_dent_node *dent)
{
if (c->double_hash)
dent->cookie = prandom_u32();
dent->cookie = (__force __le32) prandom_u32();
else
dent->cookie = 0;
}
@@ -899,7 +899,7 @@ int ubifs_jnl_write_inode(struct ubifs_info *c, const struct inode *inode)
fname_name(&nm) = xent->name;
fname_len(&nm) = le16_to_cpu(xent->nlen);
xino = ubifs_iget(c->vfs_sb, xent->inum);
xino = ubifs_iget(c->vfs_sb, le64_to_cpu(xent->inum));
if (IS_ERR(xino)) {
err = PTR_ERR(xino);
ubifs_err(c, "dead directory entry '%s', error %d",
......
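The two journal.c hunks above belong to the endianness fixes called out in the pull message: UBIFS stores multi-byte on-disk fields little endian, so values must go through cpu_to_le*() when written and le*_to_cpu() when read instead of being assigned or used raw. A minimal userspace illustration of the same conversions, assuming glibc's <endian.h> helpers as stand-ins for the kernel macros (the struct is invented for the example):

/* Hypothetical on-disk structure with fixed little-endian fields,
 * illustrating the conversions the hunks above add. */
#include <endian.h>
#include <stdint.h>
#include <stdio.h>

struct on_disk_entry {
	uint64_t inum;       /* stored little endian on the medium */
	uint16_t hash_algo;  /* stored little endian on the medium */
};

int main(void)
{
	struct on_disk_entry ent;

	/* Writing: convert CPU order to little endian before storing
	 * (the kernel equivalent is cpu_to_le16/cpu_to_le64). */
	ent.inum = htole64(12345);
	ent.hash_algo = htole16(0xffff);

	/* Reading: convert back to CPU order before using the value
	 * (the kernel equivalent is le16_to_cpu/le64_to_cpu).  On a
	 * big-endian host, skipping this step yields garbage. */
	printf("inum=%llu hash_algo=0x%x\n",
	       (unsigned long long)le64toh(ent.inum),
	       (unsigned)le16toh(ent.hash_algo));
	return 0;
}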
@@ -631,12 +631,17 @@ static int do_kill_orphans(struct ubifs_info *c, struct ubifs_scan_leb *sleb,
ino_t inum;
int i, n, err, first = 1;
ino = kmalloc(UBIFS_MAX_INO_NODE_SZ, GFP_NOFS);
if (!ino)
return -ENOMEM;
list_for_each_entry(snod, &sleb->nodes, list) {
if (snod->type != UBIFS_ORPH_NODE) {
ubifs_err(c, "invalid node type %d in orphan area at %d:%d",
snod->type, sleb->lnum, snod->offs);
ubifs_dump_node(c, snod->node);
return -EINVAL;
err = -EINVAL;
goto out_free;
}
orph = snod->node;
@@ -663,20 +668,18 @@ static int do_kill_orphans(struct ubifs_info *c, struct ubifs_scan_leb *sleb,
ubifs_err(c, "out of order commit number %llu in orphan node at %d:%d",
cmt_no, sleb->lnum, snod->offs);
ubifs_dump_node(c, snod->node);
return -EINVAL;
err = -EINVAL;
goto out_free;
}
dbg_rcvry("out of date LEB %d", sleb->lnum);
*outofdate = 1;
return 0;
err = 0;
goto out_free;
}
if (first)
first = 0;
ino = kmalloc(UBIFS_MAX_INO_NODE_SZ, GFP_NOFS);
if (!ino)
return -ENOMEM;
n = (le32_to_cpu(orph->ch.len) - UBIFS_ORPH_NODE_SZ) >> 3;
for (i = 0; i < n; i++) {
union ubifs_key key1, key2;
......
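The do_kill_orphans() hunks above fix the memory leak mentioned in the pull message by allocating the scratch buffer once, before the scan loop, and routing every early return through a single out_free label. A minimal standalone sketch of that allocate-once / goto-cleanup pattern follows; the helper names and error values are invented for the example and this is not UBIFS code:

/* Minimal sketch of the allocate-once + single-exit cleanup pattern
 * adopted by the orphan fix above. */
#include <stdlib.h>
#include <string.h>

#define BUF_SZ 4096

static int scan_one(void *buf, int i)
{
	/* placeholder for per-node work; fail on a made-up condition */
	memset(buf, 0, BUF_SZ);
	return (i == 7) ? -22 /* -EINVAL */ : 0;
}

static int scan_all(int nr_nodes)
{
	void *buf;
	int i, err = 0;

	buf = malloc(BUF_SZ);           /* allocate once, outside the loop */
	if (!buf)
		return -12;             /* -ENOMEM */

	for (i = 0; i < nr_nodes; i++) {
		err = scan_one(buf, i);
		if (err)
			goto out_free;  /* every exit path frees the buffer */
	}

out_free:
	free(buf);
	return err;
}

int main(void)
{
	return scan_all(16) ? 1 : 0;
}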
@@ -184,7 +184,7 @@ static int create_default_filesystem(struct ubifs_info *c)
if (err)
goto out;
} else {
sup->hash_algo = 0xffff;
sup->hash_algo = cpu_to_le16(0xffff);
}
sup->ch.node_type = UBIFS_SB_NODE;
......
@@ -2267,10 +2267,8 @@ static struct dentry *ubifs_mount(struct file_system_type *fs_type, int flags,
}
} else {
err = ubifs_fill_super(sb, data, flags & SB_SILENT ? 1 : 0);
if (err) {
kfree(c);
if (err)
goto out_deact;
}
/* We do not support atime */
sb->s_flags |= SB_ACTIVE;
if (IS_ENABLED(CONFIG_UBIFS_ATIME_SUPPORT))
......
@@ -212,7 +212,7 @@ static int is_idx_node_in_use(struct ubifs_info *c, union ubifs_key *key,
/**
* layout_leb_in_gaps - layout index nodes using in-the-gaps method.
* @c: UBIFS file-system description object
* @p: return LEB number here
* @p: return LEB number in @c->gap_lebs[p]
*
* This function lays out new index nodes for dirty znodes using in-the-gaps
* method of TNC commit.
@@ -221,7 +221,7 @@ static int is_idx_node_in_use(struct ubifs_info *c, union ubifs_key *key,
* This function returns the number of index nodes written into the gaps, or a
* negative error code on failure.
*/
static int layout_leb_in_gaps(struct ubifs_info *c, int *p)
static int layout_leb_in_gaps(struct ubifs_info *c, int p)
{
struct ubifs_scan_leb *sleb;
struct ubifs_scan_node *snod;
@@ -236,7 +236,7 @@ static int layout_leb_in_gaps(struct ubifs_info *c, int *p)
* filled, however we do not check there at present.
*/
return lnum; /* Error code */
*p = lnum;
c->gap_lebs[p] = lnum;
dbg_gc("LEB %d", lnum);
/*
* Scan the index LEB. We use the generic scan for this even though
@@ -355,7 +355,7 @@ static int get_leb_cnt(struct ubifs_info *c, int cnt)
*/
static int layout_in_gaps(struct ubifs_info *c, int cnt)
{
int err, leb_needed_cnt, written, *p;
int err, leb_needed_cnt, written, p = 0, old_idx_lebs, *gap_lebs;
dbg_gc("%d znodes to write", cnt);
@@ -364,9 +364,9 @@ static int layout_in_gaps(struct ubifs_info *c, int cnt)
if (!c->gap_lebs)
return -ENOMEM;
p = c->gap_lebs;
old_idx_lebs = c->lst.idx_lebs;
do {
ubifs_assert(c, p < c->gap_lebs + c->lst.idx_lebs);
ubifs_assert(c, p < c->lst.idx_lebs);
written = layout_leb_in_gaps(c, p);
if (written < 0) {
err = written;
@@ -392,9 +392,29 @@ static int layout_in_gaps(struct ubifs_info *c, int cnt)
leb_needed_cnt = get_leb_cnt(c, cnt);
dbg_gc("%d znodes remaining, need %d LEBs, have %d", cnt,
leb_needed_cnt, c->ileb_cnt);
/*
* Dynamically change the size of @c->gap_lebs to prevent
* oob, because @c->lst.idx_lebs could be increased by
* function @get_idx_gc_leb (called by layout_leb_in_gaps->
* ubifs_find_dirty_idx_leb) during loop. Only enlarge
* @c->gap_lebs when needed.
*
*/
if (leb_needed_cnt > c->ileb_cnt && p >= old_idx_lebs &&
old_idx_lebs < c->lst.idx_lebs) {
old_idx_lebs = c->lst.idx_lebs;
gap_lebs = krealloc(c->gap_lebs, sizeof(int) *
(old_idx_lebs + 1), GFP_NOFS);
if (!gap_lebs) {
kfree(c->gap_lebs);
c->gap_lebs = NULL;
return -ENOMEM;
}
c->gap_lebs = gap_lebs;
}
} while (leb_needed_cnt > c->ileb_cnt);
*p = -1;
c->gap_lebs[p] = -1;
return 0;
}
......
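The layout_in_gaps()/layout_leb_in_gaps() hunks above avoid the out-of-bounds write by indexing c->gap_lebs instead of advancing a raw pointer, and by growing the array with krealloc() when c->lst.idx_lebs increases during the loop. Below is a small standalone sketch of that grow-on-demand pattern, including the detail of freeing the old buffer when reallocation fails; the array name and growth policy are invented, with userspace realloc() standing in for krealloc():

/* Sketch of the grow-on-demand array pattern behind the OOB fix above. */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	int cap = 4;                     /* models the initial idx_lebs count */
	int *lebs = malloc(sizeof(int) * (cap + 1));
	int n = 0;

	if (!lebs)
		return 1;

	for (int lnum = 100; lnum < 110; lnum++) {
		if (n >= cap) {
			/* Grow before writing past the end; keep the old
			 * pointer so it can be freed if realloc fails. */
			int *tmp = realloc(lebs, sizeof(int) * (2 * cap + 1));
			if (!tmp) {
				free(lebs);
				return 1;
			}
			lebs = tmp;
			cap *= 2;
		}
		lebs[n++] = lnum;
	}

	lebs[n] = -1;                    /* terminator, as in c->gap_lebs */
	for (int i = 0; lebs[i] != -1; i++)
		printf("gap LEB %d\n", lebs[i]);
	free(lebs);
	return 0;
}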