Commit 33e6c1a0 authored by Linus Torvalds

Merge branch 'linux-next' of git://git.infradead.org/ubifs-2.6

* 'linux-next' of git://git.infradead.org/ubifs-2.6:
  UBIFS: fix debugging dump
  UBIFS: improve lprops dump
  UBIFS: various minor commentary fixes
  UBIFS: improve journal head debugging prints
  UBIFS: define journal head numbers in ubifs-media.h
  UBIFS: amend commentaries
  UBIFS: check ubifs_scan error codes better
  UBIFS: do not print scary error messages needlessly
  UBIFS: add inode size debugging check
  UBIFS: constify file and inode operations
  UBIFS: remove unneeded call from ubifs_sync_fs
  UBIFS: kill BKL
  UBIFS: remove unused functions
  UBIFS: suppress compilation warning
parents 0b887ef1 7cce2f4c
@@ -715,7 +715,7 @@ long long ubifs_get_free_space_nolock(struct ubifs_info *c)
  * ubifs_get_free_space - return amount of free space.
  * @c: UBIFS file-system description object
  *
- * This function calculates and retuns amount of free space to report to
+ * This function calculates and returns amount of free space to report to
  * user-space.
  */
 long long ubifs_get_free_space(struct ubifs_info *c)
...
@@ -510,7 +510,7 @@ int dbg_check_old_index(struct ubifs_info *c, struct ubifs_zbranch *zroot)
 int lnum, offs, len, err = 0, uninitialized_var(last_level), child_cnt;
 int first = 1, iip;
 struct ubifs_debug_info *d = c->dbg;
-union ubifs_key lower_key, upper_key, l_key, u_key;
+union ubifs_key uninitialized_var(lower_key), upper_key, l_key, u_key;
 unsigned long long uninitialized_var(last_sqnum);
 struct ubifs_idx_node *idx;
 struct list_head list;
...
@@ -210,6 +210,20 @@ const char *dbg_cstate(int cmt_state)
 }
 }
+const char *dbg_jhead(int jhead)
+{
+switch (jhead) {
+case GCHD:
+return "0 (GC)";
+case BASEHD:
+return "1 (base)";
+case DATAHD:
+return "2 (data)";
+default:
+return "unknown journal head";
+}
+}
 static void dump_ch(const struct ubifs_ch *ch)
 {
 printk(KERN_DEBUG "\tmagic %#x\n", le32_to_cpu(ch->magic));
@@ -623,8 +637,9 @@ void dbg_dump_budg(struct ubifs_info *c)
 /* If we are in R/O mode, journal heads do not exist */
 if (c->jheads)
 for (i = 0; i < c->jhead_cnt; i++)
-printk(KERN_DEBUG "\tjhead %d\t LEB %d\n",
-c->jheads[i].wbuf.jhead, c->jheads[i].wbuf.lnum);
+printk(KERN_DEBUG "\tjhead %s\t LEB %d\n",
+dbg_jhead(c->jheads[i].wbuf.jhead),
+c->jheads[i].wbuf.lnum);
 for (rb = rb_first(&c->buds); rb; rb = rb_next(rb)) {
 bud = rb_entry(rb, struct ubifs_bud, rb);
 printk(KERN_DEBUG "\tbud LEB %d\n", bud->lnum);
@@ -648,9 +663,90 @@ void dbg_dump_budg(struct ubifs_info *c)
 void dbg_dump_lprop(const struct ubifs_info *c, const struct ubifs_lprops *lp)
 {
-printk(KERN_DEBUG "LEB %d lprops: free %d, dirty %d (used %d), "
-"flags %#x\n", lp->lnum, lp->free, lp->dirty,
-c->leb_size - lp->free - lp->dirty, lp->flags);
+int i, spc, dark = 0, dead = 0;
+struct rb_node *rb;
+struct ubifs_bud *bud;
+spc = lp->free + lp->dirty;
+if (spc < c->dead_wm)
+dead = spc;
+else
+dark = ubifs_calc_dark(c, spc);
+if (lp->flags & LPROPS_INDEX)
+printk(KERN_DEBUG "LEB %-7d free %-8d dirty %-8d used %-8d "
+"free + dirty %-8d flags %#x (", lp->lnum, lp->free,
+lp->dirty, c->leb_size - spc, spc, lp->flags);
+else
+printk(KERN_DEBUG "LEB %-7d free %-8d dirty %-8d used %-8d "
+"free + dirty %-8d dark %-4d dead %-4d nodes fit %-3d "
+"flags %#-4x (", lp->lnum, lp->free, lp->dirty,
+c->leb_size - spc, spc, dark, dead,
+(int)(spc / UBIFS_MAX_NODE_SZ), lp->flags);
+if (lp->flags & LPROPS_TAKEN) {
+if (lp->flags & LPROPS_INDEX)
+printk(KERN_CONT "index, taken");
+else
+printk(KERN_CONT "taken");
+} else {
+const char *s;
+if (lp->flags & LPROPS_INDEX) {
+switch (lp->flags & LPROPS_CAT_MASK) {
+case LPROPS_DIRTY_IDX:
+s = "dirty index";
+break;
+case LPROPS_FRDI_IDX:
+s = "freeable index";
+break;
+default:
+s = "index";
+}
+} else {
+switch (lp->flags & LPROPS_CAT_MASK) {
+case LPROPS_UNCAT:
+s = "not categorized";
+break;
+case LPROPS_DIRTY:
+s = "dirty";
+break;
+case LPROPS_FREE:
+s = "free";
+break;
+case LPROPS_EMPTY:
+s = "empty";
+break;
+case LPROPS_FREEABLE:
+s = "freeable";
+break;
+default:
+s = NULL;
+break;
+}
+}
+printk(KERN_CONT "%s", s);
+}
+for (rb = rb_first((struct rb_root *)&c->buds); rb; rb = rb_next(rb)) {
+bud = rb_entry(rb, struct ubifs_bud, rb);
+if (bud->lnum == lp->lnum) {
+int head = 0;
+for (i = 0; i < c->jhead_cnt; i++) {
+if (lp->lnum == c->jheads[i].wbuf.lnum) {
+printk(KERN_CONT ", jhead %s",
+dbg_jhead(i));
+head = 1;
+}
+}
+if (!head)
+printk(KERN_CONT ", bud of jhead %s",
+dbg_jhead(bud->jhead));
+}
+}
+if (lp->lnum == c->gc_lnum)
+printk(KERN_CONT ", GC LEB");
+printk(KERN_CONT ")\n");
 }
 void dbg_dump_lprops(struct ubifs_info *c)
@@ -724,7 +820,7 @@ void dbg_dump_leb(const struct ubifs_info *c, int lnum)
 printk(KERN_DEBUG "(pid %d) start dumping LEB %d\n",
 current->pid, lnum);
-sleb = ubifs_scan(c, lnum, 0, c->dbg->buf);
+sleb = ubifs_scan(c, lnum, 0, c->dbg->buf, 0);
 if (IS_ERR(sleb)) {
 ubifs_err("scan error %d", (int)PTR_ERR(sleb));
 return;
@@ -909,8 +1005,10 @@ int dbg_check_space_info(struct ubifs_info *c)
 ubifs_msg("saved lprops statistics dump");
 dbg_dump_lstats(&d->saved_lst);
 ubifs_get_lp_stats(c, &lst);
 ubifs_msg("current lprops statistics dump");
-dbg_dump_lstats(&d->saved_lst);
+dbg_dump_lstats(&lst);
 spin_lock(&c->space_lock);
 dbg_dump_budg(c);
 spin_unlock(&c->space_lock);
...
@@ -271,6 +271,7 @@ void ubifs_debugging_exit(struct ubifs_info *c);
 /* Dump functions */
 const char *dbg_ntype(int type);
 const char *dbg_cstate(int cmt_state);
+const char *dbg_jhead(int jhead);
 const char *dbg_get_key_dump(const struct ubifs_info *c,
 const union ubifs_key *key);
 void dbg_dump_inode(const struct ubifs_info *c, const struct inode *inode);
@@ -321,6 +322,8 @@ void dbg_check_heap(struct ubifs_info *c, struct ubifs_lpt_heap *heap, int cat,
 int dbg_check_lprops(struct ubifs_info *c);
 int dbg_check_lpt_nodes(struct ubifs_info *c, struct ubifs_cnode *cnode,
 int row, int col);
+int dbg_check_inode_size(struct ubifs_info *c, const struct inode *inode,
+loff_t size);
 /* Force the use of in-the-gaps method for testing */
@@ -425,6 +428,7 @@ void dbg_debugfs_exit_fs(struct ubifs_info *c);
 #define dbg_ntype(type) ""
 #define dbg_cstate(cmt_state) ""
+#define dbg_jhead(jhead) ""
 #define dbg_get_key_dump(c, key) ({})
 #define dbg_dump_inode(c, inode) ({})
 #define dbg_dump_node(c, node) ({})
@@ -460,6 +464,7 @@ void dbg_debugfs_exit_fs(struct ubifs_info *c);
 #define dbg_check_heap(c, heap, cat, add_pos) ({})
 #define dbg_check_lprops(c) 0
 #define dbg_check_lpt_nodes(c, cnode, row, col) 0
+#define dbg_check_inode_size(c, inode, size) 0
 #define dbg_force_in_the_gaps_enabled 0
 #define dbg_force_in_the_gaps() 0
 #define dbg_failure_mode 0
...
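[Editor's note - illustrative example, not part of this commit.] The debug.h hunks above follow the file's existing pattern: when debugging is compiled in, dbg_jhead() and dbg_check_inode_size() are real declarations; otherwise they collapse to the stubs "" and 0 so call sites need no #ifdefs. A self-contained sketch of that pattern, with all names invented:

#include <stdio.h>

/* #define MYFS_DEBUG 1 */            /* toggle to build the real checker */

#ifdef MYFS_DEBUG
static int dbg_check_size(long size)  /* real checker when debugging is on */
{
    fprintf(stderr, "checking size %ld\n", size);
    return size < 0 ? -1 : 0;
}
#else
#define dbg_check_size(size) 0        /* compiles away, like the stubs above */
#endif

int main(void)
{
    return dbg_check_size(4096);      /* identical call site either way */
}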
@@ -21,34 +21,32 @@
  */
 /*
- * This file implements VFS file and inode operations of regular files, device
+ * This file implements VFS file and inode operations for regular files, device
  * nodes and symlinks as well as address space operations.
  *
- * UBIFS uses 2 page flags: PG_private and PG_checked. PG_private is set if the
- * page is dirty and is used for budgeting purposes - dirty pages should not be
- * budgeted. The PG_checked flag is set if full budgeting is required for the
- * page e.g., when it corresponds to a file hole or it is just beyond the file
- * size. The budgeting is done in 'ubifs_write_begin()', because it is OK to
- * fail in this function, and the budget is released in 'ubifs_write_end()'. So
- * the PG_private and PG_checked flags carry the information about how the page
- * was budgeted, to make it possible to release the budget properly.
+ * UBIFS uses 2 page flags: @PG_private and @PG_checked. @PG_private is set if
+ * the page is dirty and is used for optimization purposes - dirty pages are
+ * not budgeted so the flag shows that 'ubifs_write_end()' should not release
+ * the budget for this page. The @PG_checked flag is set if full budgeting is
+ * required for the page e.g., when it corresponds to a file hole or it is
+ * beyond the file size. The budgeting is done in 'ubifs_write_begin()', because
+ * it is OK to fail in this function, and the budget is released in
+ * 'ubifs_write_end()'. So the @PG_private and @PG_checked flags carry
+ * information about how the page was budgeted, to make it possible to release
+ * the budget properly.
  *
- * A thing to keep in mind: inode's 'i_mutex' is locked in most VFS operations
- * we implement. However, this is not true for '->writepage()', which might be
- * called with 'i_mutex' unlocked. For example, when pdflush is performing
- * write-back, it calls 'writepage()' with unlocked 'i_mutex', although the
- * inode has 'I_LOCK' flag in this case. At "normal" work-paths 'i_mutex' is
- * locked in '->writepage', e.g. in "sys_write -> alloc_pages -> direct reclaim
- * path'. So, in '->writepage()' we are only guaranteed that the page is
- * locked.
+ * A thing to keep in mind: inode @i_mutex is locked in most VFS operations we
+ * implement. However, this is not true for 'ubifs_writepage()', which may be
+ * called with @i_mutex unlocked. For example, when pdflush is doing background
+ * write-back, it calls 'ubifs_writepage()' with unlocked @i_mutex. At "normal"
+ * work-paths the @i_mutex is locked in 'ubifs_writepage()', e.g. in the
+ * "sys_write -> alloc_pages -> direct reclaim path". So, in 'ubifs_writepage()'
+ * we are only guaranteed that the page is locked.
  *
- * Similarly, 'i_mutex' does not have to be locked in readpage(), e.g.,
- * readahead path does not have it locked ("sys_read -> generic_file_aio_read
- * -> ondemand_readahead -> readpage"). In case of readahead, 'I_LOCK' flag is
- * not set as well. However, UBIFS disables readahead.
- *
- * This, for example means that there might be 2 concurrent '->writepage()'
- * calls for the same inode, but different inode dirty pages.
+ * Similarly, @i_mutex is not always locked in 'ubifs_readpage()', e.g., the
+ * read-ahead path does not lock it ("sys_read -> generic_file_aio_read ->
+ * ondemand_readahead -> readpage"). In case of readahead, @I_LOCK flag is not
+ * set as well. However, UBIFS disables readahead.
  */
 #include "ubifs.h"
@@ -449,9 +447,9 @@ static int ubifs_write_begin(struct file *file, struct address_space *mapping,
 /*
 * We change whole page so no need to load it. But we
 * have to set the @PG_checked flag to make the further
-* code the page is new. This might be not true, but it
-* is better to budget more that to read the page from
-* the media.
+* code know that the page is new. This might be not
+* true, but it is better to budget more than to read
+* the page from the media.
 */
 SetPageChecked(page);
 skipped_read = 1;
@@ -497,8 +495,8 @@ static int ubifs_write_begin(struct file *file, struct address_space *mapping,
 }
 /*
-* Whee, we aquired budgeting quickly - without involving
-* garbage-collection, committing or forceing write-back. We return
+* Whee, we acquired budgeting quickly - without involving
+* garbage-collection, committing or forcing write-back. We return
 * with @ui->ui_mutex locked if we are appending pages, and unlocked
 * otherwise. This is an optimization (slightly hacky though).
 */
@@ -562,7 +560,7 @@ static int ubifs_write_end(struct file *file, struct address_space *mapping,
 /*
 * Return 0 to force VFS to repeat the whole operation, or the
-* error code if 'do_readpage()' failes.
+* error code if 'do_readpage()' fails.
 */
 copied = do_readpage(page);
 goto out;
@@ -1175,11 +1173,11 @@ static int do_truncation(struct ubifs_info *c, struct inode *inode,
 ui->ui_size = inode->i_size;
 /* Truncation changes inode [mc]time */
 inode->i_mtime = inode->i_ctime = ubifs_current_time(inode);
-/* The other attributes may be changed at the same time as well */
+/* Other attributes may be changed at the same time as well */
 do_attr_changes(inode, attr);
 err = ubifs_jnl_truncate(c, inode, old_size, new_size);
 mutex_unlock(&ui->ui_mutex);
 out_budg:
 if (budgeted)
 ubifs_release_budget(c, &req);
...
@@ -529,7 +529,7 @@ int ubifs_garbage_collect_leb(struct ubifs_info *c, struct ubifs_lprops *lp)
 * We scan the entire LEB even though we only really need to scan up to
 * (c->leb_size - lp->free).
 */
-sleb = ubifs_scan(c, lnum, 0, c->sbuf);
+sleb = ubifs_scan(c, lnum, 0, c->sbuf, 0);
 if (IS_ERR(sleb))
 return PTR_ERR(sleb);
...
@@ -297,7 +297,7 @@ static enum hrtimer_restart wbuf_timer_callback_nolock(struct hrtimer *timer)
 {
 struct ubifs_wbuf *wbuf = container_of(timer, struct ubifs_wbuf, timer);
-dbg_io("jhead %d", wbuf->jhead);
+dbg_io("jhead %s", dbg_jhead(wbuf->jhead));
 wbuf->need_sync = 1;
 wbuf->c->need_wbuf_sync = 1;
 ubifs_wake_up_bgt(wbuf->c);
@@ -314,7 +314,8 @@ static void new_wbuf_timer_nolock(struct ubifs_wbuf *wbuf)
 if (wbuf->no_timer)
 return;
-dbg_io("set timer for jhead %d, %llu-%llu millisecs", wbuf->jhead,
+dbg_io("set timer for jhead %s, %llu-%llu millisecs",
+dbg_jhead(wbuf->jhead),
 div_u64(ktime_to_ns(wbuf->softlimit), USEC_PER_SEC),
 div_u64(ktime_to_ns(wbuf->softlimit) + wbuf->delta,
 USEC_PER_SEC));
@@ -351,8 +352,8 @@ int ubifs_wbuf_sync_nolock(struct ubifs_wbuf *wbuf)
 /* Write-buffer is empty or not seeked */
 return 0;
-dbg_io("LEB %d:%d, %d bytes, jhead %d",
-wbuf->lnum, wbuf->offs, wbuf->used, wbuf->jhead);
+dbg_io("LEB %d:%d, %d bytes, jhead %s",
+wbuf->lnum, wbuf->offs, wbuf->used, dbg_jhead(wbuf->jhead));
 ubifs_assert(!(c->vfs_sb->s_flags & MS_RDONLY));
 ubifs_assert(!(wbuf->avail & 7));
 ubifs_assert(wbuf->offs + c->min_io_size <= c->leb_size);
@@ -401,7 +402,7 @@ int ubifs_wbuf_seek_nolock(struct ubifs_wbuf *wbuf, int lnum, int offs,
 {
 const struct ubifs_info *c = wbuf->c;
-dbg_io("LEB %d:%d, jhead %d", lnum, offs, wbuf->jhead);
+dbg_io("LEB %d:%d, jhead %s", lnum, offs, dbg_jhead(wbuf->jhead));
 ubifs_assert(lnum >= 0 && lnum < c->leb_cnt);
 ubifs_assert(offs >= 0 && offs <= c->leb_size);
 ubifs_assert(offs % c->min_io_size == 0 && !(offs & 7));
@@ -508,9 +509,9 @@ int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len)
 struct ubifs_info *c = wbuf->c;
 int err, written, n, aligned_len = ALIGN(len, 8), offs;
-dbg_io("%d bytes (%s) to jhead %d wbuf at LEB %d:%d", len,
-dbg_ntype(((struct ubifs_ch *)buf)->node_type), wbuf->jhead,
-wbuf->lnum, wbuf->offs + wbuf->used);
+dbg_io("%d bytes (%s) to jhead %s wbuf at LEB %d:%d", len,
+dbg_ntype(((struct ubifs_ch *)buf)->node_type),
+dbg_jhead(wbuf->jhead), wbuf->lnum, wbuf->offs + wbuf->used);
 ubifs_assert(len > 0 && wbuf->lnum >= 0 && wbuf->lnum < c->leb_cnt);
 ubifs_assert(wbuf->offs >= 0 && wbuf->offs % c->min_io_size == 0);
 ubifs_assert(!(wbuf->offs & 7) && wbuf->offs <= c->leb_size);
@@ -535,8 +536,8 @@ int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len)
 memcpy(wbuf->buf + wbuf->used, buf, len);
 if (aligned_len == wbuf->avail) {
-dbg_io("flush jhead %d wbuf to LEB %d:%d",
-wbuf->jhead, wbuf->lnum, wbuf->offs);
+dbg_io("flush jhead %s wbuf to LEB %d:%d",
+dbg_jhead(wbuf->jhead), wbuf->lnum, wbuf->offs);
 err = ubi_leb_write(c->ubi, wbuf->lnum, wbuf->buf,
 wbuf->offs, c->min_io_size,
 wbuf->dtype);
@@ -564,8 +565,8 @@ int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len)
 * minimal I/O unit. We have to fill and flush write-buffer and switch
 * to the next min. I/O unit.
 */
-dbg_io("flush jhead %d wbuf to LEB %d:%d",
-wbuf->jhead, wbuf->lnum, wbuf->offs);
+dbg_io("flush jhead %s wbuf to LEB %d:%d",
+dbg_jhead(wbuf->jhead), wbuf->lnum, wbuf->offs);
 memcpy(wbuf->buf + wbuf->used, buf, wbuf->avail);
 err = ubi_leb_write(c->ubi, wbuf->lnum, wbuf->buf, wbuf->offs,
 c->min_io_size, wbuf->dtype);
@@ -698,8 +699,8 @@ int ubifs_read_node_wbuf(struct ubifs_wbuf *wbuf, void *buf, int type, int len,
 int err, rlen, overlap;
 struct ubifs_ch *ch = buf;
-dbg_io("LEB %d:%d, %s, length %d, jhead %d", lnum, offs,
-dbg_ntype(type), len, wbuf->jhead);
+dbg_io("LEB %d:%d, %s, length %d, jhead %s", lnum, offs,
+dbg_ntype(type), len, dbg_jhead(wbuf->jhead));
 ubifs_assert(wbuf && lnum >= 0 && lnum < c->leb_cnt && offs >= 0);
 ubifs_assert(!(offs & 7) && offs < c->leb_size);
 ubifs_assert(type >= 0 && type < UBIFS_NODE_TYPES_CNT);
...
@@ -158,7 +158,7 @@ static int reserve_space(struct ubifs_info *c, int jhead, int len)
 * some. But the write-buffer mutex has to be unlocked because
 * GC also takes it.
 */
-dbg_jnl("no free space jhead %d, run GC", jhead);
+dbg_jnl("no free space in jhead %s, run GC", dbg_jhead(jhead));
 mutex_unlock(&wbuf->io_mutex);
 lnum = ubifs_garbage_collect(c, 0);
@@ -173,7 +173,8 @@ static int reserve_space(struct ubifs_info *c, int jhead, int len)
 * because we dropped @wbuf->io_mutex, so try once
 * again.
 */
-dbg_jnl("GC couldn't make a free LEB for jhead %d", jhead);
+dbg_jnl("GC couldn't make a free LEB for jhead %s",
+dbg_jhead(jhead));
 if (retries++ < 2) {
 dbg_jnl("retry (%d)", retries);
 goto again;
@@ -184,7 +185,7 @@ static int reserve_space(struct ubifs_info *c, int jhead, int len)
 }
 mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead);
-dbg_jnl("got LEB %d for jhead %d", lnum, jhead);
+dbg_jnl("got LEB %d for jhead %s", lnum, dbg_jhead(jhead));
 avail = c->leb_size - wbuf->offs - wbuf->used;
 if (wbuf->lnum != -1 && avail >= len) {
@@ -255,7 +256,8 @@ static int write_node(struct ubifs_info *c, int jhead, void *node, int len,
 *lnum = c->jheads[jhead].wbuf.lnum;
 *offs = c->jheads[jhead].wbuf.offs + c->jheads[jhead].wbuf.used;
-dbg_jnl("jhead %d, LEB %d:%d, len %d", jhead, *lnum, *offs, len);
+dbg_jnl("jhead %s, LEB %d:%d, len %d",
+dbg_jhead(jhead), *lnum, *offs, len);
 ubifs_prepare_node(c, node, len, 0);
 return ubifs_wbuf_write_nolock(wbuf, node, len);
@@ -285,7 +287,8 @@ static int write_head(struct ubifs_info *c, int jhead, void *buf, int len,
 *lnum = c->jheads[jhead].wbuf.lnum;
 *offs = c->jheads[jhead].wbuf.offs + c->jheads[jhead].wbuf.used;
-dbg_jnl("jhead %d, LEB %d:%d, len %d", jhead, *lnum, *offs, len);
+dbg_jnl("jhead %s, LEB %d:%d, len %d",
+dbg_jhead(jhead), *lnum, *offs, len);
 err = ubifs_wbuf_write_nolock(wbuf, buf, len);
 if (err)
...
@@ -228,23 +228,6 @@ static inline void xent_key_init(const struct ubifs_info *c,
 key->u32[1] = hash | (UBIFS_XENT_KEY << UBIFS_S_KEY_HASH_BITS);
 }
-/**
-* xent_key_init_hash - initialize extended attribute entry key without
-* re-calculating hash function.
-* @c: UBIFS file-system description object
-* @key: key to initialize
-* @inum: host inode number
-* @hash: extended attribute entry name hash
-*/
-static inline void xent_key_init_hash(const struct ubifs_info *c,
-union ubifs_key *key, ino_t inum,
-uint32_t hash)
-{
-ubifs_assert(!(hash & ~UBIFS_S_KEY_HASH_MASK));
-key->u32[0] = inum;
-key->u32[1] = hash | (UBIFS_XENT_KEY << UBIFS_S_KEY_HASH_BITS);
-}
 /**
 * xent_key_init_flash - initialize on-flash extended attribute entry key.
 * @c: UBIFS file-system description object
@@ -295,22 +278,15 @@ static inline void data_key_init(const struct ubifs_info *c,
 }
 /**
-* data_key_init_flash - initialize on-flash data key.
+* highest_data_key - get the highest possible data key for an inode.
 * @c: UBIFS file-system description object
-* @k: key to initialize
+* @key: key to initialize
 * @inum: inode number
-* @block: block number
 */
-static inline void data_key_init_flash(const struct ubifs_info *c, void *k,
-ino_t inum, unsigned int block)
+static inline void highest_data_key(const struct ubifs_info *c,
+union ubifs_key *key, ino_t inum)
 {
-union ubifs_key *key = k;
-ubifs_assert(!(block & ~UBIFS_S_KEY_BLOCK_MASK));
-key->j32[0] = cpu_to_le32(inum);
-key->j32[1] = cpu_to_le32(block |
-(UBIFS_DATA_KEY << UBIFS_S_KEY_BLOCK_BITS));
-memset(k + 8, 0, UBIFS_MAX_KEY_LEN - 8);
+data_key_init(c, key, inum, UBIFS_S_KEY_BLOCK_MASK);
 }
 /**
@@ -554,4 +530,5 @@ static inline unsigned long long key_max_inode_size(const struct ubifs_info *c)
 return 0;
 }
 }
 #endif /* !__UBIFS_KEY_H__ */
@@ -169,8 +169,8 @@ void ubifs_add_bud(struct ubifs_info *c, struct ubifs_bud *bud)
 */
 c->bud_bytes += c->leb_size - bud->start;
-dbg_log("LEB %d:%d, jhead %d, bud_bytes %lld", bud->lnum,
-bud->start, bud->jhead, c->bud_bytes);
+dbg_log("LEB %d:%d, jhead %s, bud_bytes %lld", bud->lnum,
+bud->start, dbg_jhead(bud->jhead), c->bud_bytes);
 spin_unlock(&c->buds_lock);
 }
@@ -355,16 +355,16 @@ static void remove_buds(struct ubifs_info *c)
 * heads (non-closed buds).
 */
 c->cmt_bud_bytes += wbuf->offs - bud->start;
-dbg_log("preserve %d:%d, jhead %d, bud bytes %d, "
+dbg_log("preserve %d:%d, jhead %s, bud bytes %d, "
 "cmt_bud_bytes %lld", bud->lnum, bud->start,
-bud->jhead, wbuf->offs - bud->start,
+dbg_jhead(bud->jhead), wbuf->offs - bud->start,
 c->cmt_bud_bytes);
 bud->start = wbuf->offs;
 } else {
 c->cmt_bud_bytes += c->leb_size - bud->start;
-dbg_log("remove %d:%d, jhead %d, bud bytes %d, "
+dbg_log("remove %d:%d, jhead %s, bud bytes %d, "
 "cmt_bud_bytes %lld", bud->lnum, bud->start,
-bud->jhead, c->leb_size - bud->start,
+dbg_jhead(bud->jhead), c->leb_size - bud->start,
 c->cmt_bud_bytes);
 rb_erase(p1, &c->buds);
 /*
@@ -429,7 +429,8 @@ int ubifs_log_start_commit(struct ubifs_info *c, int *ltail_lnum)
 if (lnum == -1 || offs == c->leb_size)
 continue;
-dbg_log("add ref to LEB %d:%d for jhead %d", lnum, offs, i);
+dbg_log("add ref to LEB %d:%d for jhead %s",
+lnum, offs, dbg_jhead(i));
 ref = buf + len;
 ref->ch.node_type = UBIFS_REF_NODE;
 ref->lnum = cpu_to_le32(lnum);
@@ -695,7 +696,7 @@ int ubifs_consolidate_log(struct ubifs_info *c)
 lnum = c->ltail_lnum;
 write_lnum = lnum;
 while (1) {
-sleb = ubifs_scan(c, lnum, 0, c->sbuf);
+sleb = ubifs_scan(c, lnum, 0, c->sbuf, 0);
 if (IS_ERR(sleb)) {
 err = PTR_ERR(sleb);
 goto out_free;
...
@@ -281,7 +281,7 @@ void ubifs_add_to_cat(struct ubifs_info *c, struct ubifs_lprops *lprops,
 case LPROPS_FREE:
 if (add_to_lpt_heap(c, lprops, cat))
 break;
-/* No more room on heap so make it uncategorized */
+/* No more room on heap so make it un-categorized */
 cat = LPROPS_UNCAT;
 /* Fall through */
 case LPROPS_UNCAT:
@@ -375,8 +375,8 @@ void ubifs_replace_cat(struct ubifs_info *c, struct ubifs_lprops *old_lprops,
 * @lprops: LEB properties
 *
 * A LEB may have fallen off of the bottom of a heap, and ended up as
-* uncategorized even though it has enough space for us now. If that is the case
-* this function will put the LEB back onto a heap.
+* un-categorized even though it has enough space for us now. If that is the
+* case this function will put the LEB back onto a heap.
 */
 void ubifs_ensure_cat(struct ubifs_info *c, struct ubifs_lprops *lprops)
 {
@@ -436,10 +436,10 @@ int ubifs_categorize_lprops(const struct ubifs_info *c,
 /**
 * change_category - change LEB properties category.
 * @c: UBIFS file-system description object
-* @lprops: LEB properties to recategorize
+* @lprops: LEB properties to re-categorize
 *
 * LEB properties are categorized to enable fast find operations. When the LEB
-* properties change they must be recategorized.
+* properties change they must be re-categorized.
 */
 static void change_category(struct ubifs_info *c, struct ubifs_lprops *lprops)
 {
@@ -461,21 +461,18 @@ static void change_category(struct ubifs_info *c, struct ubifs_lprops *lprops)
 }
 /**
-* calc_dark - calculate LEB dark space size.
+* ubifs_calc_dark - calculate LEB dark space size.
 * @c: the UBIFS file-system description object
 * @spc: amount of free and dirty space in the LEB
 *
-* This function calculates amount of dark space in an LEB which has @spc bytes
-* of free and dirty space. Returns the calculations result.
+* This function calculates and returns amount of dark space in an LEB which
+* has @spc bytes of free and dirty space.
 *
-* Dark space is the space which is not always usable - it depends on which
-* nodes are written in which order. E.g., if an LEB has only 512 free bytes,
-* it is dark space, because it cannot fit a large data node. So UBIFS cannot
-* count on this LEB and treat these 512 bytes as usable because it is not true
-* if, for example, only big chunks of uncompressible data will be written to
-* the FS.
+* UBIFS is trying to account the space which might not be usable, and this
+* space is called "dark space". For example, if an LEB has only %512 free
+* bytes, it is dark space, because it cannot fit a large data node.
 */
-static int calc_dark(struct ubifs_info *c, int spc)
+int ubifs_calc_dark(const struct ubifs_info *c, int spc)
 {
 ubifs_assert(!(spc & 7));
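[Editor's note - illustrative example, not part of this commit.] The hunks above rename calc_dark() to ubifs_calc_dark() and export it; together with the new dbg_dump_lprop() code they classify a LEB's free+dirty bytes as "dead" when the amount is below the dead-space watermark and otherwise count part of it as "dark" (space that may be unusable, e.g. too small for a large data node). A standalone sketch of that classification; the watermark and the dark-space formula are placeholders, not UBIFS's real values:

#include <stdio.h>

#define DEAD_WM 2048                  /* invented dead-space watermark, bytes */

static void classify(int spc, int *dead, int *dark)
{
    *dead = *dark = 0;
    if (spc < DEAD_WM)
        *dead = spc;                  /* too small to ever hold a node */
    else
        *dark = spc % 4096;           /* placeholder for ubifs_calc_dark() */
}

int main(void)
{
    int dead, dark;

    classify(1024, &dead, &dark);
    printf("spc 1024  -> dead %d, dark %d\n", dead, dark);
    classify(10000, &dead, &dark);
    printf("spc 10000 -> dead %d, dark %d\n", dead, dark);
    return 0;
}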
@@ -518,7 +515,7 @@ static int is_lprops_dirty(struct ubifs_info *c, struct ubifs_lprops *lprops)
 * @free: new free space amount
 * @dirty: new dirty space amount
 * @flags: new flags
-* @idx_gc_cnt: change to the count of idx_gc list
+* @idx_gc_cnt: change to the count of @idx_gc list
 *
 * This function changes LEB properties (@free, @dirty or @flag). However, the
 * property which has the %LPROPS_NC value is not changed. Returns a pointer to
@@ -535,7 +532,7 @@ const struct ubifs_lprops *ubifs_change_lp(struct ubifs_info *c,
 {
 /*
 * This is the only function that is allowed to change lprops, so we
-* discard the const qualifier.
+* discard the "const" qualifier.
 */
 struct ubifs_lprops *lprops = (struct ubifs_lprops *)lp;
@@ -575,7 +572,7 @@ const struct ubifs_lprops *ubifs_change_lp(struct ubifs_info *c,
 if (old_spc < c->dead_wm)
 c->lst.total_dead -= old_spc;
 else
-c->lst.total_dark -= calc_dark(c, old_spc);
+c->lst.total_dark -= ubifs_calc_dark(c, old_spc);
 c->lst.total_used -= c->leb_size - old_spc;
 }
@@ -616,7 +613,7 @@ const struct ubifs_lprops *ubifs_change_lp(struct ubifs_info *c,
 if (new_spc < c->dead_wm)
 c->lst.total_dead += new_spc;
 else
-c->lst.total_dark += calc_dark(c, new_spc);
+c->lst.total_dark += ubifs_calc_dark(c, new_spc);
 c->lst.total_used += c->leb_size - new_spc;
 }
@@ -1096,7 +1093,7 @@ static int scan_check_cb(struct ubifs_info *c,
 }
 }
-sleb = ubifs_scan(c, lnum, 0, c->dbg->buf);
+sleb = ubifs_scan(c, lnum, 0, c->dbg->buf, 0);
 if (IS_ERR(sleb)) {
 /*
 * After an unclean unmount, empty and freeable LEBs
@@ -1107,7 +1104,7 @@ static int scan_check_cb(struct ubifs_info *c,
 "- continuing checking");
 lst->empty_lebs += 1;
 lst->total_free += c->leb_size;
-lst->total_dark += calc_dark(c, c->leb_size);
+lst->total_dark += ubifs_calc_dark(c, c->leb_size);
 return LPT_SCAN_CONTINUE;
 }
@@ -1117,7 +1114,7 @@ static int scan_check_cb(struct ubifs_info *c,
 "- continuing checking");
 lst->total_free += lp->free;
 lst->total_dirty += lp->dirty;
-lst->total_dark += calc_dark(c, c->leb_size);
+lst->total_dark += ubifs_calc_dark(c, c->leb_size);
 return LPT_SCAN_CONTINUE;
 }
 data->err = PTR_ERR(sleb);
@@ -1235,7 +1232,7 @@ static int scan_check_cb(struct ubifs_info *c,
 if (spc < c->dead_wm)
 lst->total_dead += spc;
 else
-lst->total_dark += calc_dark(c, spc);
+lst->total_dark += ubifs_calc_dark(c, spc);
 }
 ubifs_scan_destroy(sleb);
...
@@ -29,7 +29,8 @@
 * @c: UBIFS file-system description object
 *
 * This function scans the master node LEBs and search for the latest master
-* node. Returns zero in case of success and a negative error code in case of
+* node. Returns zero in case of success, %-EUCLEAN if there master area is
+* corrupted and requires recovery, and a negative error code in case of
 * failure.
 */
 static int scan_for_master(struct ubifs_info *c)
@@ -40,7 +41,7 @@ static int scan_for_master(struct ubifs_info *c)
 lnum = UBIFS_MST_LNUM;
-sleb = ubifs_scan(c, lnum, 0, c->sbuf);
+sleb = ubifs_scan(c, lnum, 0, c->sbuf, 1);
 if (IS_ERR(sleb))
 return PTR_ERR(sleb);
 nodes_cnt = sleb->nodes_cnt;
@@ -48,7 +49,7 @@ static int scan_for_master(struct ubifs_info *c)
 snod = list_entry(sleb->nodes.prev, struct ubifs_scan_node,
 list);
 if (snod->type != UBIFS_MST_NODE)
-goto out;
+goto out_dump;
 memcpy(c->mst_node, snod->node, snod->len);
 offs = snod->offs;
 }
@@ -56,7 +57,7 @@ static int scan_for_master(struct ubifs_info *c)
 lnum += 1;
-sleb = ubifs_scan(c, lnum, 0, c->sbuf);
+sleb = ubifs_scan(c, lnum, 0, c->sbuf, 1);
 if (IS_ERR(sleb))
 return PTR_ERR(sleb);
 if (sleb->nodes_cnt != nodes_cnt)
@@ -65,7 +66,7 @@ static int scan_for_master(struct ubifs_info *c)
 goto out;
 snod = list_entry(sleb->nodes.prev, struct ubifs_scan_node, list);
 if (snod->type != UBIFS_MST_NODE)
-goto out;
+goto out_dump;
 if (snod->offs != offs)
 goto out;
 if (memcmp((void *)c->mst_node + UBIFS_CH_SZ,
@@ -78,6 +79,12 @@ static int scan_for_master(struct ubifs_info *c)
 out:
 ubifs_scan_destroy(sleb);
+return -EUCLEAN;
+out_dump:
+ubifs_err("unexpected node type %d master LEB %d:%d",
+snod->type, lnum, snod->offs);
+ubifs_scan_destroy(sleb);
 return -EINVAL;
 }
@@ -256,7 +263,8 @@ int ubifs_read_master(struct ubifs_info *c)
 err = scan_for_master(c);
 if (err) {
-err = ubifs_recover_master_node(c);
+if (err == -EUCLEAN)
+err = ubifs_recover_master_node(c);
 if (err)
 /*
 * Note, we do not free 'c->mst_node' here because the
...
@@ -670,9 +670,10 @@ static int kill_orphans(struct ubifs_info *c)
 struct ubifs_scan_leb *sleb;
 dbg_rcvry("LEB %d", lnum);
-sleb = ubifs_scan(c, lnum, 0, c->sbuf);
+sleb = ubifs_scan(c, lnum, 0, c->sbuf, 1);
 if (IS_ERR(sleb)) {
-sleb = ubifs_recover_leb(c, lnum, 0, c->sbuf, 0);
+if (PTR_ERR(sleb) == -EUCLEAN)
+sleb = ubifs_recover_leb(c, lnum, 0, c->sbuf, 0);
 if (IS_ERR(sleb)) {
 err = PTR_ERR(sleb);
 break;
@@ -899,7 +900,7 @@ static int dbg_scan_orphans(struct ubifs_info *c, struct check_info *ci)
 for (lnum = c->orph_first; lnum <= c->orph_last; lnum++) {
 struct ubifs_scan_leb *sleb;
-sleb = ubifs_scan(c, lnum, 0, c->dbg->buf);
+sleb = ubifs_scan(c, lnum, 0, c->dbg->buf, 0);
 if (IS_ERR(sleb)) {
 err = PTR_ERR(sleb);
 break;
...
@@ -286,7 +286,7 @@ int ubifs_recover_master_node(struct ubifs_info *c)
 mst = mst2;
 }
-dbg_rcvry("recovered master node from LEB %d",
+ubifs_msg("recovered master node from LEB %d",
 (mst == mst1 ? UBIFS_MST_LNUM : UBIFS_MST_LNUM + 1));
 memcpy(c->mst_node, mst, UBIFS_MST_NODE_SZ);
@@ -790,7 +790,7 @@ struct ubifs_scan_leb *ubifs_recover_log_leb(struct ubifs_info *c, int lnum,
 * We can only recover at the end of the log, so check that the
 * next log LEB is empty or out of date.
 */
-sleb = ubifs_scan(c, next_lnum, 0, sbuf);
+sleb = ubifs_scan(c, next_lnum, 0, sbuf, 0);
 if (IS_ERR(sleb))
 return sleb;
 if (sleb->nodes_cnt) {
...
@@ -506,7 +506,7 @@ static int replay_bud(struct ubifs_info *c, int lnum, int offs, int jhead,
 if (c->need_recovery)
 sleb = ubifs_recover_leb(c, lnum, offs, c->sbuf, jhead != GCHD);
 else
-sleb = ubifs_scan(c, lnum, offs, c->sbuf);
+sleb = ubifs_scan(c, lnum, offs, c->sbuf, 0);
 if (IS_ERR(sleb))
 return PTR_ERR(sleb);
@@ -836,8 +836,8 @@ static int replay_log_leb(struct ubifs_info *c, int lnum, int offs, void *sbuf)
 const struct ubifs_cs_node *node;
 dbg_mnt("replay log LEB %d:%d", lnum, offs);
-sleb = ubifs_scan(c, lnum, offs, sbuf);
-if (IS_ERR(sleb) ) {
+sleb = ubifs_scan(c, lnum, offs, sbuf, c->need_recovery);
+if (IS_ERR(sleb)) {
 if (PTR_ERR(sleb) != -EUCLEAN || !c->need_recovery)
 return PTR_ERR(sleb);
 sleb = ubifs_recover_log_leb(c, lnum, offs, sbuf);
...
@@ -108,10 +108,9 @@ int ubifs_scan_a_node(const struct ubifs_info *c, void *buf, int len, int lnum,
 /* Make the node pads to 8-byte boundary */
 if ((node_len + pad_len) & 7) {
-if (!quiet) {
+if (!quiet)
 dbg_err("bad padding length %d - %d",
 offs, offs + node_len + pad_len);
-}
 return SCANNED_A_BAD_PAD_NODE;
 }
@@ -253,15 +252,19 @@ void ubifs_scanned_corruption(const struct ubifs_info *c, int lnum, int offs,
 * @c: UBIFS file-system description object
 * @lnum: logical eraseblock number
 * @offs: offset to start at (usually zero)
-* @sbuf: scan buffer (must be c->leb_size)
+* @sbuf: scan buffer (must be of @c->leb_size bytes in size)
+* @quiet: print no messages
 *
 * This function scans LEB number @lnum and returns complete information about
 * its contents. Returns the scaned information in case of success and,
 * %-EUCLEAN if the LEB neads recovery, and other negative error codes in case
 * of failure.
+*
+* If @quiet is non-zero, this function does not print large and scary
+* error messages and flash dumps in case of errors.
 */
 struct ubifs_scan_leb *ubifs_scan(const struct ubifs_info *c, int lnum,
-int offs, void *sbuf)
+int offs, void *sbuf, int quiet)
 {
 void *buf = sbuf + offs;
 int err, len = c->leb_size - offs;
@@ -280,7 +283,7 @@ struct ubifs_scan_leb *ubifs_scan(const struct ubifs_info *c, int lnum,
 cond_resched();
-ret = ubifs_scan_a_node(c, buf, len, lnum, offs, 0);
+ret = ubifs_scan_a_node(c, buf, len, lnum, offs, quiet);
 if (ret > 0) {
 /* Padding bytes or a valid padding node */
 offs += ret;
@@ -320,7 +323,9 @@ struct ubifs_scan_leb *ubifs_scan(const struct ubifs_info *c, int lnum,
 }
 if (offs % c->min_io_size) {
-ubifs_err("empty space starts at non-aligned offset %d", offs);
+if (!quiet)
+ubifs_err("empty space starts at non-aligned offset %d",
+offs);
 goto corrupted;;
 }
@@ -331,18 +336,25 @@ struct ubifs_scan_leb *ubifs_scan(const struct ubifs_info *c, int lnum,
 break;
 for (; len; offs++, buf++, len--)
 if (*(uint8_t *)buf != 0xff) {
-ubifs_err("corrupt empty space at LEB %d:%d",
-lnum, offs);
+if (!quiet)
+ubifs_err("corrupt empty space at LEB %d:%d",
+lnum, offs);
 goto corrupted;
 }
 return sleb;
 corrupted:
-ubifs_scanned_corruption(c, lnum, offs, buf);
+if (!quiet) {
+ubifs_scanned_corruption(c, lnum, offs, buf);
+ubifs_err("LEB %d scanning failed", lnum);
+}
 err = -EUCLEAN;
+ubifs_scan_destroy(sleb);
+return ERR_PTR(err);
 error:
-ubifs_err("LEB %d scanning failed", lnum);
+ubifs_err("LEB %d scanning failed, error %d", lnum, err);
 ubifs_scan_destroy(sleb);
 return ERR_PTR(err);
 }
...
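[Editor's note - illustrative example, not part of this commit.] The scan.c changes above thread a new 'quiet' argument through ubifs_scan() so that callers probing LEBs that may legitimately be corrupted (for example the master area, scanned with quiet=1) do not trigger large error dumps, while normal callers (quiet=0) still get them. The shape of that pattern, in a self-contained form with invented names:

#include <stdio.h>

static int scan_leb(int lnum, int quiet)
{
    int corrupt = (lnum == 7);          /* pretend LEB 7 is corrupted */

    if (corrupt) {
        if (!quiet)
            fprintf(stderr, "corrupt empty space at LEB %d\n", lnum);
        return -1;                      /* caller may fall back to recovery */
    }
    return 0;
}

int main(void)
{
    scan_leb(7, 1);   /* probing call: silent, like ubifs_scan(..., 1) */
    scan_leb(7, 0);   /* normal call: reports the problem */
    return 0;
}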
@@ -36,7 +36,6 @@
 #include <linux/mount.h>
 #include <linux/math64.h>
 #include <linux/writeback.h>
-#include <linux/smp_lock.h>
 #include "ubifs.h"
 /*
@@ -318,6 +317,8 @@ static int ubifs_write_inode(struct inode *inode, int wait)
 if (err)
 ubifs_err("can't write inode %lu, error %d",
 inode->i_ino, err);
+else
+err = dbg_check_inode_size(c, inode, ui->ui_size);
 }
 ui->dirty = 0;
@@ -447,17 +448,6 @@ static int ubifs_sync_fs(struct super_block *sb, int wait)
 if (!wait)
 return 0;
-/*
-* VFS calls '->sync_fs()' before synchronizing all dirty inodes and
-* pages, so synchronize them first, then commit the journal. Strictly
-* speaking, it is not necessary to commit the journal here,
-* synchronizing write-buffers would be enough. But committing makes
-* UBIFS free space predictions much more accurate, so we want to let
-* the user be able to get more accurate results of 'statfs()' after
-* they synchronize the file system.
-*/
-sync_inodes_sb(sb);
 /*
 * Synchronize write buffers, because 'ubifs_run_commit()' does not
 * do this if it waits for an already running commit.
@@ -468,6 +458,13 @@ static int ubifs_sync_fs(struct super_block *sb, int wait)
 return err;
 }
+/*
+* Strictly speaking, it is not necessary to commit the journal here,
+* synchronizing write-buffers would be enough. But committing makes
+* UBIFS free space predictions much more accurate, so we want to let
+* the user be able to get more accurate results of 'statfs()' after
+* they synchronize the file system.
+*/
 err = ubifs_run_commit(c);
 if (err)
 return err;
@@ -1720,8 +1717,6 @@ static void ubifs_put_super(struct super_block *sb)
 ubifs_msg("un-mount UBI device %d, volume %d", c->vi.ubi_num,
 c->vi.vol_id);
-lock_kernel();
 /*
 * The following asserts are only valid if there has not been a failure
 * of the media. For example, there will be dirty inodes if we failed
@@ -1786,8 +1781,6 @@ static void ubifs_put_super(struct super_block *sb)
 ubi_close_volume(c->ubi);
 mutex_unlock(&c->umount_mutex);
 kfree(c);
-unlock_kernel();
 }
 static int ubifs_remount_fs(struct super_block *sb, int *flags, char *data)
@@ -1803,22 +1796,17 @@ static int ubifs_remount_fs(struct super_block *sb, int *flags, char *data)
 return err;
 }
-lock_kernel();
 if ((sb->s_flags & MS_RDONLY) && !(*flags & MS_RDONLY)) {
 if (c->ro_media) {
 ubifs_msg("cannot re-mount due to prior errors");
-unlock_kernel();
 return -EROFS;
 }
 err = ubifs_remount_rw(c);
-if (err) {
-unlock_kernel();
+if (err)
 return err;
-}
 } else if (!(sb->s_flags & MS_RDONLY) && (*flags & MS_RDONLY)) {
 if (c->ro_media) {
 ubifs_msg("cannot re-mount due to prior errors");
-unlock_kernel();
 return -EROFS;
 }
 ubifs_remount_ro(c);
@@ -1833,7 +1821,6 @@ static int ubifs_remount_fs(struct super_block *sb, int *flags, char *data)
 }
 ubifs_assert(c->lst.taken_empty_lebs > 0);
-unlock_kernel();
 return 0;
 }
...
@@ -1159,8 +1159,8 @@ static struct ubifs_znode *dirty_cow_bottom_up(struct ubifs_info *c,
 * o exact match, i.e. the found zero-level znode contains key @key, then %1
 * is returned and slot number of the matched branch is stored in @n;
 * o not exact match, which means that zero-level znode does not contain
-* @key, then %0 is returned and slot number of the closed branch is stored
+* @key, then %0 is returned and slot number of the closest branch is stored
 * in @n;
 * o @key is so small that it is even less than the lowest key of the
 * leftmost zero-level node, then %0 is returned and %0 is stored in @n.
 *
@@ -1433,7 +1433,7 @@ static int maybe_leb_gced(struct ubifs_info *c, int lnum, int gc_seq1)
 * @lnum: LEB number is returned here
 * @offs: offset is returned here
 *
-* This function look up and reads node with key @key. The caller has to make
+* This function looks up and reads node with key @key. The caller has to make
 * sure the @node buffer is large enough to fit the node. Returns zero in case
 * of success, %-ENOENT if the node was not found, and a negative error code in
 * case of failure. The node location can be returned in @lnum and @offs.
@@ -3268,3 +3268,73 @@ int ubifs_dirty_idx_node(struct ubifs_info *c, union ubifs_key *key, int level,
 mutex_unlock(&c->tnc_mutex);
 return err;
 }
+#ifdef CONFIG_UBIFS_FS_DEBUG
+/**
+* dbg_check_inode_size - check if inode size is correct.
+* @c: UBIFS file-system description object
+* @inum: inode number
+* @size: inode size
+*
+* This function makes sure that the inode size (@size) is correct and it does
+* not have any pages beyond @size. Returns zero if the inode is OK, %-EINVAL
+* if it has a data page beyond @size, and other negative error code in case of
+* other errors.
+*/
+int dbg_check_inode_size(struct ubifs_info *c, const struct inode *inode,
+loff_t size)
+{
+int err, n;
+union ubifs_key from_key, to_key, *key;
+struct ubifs_znode *znode;
+unsigned int block;
+if (!S_ISREG(inode->i_mode))
+return 0;
+if (!(ubifs_chk_flags & UBIFS_CHK_GEN))
+return 0;
+block = (size + UBIFS_BLOCK_SIZE - 1) >> UBIFS_BLOCK_SHIFT;
+data_key_init(c, &from_key, inode->i_ino, block);
+highest_data_key(c, &to_key, inode->i_ino);
+mutex_lock(&c->tnc_mutex);
+err = ubifs_lookup_level0(c, &from_key, &znode, &n);
+if (err < 0)
+goto out_unlock;
+if (err) {
+err = -EINVAL;
+key = &from_key;
+goto out_dump;
+}
+err = tnc_next(c, &znode, &n);
+if (err == -ENOENT) {
+err = 0;
+goto out_unlock;
+}
+if (err < 0)
+goto out_unlock;
+ubifs_assert(err == 0);
+key = &znode->zbranch[n].key;
+if (!key_in_range(c, key, &from_key, &to_key))
+goto out_unlock;
+out_dump:
+block = key_block(c, key);
+ubifs_err("inode %lu has size %lld, but there are data at offset %lld "
+"(data key %s)", (unsigned long)inode->i_ino, size,
+((loff_t)block) << UBIFS_BLOCK_SHIFT, DBGKEY(key));
+dbg_dump_inode(c, inode);
+dbg_dump_stack();
+err = -EINVAL;
+out_unlock:
+mutex_unlock(&c->tnc_mutex);
+return err;
+}
+#endif /* CONFIG_UBIFS_FS_DEBUG */
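[Editor's note - illustrative example, not part of this commit.] dbg_check_inode_size() above computes the first data-block number that lies entirely beyond @size with a round-up shift and then asks the TNC whether any data key at or after that block exists. The round-up arithmetic on its own (UBIFS data blocks are 4096 bytes, hence the shift of 12):

#include <stdio.h>

#define UBIFS_BLOCK_SIZE  4096
#define UBIFS_BLOCK_SHIFT 12

int main(void)
{
    long long sizes[] = { 0, 1, 4096, 4097, 8192 };

    for (int i = 0; i < 5; i++) {
        unsigned int block =
            (unsigned int)((sizes[i] + UBIFS_BLOCK_SIZE - 1) >> UBIFS_BLOCK_SHIFT);
        /* e.g. a 4097-byte file occupies blocks 0 and 1; block 2 must be empty */
        printf("size %lld -> first block that must hold no data: %u\n",
               sizes[i], block);
    }
    return 0;
}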
@@ -245,7 +245,7 @@ static int layout_leb_in_gaps(struct ubifs_info *c, int *p)
 * it is more comprehensive and less efficient than is needed for this
 * purpose.
 */
-sleb = ubifs_scan(c, lnum, 0, c->ileb_buf);
+sleb = ubifs_scan(c, lnum, 0, c->ileb_buf, 0);
 c->ileb_len = 0;
 if (IS_ERR(sleb))
 return PTR_ERR(sleb);
...
@@ -135,6 +135,13 @@
 /* The key is always at the same position in all keyed nodes */
 #define UBIFS_KEY_OFFSET offsetof(struct ubifs_ino_node, key)
+/* Garbage collector journal head number */
+#define UBIFS_GC_HEAD 0
+/* Base journal head number */
+#define UBIFS_BASE_HEAD 1
+/* Data journal head number */
+#define UBIFS_DATA_HEAD 2
 /*
 * LEB Properties Tree node types.
 *
...
@@ -105,12 +105,10 @@
 /* Number of non-data journal heads */
 #define NONDATA_JHEADS_CNT 2
-/* Garbage collector head */
-#define GCHD 0
-/* Base journal head number */
-#define BASEHD 1
-/* First "general purpose" journal head */
-#define DATAHD 2
+/* Shorter names for journal head numbers for internal usage */
+#define GCHD UBIFS_GC_HEAD
+#define BASEHD UBIFS_BASE_HEAD
+#define DATAHD UBIFS_DATA_HEAD
 /* 'No change' value for 'ubifs_change_lp()' */
 #define LPROPS_NC 0x80000001
@@ -1451,7 +1449,7 @@ int ubifs_sync_wbufs_by_inode(struct ubifs_info *c, struct inode *inode);
 /* scan.c */
 struct ubifs_scan_leb *ubifs_scan(const struct ubifs_info *c, int lnum,
-int offs, void *sbuf);
+int offs, void *sbuf, int quiet);
 void ubifs_scan_destroy(struct ubifs_scan_leb *sleb);
 int ubifs_scan_a_node(const struct ubifs_info *c, void *buf, int len, int lnum,
 int offs, int quiet);
@@ -1676,6 +1674,7 @@ const struct ubifs_lprops *ubifs_fast_find_free(struct ubifs_info *c);
 const struct ubifs_lprops *ubifs_fast_find_empty(struct ubifs_info *c);
 const struct ubifs_lprops *ubifs_fast_find_freeable(struct ubifs_info *c);
 const struct ubifs_lprops *ubifs_fast_find_frdi_idx(struct ubifs_info *c);
+int ubifs_calc_dark(const struct ubifs_info *c, int spc);
 /* file.c */
 int ubifs_fsync(struct file *file, struct dentry *dentry, int datasync);
...
@@ -78,9 +78,9 @@ enum {
 SECURITY_XATTR,
 };
-static struct inode_operations none_inode_operations;
+static const struct inode_operations none_inode_operations;
 static struct address_space_operations none_address_operations;
-static struct file_operations none_file_operations;
+static const struct file_operations none_file_operations;
 /**
 * create_xattr - create an extended attribute.
...