Commit e4ce30f3 authored by Linus Torvalds

Merge branch 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4

* 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tytso/ext4: (40 commits)
  ext4: Make fsync sync new parent directories in no-journal mode
  ext4: Drop whitespace at end of lines
  ext4: Fix compat EXT4_IOC_ADD_GROUP
  ext4: Conditionally define compat ioctl numbers
  tracing: Convert more ext4 events to DEFINE_EVENT
  ext4: Add new tracepoints to track mballoc's buddy bitmap loads
  ext4: Add a missing trace hook
  ext4: restart ext4_ext_remove_space() after transaction restart
  ext4: Clear the EXT4_EOFBLOCKS_FL flag only when warranted
  ext4: Avoid crashing on NULL ptr dereference on a filesystem error
  ext4: Use bitops to read/modify i_flags in struct ext4_inode_info
  ext4: Convert calls of ext4_error() to EXT4_ERROR_INODE()
  ext4: Convert callers of ext4_get_blocks() to use ext4_map_blocks()
  ext4: Add new abstraction ext4_map_blocks() underneath ext4_get_blocks()
  ext4: Use our own write_cache_pages()
  ext4: Show journal_checksum option
  ext4: Fix for ext4_mb_collect_stats()
  ext4: check for a good block group before loading buddy pages
  ext4: Prevent creation of files larger than RLIMIT_FSIZE using fallocate
  ext4: Remove extraneous newlines in ext4_msg() calls
  ...

Fixed up trivial conflict in fs/ext4/fsync.c
parents b899ebeb 14ece102
......@@ -591,14 +591,15 @@ ext4_fsblk_t ext4_new_meta_blocks(handle_t *handle, struct inode *inode,
ret = ext4_mb_new_blocks(handle, &ar, errp);
if (count)
*count = ar.len;
/*
* Account for the allocated meta blocks
* Account for the allocated meta blocks. We will never
* fail EDQUOT for metadata, but we do account for it.
*/
if (!(*errp) && EXT4_I(inode)->i_delalloc_reserved_flag) {
spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
EXT4_I(inode)->i_allocated_meta_blocks += ar.len;
spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
dquot_alloc_block_nofail(inode, ar.len);
}
return ret;
}
......
......@@ -83,11 +83,10 @@ int ext4_check_dir_entry(const char *function, struct inode *dir,
error_msg = "inode out of bounds";
if (error_msg != NULL)
__ext4_error(dir->i_sb, function,
"bad entry in directory #%lu: %s - block=%llu"
ext4_error_inode(function, dir,
"bad entry in directory: %s - block=%llu"
"offset=%u(%u), inode=%u, rec_len=%d, name_len=%d",
dir->i_ino, error_msg,
(unsigned long long) bh->b_blocknr,
error_msg, (unsigned long long) bh->b_blocknr,
(unsigned) (offset%bh->b_size), offset,
le32_to_cpu(de->inode),
rlen, de->name_len);
......@@ -111,7 +110,7 @@ static int ext4_readdir(struct file *filp,
if (EXT4_HAS_COMPAT_FEATURE(inode->i_sb,
EXT4_FEATURE_COMPAT_DIR_INDEX) &&
((EXT4_I(inode)->i_flags & EXT4_INDEX_FL) ||
((ext4_test_inode_flag(inode, EXT4_INODE_INDEX)) ||
((inode->i_size >> sb->s_blocksize_bits) == 1))) {
err = ext4_dx_readdir(filp, dirent, filldir);
if (err != ERR_BAD_DX_DIR) {
......@@ -122,20 +121,20 @@ static int ext4_readdir(struct file *filp,
* We don't set the inode dirty flag since it's not
* critical that it get flushed back to the disk.
*/
EXT4_I(filp->f_path.dentry->d_inode)->i_flags &= ~EXT4_INDEX_FL;
ext4_clear_inode_flag(filp->f_path.dentry->d_inode, EXT4_INODE_INDEX);
}
stored = 0;
offset = filp->f_pos & (sb->s_blocksize - 1);
while (!error && !stored && filp->f_pos < inode->i_size) {
ext4_lblk_t blk = filp->f_pos >> EXT4_BLOCK_SIZE_BITS(sb);
struct buffer_head map_bh;
struct ext4_map_blocks map;
struct buffer_head *bh = NULL;
map_bh.b_state = 0;
err = ext4_get_blocks(NULL, inode, blk, 1, &map_bh, 0);
map.m_lblk = filp->f_pos >> EXT4_BLOCK_SIZE_BITS(sb);
map.m_len = 1;
err = ext4_map_blocks(NULL, inode, &map, 0);
if (err > 0) {
pgoff_t index = map_bh.b_blocknr >>
pgoff_t index = map.m_pblk >>
(PAGE_CACHE_SHIFT - inode->i_blkbits);
if (!ra_has_index(&filp->f_ra, index))
page_cache_sync_readahead(
......@@ -143,7 +142,7 @@ static int ext4_readdir(struct file *filp,
&filp->f_ra, filp,
index, 1);
filp->f_ra.prev_pos = (loff_t)index << PAGE_CACHE_SHIFT;
bh = ext4_bread(NULL, inode, blk, 0, &err);
bh = ext4_bread(NULL, inode, map.m_lblk, 0, &err);
}
/*
......@@ -152,9 +151,8 @@ static int ext4_readdir(struct file *filp,
*/
if (!bh) {
if (!dir_has_error) {
ext4_error(sb, "directory #%lu "
EXT4_ERROR_INODE(inode, "directory "
"contains a hole at offset %Lu",
inode->i_ino,
(unsigned long long) filp->f_pos);
dir_has_error = 1;
}
......
......@@ -29,6 +29,9 @@
#include <linux/wait.h>
#include <linux/blockgroup_lock.h>
#include <linux/percpu_counter.h>
#ifdef __KERNEL__
#include <linux/compat.h>
#endif
/*
* The fourth extended filesystem constants/structures
......@@ -54,10 +57,10 @@
#endif
#define EXT4_ERROR_INODE(inode, fmt, a...) \
ext4_error_inode(__func__, (inode), (fmt), ## a);
ext4_error_inode(__func__, (inode), (fmt), ## a)
#define EXT4_ERROR_FILE(file, fmt, a...) \
ext4_error_file(__func__, (file), (fmt), ## a);
ext4_error_file(__func__, (file), (fmt), ## a)
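Dropping the trailing semicolons matters when the macros are used as the body of an if/else; a minimal sketch of a caller (err, inode, block and retry() are illustrative names, not from this patch):
	if (err)
		EXT4_ERROR_INODE(inode, "bad block %llu", block);
	else
		retry();
	/* With the old ';' inside the macro, the caller's ';' became an extra
	 * empty statement, so the else no longer paired with the if and the
	 * build broke. */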
/* data type for block offset of block group */
typedef int ext4_grpblk_t;
......@@ -125,6 +128,29 @@ struct ext4_allocation_request {
unsigned int flags;
};
/*
* Logical to physical block mapping, used by ext4_map_blocks()
*
* This structure is used to pass requests into ext4_map_blocks() as
* well as to store the information returned by ext4_map_blocks(). It
* takes less room on the stack than a struct buffer_head.
*/
#define EXT4_MAP_NEW (1 << BH_New)
#define EXT4_MAP_MAPPED (1 << BH_Mapped)
#define EXT4_MAP_UNWRITTEN (1 << BH_Unwritten)
#define EXT4_MAP_BOUNDARY (1 << BH_Boundary)
#define EXT4_MAP_UNINIT (1 << BH_Uninit)
#define EXT4_MAP_FLAGS (EXT4_MAP_NEW | EXT4_MAP_MAPPED |\
EXT4_MAP_UNWRITTEN | EXT4_MAP_BOUNDARY |\
EXT4_MAP_UNINIT)
struct ext4_map_blocks {
ext4_fsblk_t m_pblk;
ext4_lblk_t m_lblk;
unsigned int m_len;
unsigned int m_flags;
};
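A hedged sketch of the intended calling pattern, modeled on the ext4_readdir conversion earlier in this diff (lblk and pblk are placeholder variables):
	struct ext4_map_blocks map;
	int ret;

	map.m_lblk = lblk;			/* logical block to resolve */
	map.m_len = 1;				/* how many blocks we want mapped */
	ret = ext4_map_blocks(NULL, inode, &map, 0);	/* NULL handle: lookup only */
	if (ret > 0 && (map.m_flags & EXT4_MAP_MAPPED))
		pblk = map.m_pblk;		/* physical block of the first mapped block */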
/*
* For delayed allocation tracking
*/
......@@ -321,6 +347,83 @@ static inline __u32 ext4_mask_flags(umode_t mode, __u32 flags)
return flags & EXT4_OTHER_FLMASK;
}
/*
* Inode flags used for atomic set/get
*/
enum {
EXT4_INODE_SECRM = 0, /* Secure deletion */
EXT4_INODE_UNRM = 1, /* Undelete */
EXT4_INODE_COMPR = 2, /* Compress file */
EXT4_INODE_SYNC = 3, /* Synchronous updates */
EXT4_INODE_IMMUTABLE = 4, /* Immutable file */
EXT4_INODE_APPEND = 5, /* writes to file may only append */
EXT4_INODE_NODUMP = 6, /* do not dump file */
EXT4_INODE_NOATIME = 7, /* do not update atime */
/* Reserved for compression usage... */
EXT4_INODE_DIRTY = 8,
EXT4_INODE_COMPRBLK = 9, /* One or more compressed clusters */
EXT4_INODE_NOCOMPR = 10, /* Don't compress */
EXT4_INODE_ECOMPR = 11, /* Compression error */
/* End compression flags --- maybe not all used */
EXT4_INODE_INDEX = 12, /* hash-indexed directory */
EXT4_INODE_IMAGIC = 13, /* AFS directory */
EXT4_INODE_JOURNAL_DATA = 14, /* file data should be journaled */
EXT4_INODE_NOTAIL = 15, /* file tail should not be merged */
EXT4_INODE_DIRSYNC = 16, /* dirsync behaviour (directories only) */
EXT4_INODE_TOPDIR = 17, /* Top of directory hierarchies*/
EXT4_INODE_HUGE_FILE = 18, /* Set to each huge file */
EXT4_INODE_EXTENTS = 19, /* Inode uses extents */
EXT4_INODE_EA_INODE = 21, /* Inode used for large EA */
EXT4_INODE_EOFBLOCKS = 22, /* Blocks allocated beyond EOF */
EXT4_INODE_RESERVED = 31, /* reserved for ext4 lib */
};
#define TEST_FLAG_VALUE(FLAG) (EXT4_##FLAG##_FL == (1 << EXT4_INODE_##FLAG))
#define CHECK_FLAG_VALUE(FLAG) if (!TEST_FLAG_VALUE(FLAG)) { \
printk(KERN_EMERG "EXT4 flag fail: " #FLAG ": %d %d\n", \
EXT4_##FLAG##_FL, EXT4_INODE_##FLAG); BUG_ON(1); }
/*
* Since it's pretty easy to mix up bit numbers and hex values, and we
* can't do a compile-time test for ENUM values, we use a run-time
* test to make sure that EXT4_XXX_FL is consistent with respect to
* EXT4_INODE_XXX. If all is well the printk and BUG_ON will all drop
* out so it won't cost any extra space in the compiled kernel image.
* But it's important that these values are the same, since we are
* using EXT4_INODE_XXX to test for the flag values, but EXT4_XXX_FL
* must be consistent with the values of FS_XXX_FL defined in
* include/linux/fs.h and the on-disk values found in ext2, ext3, and
* ext4 filesystems, and of course the values defined in e2fsprogs.
*
* It's not paranoia if Murphy's Law really *is* out to get you. :-)
*/
static inline void ext4_check_flag_values(void)
{
CHECK_FLAG_VALUE(SECRM);
CHECK_FLAG_VALUE(UNRM);
CHECK_FLAG_VALUE(COMPR);
CHECK_FLAG_VALUE(SYNC);
CHECK_FLAG_VALUE(IMMUTABLE);
CHECK_FLAG_VALUE(APPEND);
CHECK_FLAG_VALUE(NODUMP);
CHECK_FLAG_VALUE(NOATIME);
CHECK_FLAG_VALUE(DIRTY);
CHECK_FLAG_VALUE(COMPRBLK);
CHECK_FLAG_VALUE(NOCOMPR);
CHECK_FLAG_VALUE(ECOMPR);
CHECK_FLAG_VALUE(INDEX);
CHECK_FLAG_VALUE(IMAGIC);
CHECK_FLAG_VALUE(JOURNAL_DATA);
CHECK_FLAG_VALUE(NOTAIL);
CHECK_FLAG_VALUE(DIRSYNC);
CHECK_FLAG_VALUE(TOPDIR);
CHECK_FLAG_VALUE(HUGE_FILE);
CHECK_FLAG_VALUE(EXTENTS);
CHECK_FLAG_VALUE(EA_INODE);
CHECK_FLAG_VALUE(EOFBLOCKS);
CHECK_FLAG_VALUE(RESERVED);
}
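For a single flag the check boils down to a constant comparison the compiler can discard; roughly (using the standard value EXT4_INDEX_FL == 0x00001000):
	/* CHECK_FLAG_VALUE(INDEX) expands to approximately: */
	if (!(EXT4_INDEX_FL == (1 << EXT4_INODE_INDEX))) {	/* 0x00001000 == (1 << 12) */
		printk(KERN_EMERG "EXT4 flag fail: INDEX: %d %d\n",
		       EXT4_INDEX_FL, EXT4_INODE_INDEX);
		BUG_ON(1);	/* never reached; the whole branch is dropped by the compiler */
	}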
/* Used to pass group descriptor data when online resize is done */
struct ext4_new_group_input {
__u32 group; /* Group number for this data */
......@@ -332,6 +435,18 @@ struct ext4_new_group_input {
__u16 unused;
};
#if defined(__KERNEL__) && defined(CONFIG_COMPAT)
struct compat_ext4_new_group_input {
u32 group;
compat_u64 block_bitmap;
compat_u64 inode_bitmap;
compat_u64 inode_table;
u32 blocks_count;
u16 reserved_blocks;
u16 unused;
};
#endif
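For context (an illustration, not part of the patch): a separate compat structure is needed because 32-bit user space aligns 64-bit members to 4 bytes, so the native layout a 64-bit kernel sees does not match what a 32-bit caller passes in; compat_u64 restores the 32-bit alignment:
	/* 64-bit native struct ext4_new_group_input:  block_bitmap at offset 8 (4 bytes of padding after group) */
	/* 32-bit caller's view of the same struct:    block_bitmap at offset 4 (no padding) */
	/* compat_u64 is 4-byte aligned, so compat_ext4_new_group_input matches the 32-bit layout. */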
/* The struct ext4_new_group_input in kernel space, with free_blocks_count */
struct ext4_new_group_data {
__u32 group;
......@@ -398,6 +513,7 @@ struct ext4_new_group_data {
#define EXT4_IOC_ALLOC_DA_BLKS _IO('f', 12)
#define EXT4_IOC_MOVE_EXT _IOWR('f', 15, struct move_extent)
#if defined(__KERNEL__) && defined(CONFIG_COMPAT)
/*
* ioctl commands in 32 bit emulation
*/
......@@ -408,11 +524,13 @@ struct ext4_new_group_data {
#define EXT4_IOC32_GETRSVSZ _IOR('f', 5, int)
#define EXT4_IOC32_SETRSVSZ _IOW('f', 6, int)
#define EXT4_IOC32_GROUP_EXTEND _IOW('f', 7, unsigned int)
#define EXT4_IOC32_GROUP_ADD _IOW('f', 8, struct compat_ext4_new_group_input)
#ifdef CONFIG_JBD2_DEBUG
#define EXT4_IOC32_WAIT_FOR_READONLY _IOR('f', 99, int)
#endif
#define EXT4_IOC32_GETVERSION_OLD FS_IOC32_GETVERSION
#define EXT4_IOC32_SETVERSION_OLD FS_IOC32_SETVERSION
#endif
/*
......@@ -616,9 +734,8 @@ struct ext4_ext_cache {
*/
struct ext4_inode_info {
__le32 i_data[15]; /* unconverted */
__u32 i_flags;
ext4_fsblk_t i_file_acl;
__u32 i_dtime;
ext4_fsblk_t i_file_acl;
/*
* i_block_group is the number of the block group which contains
......@@ -629,6 +746,7 @@ struct ext4_inode_info {
*/
ext4_group_t i_block_group;
unsigned long i_state_flags; /* Dynamic state flags */
unsigned long i_flags;
ext4_lblk_t i_dir_start_lookup;
#ifdef CONFIG_EXT4_FS_XATTR
......@@ -1062,22 +1180,25 @@ enum {
EXT4_STATE_DA_ALLOC_CLOSE, /* Alloc DA blks on close */
EXT4_STATE_EXT_MIGRATE, /* Inode is migrating */
EXT4_STATE_DIO_UNWRITTEN, /* need convert on dio done*/
EXT4_STATE_NEWENTRY, /* File just added to dir */
};
static inline int ext4_test_inode_state(struct inode *inode, int bit)
{
return test_bit(bit, &EXT4_I(inode)->i_state_flags);
}
static inline void ext4_set_inode_state(struct inode *inode, int bit)
{
set_bit(bit, &EXT4_I(inode)->i_state_flags);
#define EXT4_INODE_BIT_FNS(name, field) \
static inline int ext4_test_inode_##name(struct inode *inode, int bit) \
{ \
return test_bit(bit, &EXT4_I(inode)->i_##field); \
} \
static inline void ext4_set_inode_##name(struct inode *inode, int bit) \
{ \
set_bit(bit, &EXT4_I(inode)->i_##field); \
} \
static inline void ext4_clear_inode_##name(struct inode *inode, int bit) \
{ \
clear_bit(bit, &EXT4_I(inode)->i_##field); \
}
static inline void ext4_clear_inode_state(struct inode *inode, int bit)
{
clear_bit(bit, &EXT4_I(inode)->i_state_flags);
}
EXT4_INODE_BIT_FNS(flag, flags)
EXT4_INODE_BIT_FNS(state, state_flags)
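The two instantiations above generate ext4_{test,set,clear}_inode_flag() and ext4_{test,set,clear}_inode_state(); a hedged sketch of a typical caller (the surrounding logic is invented for illustration):
	/* i_flags and i_state_flags are unsigned long because test_bit()/
	 * set_bit()/clear_bit() operate on unsigned long words. */
	if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
		return -EOPNOTSUPP;		/* this caller only handles extent-mapped files */
	ext4_set_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);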
#else
/* Assume that user mode programs are passing in an ext4fs superblock, not
* a kernel struct super_block. This will allow us to call the feature-test
......@@ -1264,7 +1385,7 @@ struct ext4_dir_entry_2 {
#define is_dx(dir) (EXT4_HAS_COMPAT_FEATURE(dir->i_sb, \
EXT4_FEATURE_COMPAT_DIR_INDEX) && \
(EXT4_I(dir)->i_flags & EXT4_INDEX_FL))
ext4_test_inode_flag((dir), EXT4_INODE_INDEX))
#define EXT4_DIR_LINK_MAX(dir) (!is_dx(dir) && (dir)->i_nlink >= EXT4_LINK_MAX)
#define EXT4_DIR_LINK_EMPTY(dir) ((dir)->i_nlink == 2 || (dir)->i_nlink == 1)
......@@ -1678,6 +1799,7 @@ struct ext4_group_info {
ext4_grpblk_t bb_first_free; /* first free block */
ext4_grpblk_t bb_free; /* total free blocks */
ext4_grpblk_t bb_fragments; /* nr of freespace fragments */
ext4_grpblk_t bb_largest_free_order;/* order of largest frag in BG */
struct list_head bb_prealloc_list;
#ifdef DOUBLE_CHECK
void *bb_bitmap;
......@@ -1772,9 +1894,8 @@ extern int ext4_ext_tree_init(handle_t *handle, struct inode *);
extern int ext4_ext_writepage_trans_blocks(struct inode *, int);
extern int ext4_ext_index_trans_blocks(struct inode *inode, int nrblocks,
int chunk);
extern int ext4_ext_get_blocks(handle_t *handle, struct inode *inode,
ext4_lblk_t iblock, unsigned int max_blocks,
struct buffer_head *bh_result, int flags);
extern int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
struct ext4_map_blocks *map, int flags);
extern void ext4_ext_truncate(struct inode *);
extern void ext4_ext_init(struct super_block *);
extern void ext4_ext_release(struct super_block *);
......@@ -1782,6 +1903,8 @@ extern long ext4_fallocate(struct inode *inode, int mode, loff_t offset,
loff_t len);
extern int ext4_convert_unwritten_extents(struct inode *inode, loff_t offset,
ssize_t len);
extern int ext4_map_blocks(handle_t *handle, struct inode *inode,
struct ext4_map_blocks *map, int flags);
extern int ext4_get_blocks(handle_t *handle, struct inode *inode,
sector_t block, unsigned int max_blocks,
struct buffer_head *bh, int flags);
......
......@@ -273,7 +273,7 @@ static inline int ext4_should_journal_data(struct inode *inode)
return 1;
if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
return 1;
if (EXT4_I(inode)->i_flags & EXT4_JOURNAL_DATA_FL)
if (ext4_test_inode_flag(inode, EXT4_INODE_JOURNAL_DATA))
return 1;
return 0;
}
......@@ -284,7 +284,7 @@ static inline int ext4_should_order_data(struct inode *inode)
return 0;
if (!S_ISREG(inode->i_mode))
return 0;
if (EXT4_I(inode)->i_flags & EXT4_JOURNAL_DATA_FL)
if (ext4_test_inode_flag(inode, EXT4_INODE_JOURNAL_DATA))
return 0;
if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA)
return 1;
......@@ -297,7 +297,7 @@ static inline int ext4_should_writeback_data(struct inode *inode)
return 0;
if (EXT4_JOURNAL(inode) == NULL)
return 1;
if (EXT4_I(inode)->i_flags & EXT4_JOURNAL_DATA_FL)
if (ext4_test_inode_flag(inode, EXT4_INODE_JOURNAL_DATA))
return 0;
if (test_opt(inode->i_sb, DATA_FLAGS) == EXT4_MOUNT_WRITEBACK_DATA)
return 1;
......@@ -321,7 +321,7 @@ static inline int ext4_should_dioread_nolock(struct inode *inode)
return 0;
if (!S_ISREG(inode->i_mode))
return 0;
if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL))
if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
return 0;
if (ext4_should_journal_data(inode))
return 0;
......
......@@ -66,7 +66,7 @@ ext4_file_write(struct kiocb *iocb, const struct iovec *iov,
* is smaller than s_maxbytes, which is for extent-mapped files.
*/
if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL)) {
if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
size_t length = iov_length(iov, nr_segs);
......
......@@ -34,6 +34,29 @@
#include <trace/events/ext4.h>
/*
* If we're not journaling and this is a just-created file, we have to
* sync our parent directory (if it was freshly created) since
* otherwise it will only be written by writeback, leaving a huge
* window during which a crash may lose the file. This may apply to
* the parent directory's parent as well, and so on recursively, if
* they are also freshly created.
*/
static void ext4_sync_parent(struct inode *inode)
{
struct dentry *dentry = NULL;
while (inode && ext4_test_inode_state(inode, EXT4_STATE_NEWENTRY)) {
ext4_clear_inode_state(inode, EXT4_STATE_NEWENTRY);
dentry = list_entry(inode->i_dentry.next,
struct dentry, d_alias);
if (!dentry || !dentry->d_parent || !dentry->d_parent->d_inode)
break;
inode = dentry->d_parent->d_inode;
sync_mapping_buffers(inode->i_mapping);
}
}
/*
* akpm: A new design for ext4_sync_file().
*
......@@ -67,8 +90,12 @@ int ext4_sync_file(struct file *file, struct dentry *dentry, int datasync)
if (ret < 0)
return ret;
if (!journal)
return simple_fsync(file, dentry, datasync);
if (!journal) {
ret = simple_fsync(file, dentry, datasync);
if (!ret && !list_empty(&inode->i_dentry))
ext4_sync_parent(inode);
return ret;
}
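	/*
	 * Illustration (not part of the patch): the intended no-journal
	 * sequence is roughly
	 *
	 *   creat("dir/newfile")  -> ext4_add_entry() marks the new inode
	 *                            EXT4_STATE_NEWENTRY (see the namei.c
	 *                            hunk below)
	 *   write(fd, buf, len)
	 *   fsync(fd)             -> simple_fsync() writes the file itself,
	 *                            ext4_sync_parent() then flushes "dir"
	 *                            and keeps walking up while each parent
	 *                            is itself marked NEWENTRY.
	 */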
/*
* data=writeback,ordered:
......@@ -102,7 +129,7 @@ int ext4_sync_file(struct file *file, struct dentry *dentry, int datasync)
(journal->j_flags & JBD2_BARRIER))
blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL,
NULL, BLKDEV_IFL_WAIT);
jbd2_log_wait_commit(journal, commit_tid);
ret = jbd2_log_wait_commit(journal, commit_tid);
} else if (journal->j_flags & JBD2_BARRIER)
blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL,
BLKDEV_IFL_WAIT);
......
......@@ -240,56 +240,49 @@ void ext4_free_inode(handle_t *handle, struct inode *inode)
if (fatal)
goto error_return;
/* Ok, now we can actually update the inode bitmaps.. */
cleared = ext4_clear_bit_atomic(ext4_group_lock_ptr(sb, block_group),
bit, bitmap_bh->b_data);
if (!cleared)
ext4_error(sb, "bit already cleared for inode %lu", ino);
else {
fatal = -ESRCH;
gdp = ext4_get_group_desc(sb, block_group, &bh2);
if (gdp) {
BUFFER_TRACE(bh2, "get_write_access");
fatal = ext4_journal_get_write_access(handle, bh2);
if (fatal) goto error_return;
if (gdp) {
}
ext4_lock_group(sb, block_group);
cleared = ext4_clear_bit(bit, bitmap_bh->b_data);
if (fatal || !cleared) {
ext4_unlock_group(sb, block_group);
goto out;
}
count = ext4_free_inodes_count(sb, gdp) + 1;
ext4_free_inodes_set(sb, gdp, count);
if (is_directory) {
count = ext4_used_dirs_count(sb, gdp) - 1;
ext4_used_dirs_set(sb, gdp, count);
if (sbi->s_log_groups_per_flex) {
ext4_group_t f;
f = ext4_flex_group(sbi, block_group);
atomic_dec(&sbi->s_flex_groups[f].used_dirs);
}
percpu_counter_dec(&sbi->s_dirs_counter);
}
gdp->bg_checksum = ext4_group_desc_csum(sbi,
block_group, gdp);
gdp->bg_checksum = ext4_group_desc_csum(sbi, block_group, gdp);
ext4_unlock_group(sb, block_group);
percpu_counter_inc(&sbi->s_freeinodes_counter);
if (is_directory)
percpu_counter_dec(&sbi->s_dirs_counter);
percpu_counter_inc(&sbi->s_freeinodes_counter);
if (sbi->s_log_groups_per_flex) {
ext4_group_t f;
ext4_group_t f = ext4_flex_group(sbi, block_group);
f = ext4_flex_group(sbi, block_group);
atomic_inc(&sbi->s_flex_groups[f].free_inodes);
}
if (is_directory)
atomic_dec(&sbi->s_flex_groups[f].used_dirs);
}
BUFFER_TRACE(bh2, "call ext4_handle_dirty_metadata");
err = ext4_handle_dirty_metadata(handle, NULL, bh2);
if (!fatal) fatal = err;
}
fatal = ext4_handle_dirty_metadata(handle, NULL, bh2);
out:
if (cleared) {
BUFFER_TRACE(bitmap_bh, "call ext4_handle_dirty_metadata");
err = ext4_handle_dirty_metadata(handle, NULL, bitmap_bh);
if (!fatal)
fatal = err;
sb->s_dirt = 1;
} else
ext4_error(sb, "bit already cleared for inode %lu", ino);
error_return:
brelse(bitmap_bh);
ext4_std_error(sb, fatal);
......@@ -499,7 +492,7 @@ static int find_group_orlov(struct super_block *sb, struct inode *parent,
if (S_ISDIR(mode) &&
((parent == sb->s_root->d_inode) ||
(EXT4_I(parent)->i_flags & EXT4_TOPDIR_FL))) {
(ext4_test_inode_flag(parent, EXT4_INODE_TOPDIR)))) {
int best_ndir = inodes_per_group;
int ret = -1;
......@@ -1041,7 +1034,7 @@ struct inode *ext4_new_inode(handle_t *handle, struct inode *dir, int mode,
if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_EXTENTS)) {
/* set extent flag only for directory, file and normal symlink*/
if (S_ISDIR(mode) || S_ISREG(mode) || S_ISLNK(mode)) {
EXT4_I(inode)->i_flags |= EXT4_EXTENTS_FL;
ext4_set_inode_flag(inode, EXT4_INODE_EXTENTS);
ext4_ext_tree_init(handle, inode);
}
}
......
......@@ -373,7 +373,30 @@ long ext4_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
case EXT4_IOC32_SETRSVSZ:
cmd = EXT4_IOC_SETRSVSZ;
break;
case EXT4_IOC_GROUP_ADD:
case EXT4_IOC32_GROUP_ADD: {
struct compat_ext4_new_group_input __user *uinput;
struct ext4_new_group_input input;
mm_segment_t old_fs;
int err;
uinput = compat_ptr(arg);
err = get_user(input.group, &uinput->group);
err |= get_user(input.block_bitmap, &uinput->block_bitmap);
err |= get_user(input.inode_bitmap, &uinput->inode_bitmap);
err |= get_user(input.inode_table, &uinput->inode_table);
err |= get_user(input.blocks_count, &uinput->blocks_count);
err |= get_user(input.reserved_blocks,
&uinput->reserved_blocks);
if (err)
return -EFAULT;
old_fs = get_fs();
set_fs(KERNEL_DS);
err = ext4_ioctl(file, EXT4_IOC_GROUP_ADD,
(unsigned long) &input);
set_fs(old_fs);
return err;
}
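	/*
	 * Descriptive note: the get_fs()/set_fs(KERNEL_DS) pair above lets
	 * ext4_ioctl()'s copy_from_user() accept &input, which lives on the
	 * kernel stack, once the 32-bit layout has been translated field by
	 * field from the compat structure.
	 */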
case EXT4_IOC_MOVE_EXT:
break;
default:
return -ENOIOCTLCMD;
......
......@@ -475,7 +475,7 @@ int ext4_ext_migrate(struct inode *inode)
*/
if (!EXT4_HAS_INCOMPAT_FEATURE(inode->i_sb,
EXT4_FEATURE_INCOMPAT_EXTENTS) ||
(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL))
(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
return -EINVAL;
if (S_ISLNK(inode->i_mode) && inode->i_blocks == 0)
......
......@@ -482,6 +482,7 @@ mext_leaf_block(handle_t *handle, struct inode *orig_inode,
int depth = ext_depth(orig_inode);
int ret;
start_ext.ee_block = end_ext.ee_block = 0;
o_start = o_end = oext = orig_path[depth].p_ext;
oext_alen = ext4_ext_get_actual_len(oext);
start_ext.ee_len = end_ext.ee_len = 0;
......@@ -529,7 +530,7 @@ mext_leaf_block(handle_t *handle, struct inode *orig_inode,
* new_ext |-------|
*/
if (le32_to_cpu(oext->ee_block) + oext_alen - 1 < new_ext_end) {
ext4_error(orig_inode->i_sb,
EXT4_ERROR_INODE(orig_inode,
"new_ext_end(%u) should be less than or equal to "
"oext->ee_block(%u) + oext_alen(%d) - 1",
new_ext_end, le32_to_cpu(oext->ee_block),
......@@ -692,12 +693,12 @@ mext_replace_branches(handle_t *handle, struct inode *orig_inode,
while (1) {
/* The extent for donor must be found. */
if (!dext) {
ext4_error(donor_inode->i_sb,
EXT4_ERROR_INODE(donor_inode,
"The extent for donor must be found");
*err = -EIO;
goto out;
} else if (donor_off != le32_to_cpu(tmp_dext.ee_block)) {
ext4_error(donor_inode->i_sb,
EXT4_ERROR_INODE(donor_inode,
"Donor offset(%u) and the first block of donor "
"extent(%u) should be equal",
donor_off,
......@@ -976,11 +977,11 @@ mext_check_arguments(struct inode *orig_inode,
}
/* Ext4 move extent supports only extent based file */
if (!(EXT4_I(orig_inode)->i_flags & EXT4_EXTENTS_FL)) {
if (!(ext4_test_inode_flag(orig_inode, EXT4_INODE_EXTENTS))) {
ext4_debug("ext4 move extent: orig file is not extents "
"based file [ino:orig %lu]\n", orig_inode->i_ino);
return -EOPNOTSUPP;
} else if (!(EXT4_I(donor_inode)->i_flags & EXT4_EXTENTS_FL)) {
} else if (!(ext4_test_inode_flag(donor_inode, EXT4_INODE_EXTENTS))) {
ext4_debug("ext4 move extent: donor file is not extents "
"based file [ino:donor %lu]\n", donor_inode->i_ino);
return -EOPNOTSUPP;
......@@ -1354,7 +1355,7 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp,
if (ret1 < 0)
break;
if (*moved_len > len) {
ext4_error(orig_inode->i_sb,
EXT4_ERROR_INODE(orig_inode,
"We replaced blocks too much! "
"sum of replaced: %llu requested: %llu",
*moved_len, len);
......
......@@ -656,7 +656,7 @@ int ext4_htree_fill_tree(struct file *dir_file, __u32 start_hash,
dxtrace(printk(KERN_DEBUG "In htree_fill_tree, start hash: %x:%x\n",
start_hash, start_minor_hash));
dir = dir_file->f_path.dentry->d_inode;
if (!(EXT4_I(dir)->i_flags & EXT4_INDEX_FL)) {
if (!(ext4_test_inode_flag(dir, EXT4_INODE_INDEX))) {
hinfo.hash_version = EXT4_SB(dir->i_sb)->s_def_hash_version;
if (hinfo.hash_version <= DX_HASH_TEA)
hinfo.hash_version +=
......@@ -801,7 +801,7 @@ static void ext4_update_dx_flag(struct inode *inode)
{
if (!EXT4_HAS_COMPAT_FEATURE(inode->i_sb,
EXT4_FEATURE_COMPAT_DIR_INDEX))
EXT4_I(inode)->i_flags &= ~EXT4_INDEX_FL;
ext4_clear_inode_flag(inode, EXT4_INODE_INDEX);
}
/*
......@@ -943,8 +943,8 @@ static struct buffer_head * ext4_find_entry (struct inode *dir,
wait_on_buffer(bh);
if (!buffer_uptodate(bh)) {
/* read error, skip block & hope for the best */
ext4_error(sb, "reading directory #%lu offset %lu",
dir->i_ino, (unsigned long)block);
EXT4_ERROR_INODE(dir, "reading directory lblock %lu",
(unsigned long) block);
brelse(bh);
goto next;
}
......@@ -1066,13 +1066,13 @@ static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, stru
__u32 ino = le32_to_cpu(de->inode);
brelse(bh);
if (!ext4_valid_inum(dir->i_sb, ino)) {
ext4_error(dir->i_sb, "bad inode number: %u", ino);
EXT4_ERROR_INODE(dir, "bad inode number: %u", ino);
return ERR_PTR(-EIO);
}
inode = ext4_iget(dir->i_sb, ino);
if (unlikely(IS_ERR(inode))) {
if (PTR_ERR(inode) == -ESTALE) {
ext4_error(dir->i_sb,
EXT4_ERROR_INODE(dir,
"deleted inode referenced: %u",
ino);
return ERR_PTR(-EIO);
......@@ -1104,8 +1104,8 @@ struct dentry *ext4_get_parent(struct dentry *child)
brelse(bh);
if (!ext4_valid_inum(child->d_inode->i_sb, ino)) {
ext4_error(child->d_inode->i_sb,
"bad inode number: %u", ino);
EXT4_ERROR_INODE(child->d_inode,
"bad parent inode number: %u", ino);
return ERR_PTR(-EIO);
}
......@@ -1404,9 +1404,7 @@ static int make_indexed_dir(handle_t *handle, struct dentry *dentry,
de = (struct ext4_dir_entry_2 *)((char *)fde +
ext4_rec_len_from_disk(fde->rec_len, blocksize));
if ((char *) de >= (((char *) root) + blocksize)) {
ext4_error(dir->i_sb,
"invalid rec_len for '..' in inode %lu",
dir->i_ino);
EXT4_ERROR_INODE(dir, "invalid rec_len for '..'");
brelse(bh);
return -EIO;
}
......@@ -1418,7 +1416,7 @@ static int make_indexed_dir(handle_t *handle, struct dentry *dentry,
brelse(bh);
return retval;
}
EXT4_I(dir)->i_flags |= EXT4_INDEX_FL;
ext4_set_inode_flag(dir, EXT4_INODE_INDEX);
data1 = bh2->b_data;
memcpy (data1, de, len);
......@@ -1491,7 +1489,7 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
retval = ext4_dx_add_entry(handle, dentry, inode);
if (!retval || (retval != ERR_BAD_DX_DIR))
return retval;
EXT4_I(dir)->i_flags &= ~EXT4_INDEX_FL;
ext4_clear_inode_flag(dir, EXT4_INODE_INDEX);
dx_fallback++;
ext4_mark_inode_dirty(handle, dir);
}
......@@ -1519,6 +1517,8 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
de->rec_len = ext4_rec_len_to_disk(blocksize, blocksize);
retval = add_dirent_to_buf(handle, dentry, inode, de, bh);
brelse(bh);
if (retval == 0)
ext4_set_inode_state(inode, EXT4_STATE_NEWENTRY);
return retval;
}
......@@ -1915,9 +1915,8 @@ static int empty_dir(struct inode *inode)
if (inode->i_size < EXT4_DIR_REC_LEN(1) + EXT4_DIR_REC_LEN(2) ||
!(bh = ext4_bread(NULL, inode, 0, 0, &err))) {
if (err)
ext4_error(inode->i_sb,
"error %d reading directory #%lu offset 0",
err, inode->i_ino);
EXT4_ERROR_INODE(inode,
"error %d reading directory lblock 0", err);
else
ext4_warning(inode->i_sb,
"bad directory (dir #%lu) - no data block",
......@@ -1942,16 +1941,16 @@ static int empty_dir(struct inode *inode)
while (offset < inode->i_size) {
if (!bh ||
(void *) de >= (void *) (bh->b_data+sb->s_blocksize)) {
unsigned int lblock;
err = 0;
brelse(bh);
bh = ext4_bread(NULL, inode,
offset >> EXT4_BLOCK_SIZE_BITS(sb), 0, &err);
lblock = offset >> EXT4_BLOCK_SIZE_BITS(sb);
bh = ext4_bread(NULL, inode, lblock, 0, &err);
if (!bh) {
if (err)
ext4_error(sb,
"error %d reading directory"
" #%lu offset %u",
err, inode->i_ino, offset);
EXT4_ERROR_INODE(inode,
"error %d reading directory "
"lblock %u", err, lblock);
offset += sb->s_blocksize;
continue;
}
......@@ -2297,7 +2296,7 @@ static int ext4_symlink(struct inode *dir,
}
} else {
/* clear the extent format for fast symlink */
EXT4_I(inode)->i_flags &= ~EXT4_EXTENTS_FL;
ext4_clear_inode_flag(inode, EXT4_INODE_EXTENTS);
inode->i_op = &ext4_fast_symlink_inode_operations;
memcpy((char *)&EXT4_I(inode)->i_data, symname, l);
inode->i_size = l-1;
......
......@@ -911,7 +911,8 @@ int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
percpu_counter_add(&sbi->s_freeinodes_counter,
EXT4_INODES_PER_GROUP(sb));
if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG)) {
if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG) &&
sbi->s_log_groups_per_flex) {
ext4_group_t flex_group;
flex_group = ext4_flex_group(sbi, input->group);
atomic_add(input->free_blocks_count,
......
......@@ -241,6 +241,7 @@ handle_t *ext4_journal_start_sb(struct super_block *sb, int nblocks)
if (sb->s_flags & MS_RDONLY)
return ERR_PTR(-EROFS);
vfs_check_frozen(sb, SB_FREEZE_WRITE);
/* Special case here: if the journal has aborted behind our
* backs (eg. EIO in the commit thread), then we still need to
* take the FS itself readonly cleanly. */
......@@ -941,6 +942,8 @@ static int ext4_show_options(struct seq_file *seq, struct vfsmount *vfs)
seq_puts(seq, test_opt(sb, BARRIER) ? "1" : "0");
if (test_opt(sb, JOURNAL_ASYNC_COMMIT))
seq_puts(seq, ",journal_async_commit");
else if (test_opt(sb, JOURNAL_CHECKSUM))
seq_puts(seq, ",journal_checksum");
if (test_opt(sb, NOBH))
seq_puts(seq, ",nobh");
if (test_opt(sb, I_VERSION))
......@@ -2430,6 +2433,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
__releases(kernel_lock)
__acquires(kernel_lock)
{
char *orig_data = kstrdup(data, GFP_KERNEL);
struct buffer_head *bh;
struct ext4_super_block *es = NULL;
struct ext4_sb_info *sbi;
......@@ -2793,24 +2797,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
get_random_bytes(&sbi->s_next_generation, sizeof(u32));
spin_lock_init(&sbi->s_next_gen_lock);
err = percpu_counter_init(&sbi->s_freeblocks_counter,
ext4_count_free_blocks(sb));
if (!err) {
err = percpu_counter_init(&sbi->s_freeinodes_counter,
ext4_count_free_inodes(sb));
}
if (!err) {
err = percpu_counter_init(&sbi->s_dirs_counter,
ext4_count_dirs(sb));
}
if (!err) {
err = percpu_counter_init(&sbi->s_dirtyblocks_counter, 0);
}
if (err) {
ext4_msg(sb, KERN_ERR, "insufficient memory");
goto failed_mount3;
}
sbi->s_stripe = ext4_get_stripe_size(sbi);
sbi->s_max_writeback_mb_bump = 128;
......@@ -2910,6 +2896,20 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
set_task_ioprio(sbi->s_journal->j_task, journal_ioprio);
no_journal:
err = percpu_counter_init(&sbi->s_freeblocks_counter,
ext4_count_free_blocks(sb));
if (!err)
err = percpu_counter_init(&sbi->s_freeinodes_counter,
ext4_count_free_inodes(sb));
if (!err)
err = percpu_counter_init(&sbi->s_dirs_counter,
ext4_count_dirs(sb));
if (!err)
err = percpu_counter_init(&sbi->s_dirtyblocks_counter, 0);
if (err) {
ext4_msg(sb, KERN_ERR, "insufficient memory");
goto failed_mount_wq;
}
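	/*
	 * Assumed rationale (not stated in this hunk): the counters are now
	 * initialized only after the journal has been set up and replayed,
	 * so the free block/inode counts they start from reflect the
	 * recovered on-disk state rather than the pre-crash one.
	 */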
if (test_opt(sb, NOBH)) {
if (!(test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_WRITEBACK_DATA)) {
ext4_msg(sb, KERN_WARNING, "Ignoring nobh option - "
......@@ -3001,7 +3001,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
err = ext4_setup_system_zone(sb);
if (err) {
ext4_msg(sb, KERN_ERR, "failed to initialize system "
"zone (%d)\n", err);
"zone (%d)", err);
goto failed_mount4;
}
......@@ -3040,9 +3040,11 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
} else
descr = "out journal";
ext4_msg(sb, KERN_INFO, "mounted filesystem with%s", descr);
ext4_msg(sb, KERN_INFO, "mounted filesystem with%s. "
"Opts: %s", descr, orig_data);
lock_kernel();
kfree(orig_data);
return 0;
cantfind_ext4:
......@@ -3059,6 +3061,10 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
jbd2_journal_destroy(sbi->s_journal);
sbi->s_journal = NULL;
}
percpu_counter_destroy(&sbi->s_freeblocks_counter);
percpu_counter_destroy(&sbi->s_freeinodes_counter);
percpu_counter_destroy(&sbi->s_dirs_counter);
percpu_counter_destroy(&sbi->s_dirtyblocks_counter);
failed_mount3:
if (sbi->s_flex_groups) {
if (is_vmalloc_addr(sbi->s_flex_groups))
......@@ -3066,10 +3072,6 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
else
kfree(sbi->s_flex_groups);
}
percpu_counter_destroy(&sbi->s_freeblocks_counter);
percpu_counter_destroy(&sbi->s_freeinodes_counter);
percpu_counter_destroy(&sbi->s_dirs_counter);
percpu_counter_destroy(&sbi->s_dirtyblocks_counter);
failed_mount2:
for (i = 0; i < db_count; i++)
brelse(sbi->s_group_desc[i]);
......@@ -3089,6 +3091,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
kfree(sbi->s_blockgroup_lock);
kfree(sbi);
lock_kernel();
kfree(orig_data);
return ret;
}
......@@ -3485,8 +3488,10 @@ int ext4_force_commit(struct super_block *sb)
return 0;
journal = EXT4_SB(sb)->s_journal;
if (journal)
if (journal) {
vfs_check_frozen(sb, SB_FREEZE_WRITE);
ret = ext4_journal_force_commit(journal);
}
return ret;
}
......@@ -3535,18 +3540,16 @@ static int ext4_freeze(struct super_block *sb)
* the journal.
*/
error = jbd2_journal_flush(journal);
if (error < 0) {
out:
jbd2_journal_unlock_updates(journal);
return error;
}
if (error < 0)
goto out;
/* Journal blocked and flushed, clear needs_recovery flag. */
EXT4_CLEAR_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER);
error = ext4_commit_super(sb, 1);
if (error)
goto out;
return 0;
out:
/* we rely on s_frozen to stop further updates */
jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal);
return error;
}
/*
......@@ -3563,7 +3566,6 @@ static int ext4_unfreeze(struct super_block *sb)
EXT4_SET_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER);
ext4_commit_super(sb, 1);
unlock_super(sb);
jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal);
return 0;
}
......@@ -3580,6 +3582,7 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
#ifdef CONFIG_QUOTA
int i;
#endif
char *orig_data = kstrdup(data, GFP_KERNEL);
lock_kernel();
......@@ -3713,6 +3716,9 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
#endif
unlock_super(sb);
unlock_kernel();
ext4_msg(sb, KERN_INFO, "re-mounted. Opts: %s", orig_data);
kfree(orig_data);
return 0;
restore_opts:
......@@ -3734,6 +3740,7 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
#endif
unlock_super(sb);
unlock_kernel();
kfree(orig_data);
return err;
}
......@@ -4141,6 +4148,7 @@ static int __init init_ext4_fs(void)
{
int err;
ext4_check_flag_values();
err = init_ext4_system_zone();
if (err)
return err;
......
......@@ -34,6 +34,7 @@ const struct inode_operations ext4_symlink_inode_operations = {
.readlink = generic_readlink,
.follow_link = page_follow_link_light,
.put_link = page_put_link,
.setattr = ext4_setattr,
#ifdef CONFIG_EXT4_FS_XATTR
.setxattr = generic_setxattr,
.getxattr = generic_getxattr,
......@@ -45,6 +46,7 @@ const struct inode_operations ext4_symlink_inode_operations = {
const struct inode_operations ext4_fast_symlink_inode_operations = {
.readlink = generic_readlink,
.follow_link = ext4_follow_link,
.setattr = ext4_setattr,
#ifdef CONFIG_EXT4_FS_XATTR
.setxattr = generic_setxattr,
.getxattr = generic_getxattr,
......
......@@ -228,8 +228,7 @@ ext4_xattr_block_get(struct inode *inode, int name_index, const char *name,
atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount));
if (ext4_xattr_check_block(bh)) {
bad_block:
ext4_error(inode->i_sb,
"inode %lu: bad block %llu", inode->i_ino,
EXT4_ERROR_INODE(inode, "bad block %llu",
EXT4_I(inode)->i_file_acl);
error = -EIO;
goto cleanup;
......@@ -372,8 +371,7 @@ ext4_xattr_block_list(struct dentry *dentry, char *buffer, size_t buffer_size)
ea_bdebug(bh, "b_count=%d, refcount=%d",
atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount));
if (ext4_xattr_check_block(bh)) {
ext4_error(inode->i_sb,
"inode %lu: bad block %llu", inode->i_ino,
EXT4_ERROR_INODE(inode, "bad block %llu",
EXT4_I(inode)->i_file_acl);
error = -EIO;
goto cleanup;
......@@ -666,8 +664,8 @@ ext4_xattr_block_find(struct inode *inode, struct ext4_xattr_info *i,
atomic_read(&(bs->bh->b_count)),
le32_to_cpu(BHDR(bs->bh)->h_refcount));
if (ext4_xattr_check_block(bs->bh)) {
ext4_error(sb, "inode %lu: bad block %llu",
inode->i_ino, EXT4_I(inode)->i_file_acl);
EXT4_ERROR_INODE(inode, "bad block %llu",
EXT4_I(inode)->i_file_acl);
error = -EIO;
goto cleanup;
}
......@@ -820,7 +818,7 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
EXT4_I(inode)->i_block_group);
/* non-extent files can't have physical blocks past 2^32 */
if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL))
if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
goal = goal & EXT4_MAX_BLOCK_FILE_PHYS;
block = ext4_new_meta_blocks(handle, inode,
......@@ -828,7 +826,7 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
if (error)
goto cleanup;
if (!(EXT4_I(inode)->i_flags & EXT4_EXTENTS_FL))
if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
BUG_ON(block > EXT4_MAX_BLOCK_FILE_PHYS);
ea_idebug(inode, "creating block %d", block);
......@@ -880,8 +878,8 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
goto cleanup;
bad_block:
ext4_error(inode->i_sb, "inode %lu: bad block %llu",
inode->i_ino, EXT4_I(inode)->i_file_acl);
EXT4_ERROR_INODE(inode, "bad block %llu",
EXT4_I(inode)->i_file_acl);
goto cleanup;
#undef header
......@@ -1194,8 +1192,8 @@ int ext4_expand_extra_isize_ea(struct inode *inode, int new_extra_isize,
if (!bh)
goto cleanup;
if (ext4_xattr_check_block(bh)) {
ext4_error(inode->i_sb, "inode %lu: bad block %llu",
inode->i_ino, EXT4_I(inode)->i_file_acl);
EXT4_ERROR_INODE(inode, "bad block %llu",
EXT4_I(inode)->i_file_acl);
error = -EIO;
goto cleanup;
}
......@@ -1372,14 +1370,14 @@ ext4_xattr_delete_inode(handle_t *handle, struct inode *inode)
goto cleanup;
bh = sb_bread(inode->i_sb, EXT4_I(inode)->i_file_acl);
if (!bh) {
ext4_error(inode->i_sb, "inode %lu: block %llu read error",
inode->i_ino, EXT4_I(inode)->i_file_acl);
EXT4_ERROR_INODE(inode, "block %llu read error",
EXT4_I(inode)->i_file_acl);
goto cleanup;
}
if (BHDR(bh)->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC) ||
BHDR(bh)->h_blocks != cpu_to_le32(1)) {
ext4_error(inode->i_sb, "inode %lu: bad block %llu",
inode->i_ino, EXT4_I(inode)->i_file_acl);
EXT4_ERROR_INODE(inode, "bad block %llu",
EXT4_I(inode)->i_file_acl);
goto cleanup;
}
ext4_xattr_release_block(handle, inode, bh);
......@@ -1504,9 +1502,8 @@ ext4_xattr_cache_find(struct inode *inode, struct ext4_xattr_header *header,
}
bh = sb_bread(inode->i_sb, ce->e_block);
if (!bh) {
ext4_error(inode->i_sb,
"inode %lu: block %lu read error",
inode->i_ino, (unsigned long) ce->e_block);
EXT4_ERROR_INODE(inode, "block %lu read error",
(unsigned long) ce->e_block);
} else if (le32_to_cpu(BHDR(bh)->h_refcount) >=
EXT4_XATTR_REFCOUNT_MAX) {
ea_idebug(inode, "block %lu refcount %d>=%d",
......
......@@ -1311,7 +1311,6 @@ int jbd2_journal_stop(handle_t *handle)
if (handle->h_sync)
transaction->t_synchronous_commit = 1;
current->journal_info = NULL;
spin_lock(&journal->j_state_lock);
spin_lock(&transaction->t_handle_lock);
transaction->t_outstanding_credits -= handle->h_buffer_credits;
transaction->t_updates--;
......@@ -1340,8 +1339,7 @@ int jbd2_journal_stop(handle_t *handle)
jbd_debug(2, "transaction too old, requesting commit for "
"handle %p\n", handle);
/* This is non-blocking */
__jbd2_log_start_commit(journal, transaction->t_tid);
spin_unlock(&journal->j_state_lock);
jbd2_log_start_commit(journal, transaction->t_tid);
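	/*
	 * jbd2_log_start_commit() takes j_state_lock internally, unlike the
	 * __jbd2_log_start_commit() variant it replaces, which is why the
	 * explicit locking around this call could be dropped.
	 */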
/*
* Special case: JBD2_SYNC synchronous updates require us
......@@ -1351,7 +1349,6 @@ int jbd2_journal_stop(handle_t *handle)
err = jbd2_log_wait_commit(journal, tid);
} else {
spin_unlock(&transaction->t_handle_lock);
spin_unlock(&journal->j_state_lock);
}
lock_map_release(&handle->h_lockdep_map);
......
......@@ -1514,11 +1514,13 @@ static void inode_decr_space(struct inode *inode, qsize_t number, int reserve)
/*
* This operation can block, but only after everything is updated
*/
int __dquot_alloc_space(struct inode *inode, qsize_t number,
int warn, int reserve)
int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags)
{
int cnt, ret = 0;
char warntype[MAXQUOTAS];
int warn = flags & DQUOT_SPACE_WARN;
int reserve = flags & DQUOT_SPACE_RESERVE;
int nofail = flags & DQUOT_SPACE_NOFAIL;
/*
* First test before acquiring mutex - solves deadlocks when we
......@@ -1539,7 +1541,7 @@ int __dquot_alloc_space(struct inode *inode, qsize_t number,
continue;
ret = check_bdq(inode->i_dquot[cnt], number, !warn,
warntype+cnt);
if (ret) {
if (ret && !nofail) {
spin_unlock(&dq_data_lock);
goto out_flush_warn;
}
......@@ -1638,10 +1640,11 @@ EXPORT_SYMBOL(dquot_claim_space_nodirty);
/*
* This operation can block, but only after everything is updated
*/
void __dquot_free_space(struct inode *inode, qsize_t number, int reserve)
void __dquot_free_space(struct inode *inode, qsize_t number, int flags)
{
unsigned int cnt;
char warntype[MAXQUOTAS];
int reserve = flags & DQUOT_SPACE_RESERVE;
/* First test before acquiring mutex - solves deadlocks when we
* re-enter the quota code and are already holding the mutex */
......
......@@ -9,6 +9,10 @@
#include <linux/fs.h>
#define DQUOT_SPACE_WARN 0x1
#define DQUOT_SPACE_RESERVE 0x2
#define DQUOT_SPACE_NOFAIL 0x4
static inline struct quota_info *sb_dqopt(struct super_block *sb)
{
return &sb->s_dquot;
......@@ -41,9 +45,8 @@ int dquot_scan_active(struct super_block *sb,
struct dquot *dquot_alloc(struct super_block *sb, int type);
void dquot_destroy(struct dquot *dquot);
int __dquot_alloc_space(struct inode *inode, qsize_t number,
int warn, int reserve);
void __dquot_free_space(struct inode *inode, qsize_t number, int reserve);
int __dquot_alloc_space(struct inode *inode, qsize_t number, int flags);
void __dquot_free_space(struct inode *inode, qsize_t number, int flags);
int dquot_alloc_inode(const struct inode *inode);
......@@ -242,17 +245,17 @@ static inline int dquot_transfer(struct inode *inode, struct iattr *iattr)
}
static inline int __dquot_alloc_space(struct inode *inode, qsize_t number,
int warn, int reserve)
int flags)
{
if (!reserve)
if (!(flags & DQUOT_SPACE_RESERVE))
inode_add_bytes(inode, number);
return 0;
}
static inline void __dquot_free_space(struct inode *inode, qsize_t number,
int reserve)
int flags)
{
if (!reserve)
if (!(flags & DQUOT_SPACE_RESERVE))
inode_sub_bytes(inode, number);
}
......@@ -268,7 +271,13 @@ static inline int dquot_claim_space_nodirty(struct inode *inode, qsize_t number)
static inline int dquot_alloc_space_nodirty(struct inode *inode, qsize_t nr)
{
return __dquot_alloc_space(inode, nr, 1, 0);
return __dquot_alloc_space(inode, nr, DQUOT_SPACE_WARN);
}
static inline void dquot_alloc_space_nofail(struct inode *inode, qsize_t nr)
{
__dquot_alloc_space(inode, nr, DQUOT_SPACE_WARN|DQUOT_SPACE_NOFAIL);
mark_inode_dirty(inode);
}
static inline int dquot_alloc_space(struct inode *inode, qsize_t nr)
......@@ -286,6 +295,11 @@ static inline int dquot_alloc_block_nodirty(struct inode *inode, qsize_t nr)
return dquot_alloc_space_nodirty(inode, nr << inode->i_blkbits);
}
static inline void dquot_alloc_block_nofail(struct inode *inode, qsize_t nr)
{
dquot_alloc_space_nofail(inode, nr << inode->i_blkbits);
}
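	/*
	 * This is the helper the ext4 balloc hunk at the top of this merge
	 * relies on: ext4_new_meta_blocks() charges delalloc metadata with
	 * dquot_alloc_block_nofail(), accounting the blocks without ever
	 * failing with EDQUOT.
	 */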
static inline int dquot_alloc_block(struct inode *inode, qsize_t nr)
{
return dquot_alloc_space(inode, nr << inode->i_blkbits);
......@@ -293,7 +307,7 @@ static inline int dquot_alloc_block(struct inode *inode, qsize_t nr)
static inline int dquot_prealloc_block_nodirty(struct inode *inode, qsize_t nr)
{
return __dquot_alloc_space(inode, nr << inode->i_blkbits, 0, 0);
return __dquot_alloc_space(inode, nr << inode->i_blkbits, 0);
}
static inline int dquot_prealloc_block(struct inode *inode, qsize_t nr)
......@@ -308,7 +322,8 @@ static inline int dquot_prealloc_block(struct inode *inode, qsize_t nr)
static inline int dquot_reserve_block(struct inode *inode, qsize_t nr)
{
return __dquot_alloc_space(inode, nr << inode->i_blkbits, 1, 1);
return __dquot_alloc_space(inode, nr << inode->i_blkbits,
DQUOT_SPACE_WARN|DQUOT_SPACE_RESERVE);
}
static inline int dquot_claim_block(struct inode *inode, qsize_t nr)
......@@ -345,7 +360,7 @@ static inline void dquot_free_block(struct inode *inode, qsize_t nr)
static inline void dquot_release_reservation_block(struct inode *inode,
qsize_t nr)
{
__dquot_free_space(inode, nr << inode->i_blkbits, 1);
__dquot_free_space(inode, nr << inode->i_blkbits, DQUOT_SPACE_RESERVE);
}
#endif /* _LINUX_QUOTAOPS_ */
......@@ -353,7 +353,7 @@ TRACE_EVENT(ext4_discard_blocks,
jbd2_dev_to_name(__entry->dev), __entry->blk, __entry->count)
);
TRACE_EVENT(ext4_mb_new_inode_pa,
DECLARE_EVENT_CLASS(ext4__mb_new_pa,
TP_PROTO(struct ext4_allocation_context *ac,
struct ext4_prealloc_space *pa),
......@@ -381,32 +381,20 @@ TRACE_EVENT(ext4_mb_new_inode_pa,
__entry->pa_pstart, __entry->pa_len, __entry->pa_lstart)
);
TRACE_EVENT(ext4_mb_new_group_pa,
DEFINE_EVENT(ext4__mb_new_pa, ext4_mb_new_inode_pa,
TP_PROTO(struct ext4_allocation_context *ac,
struct ext4_prealloc_space *pa),
TP_ARGS(ac, pa),
TP_STRUCT__entry(
__field( dev_t, dev )
__field( ino_t, ino )
__field( __u64, pa_pstart )
__field( __u32, pa_len )
__field( __u64, pa_lstart )
TP_ARGS(ac, pa)
);
),
DEFINE_EVENT(ext4__mb_new_pa, ext4_mb_new_group_pa,
TP_fast_assign(
__entry->dev = ac->ac_sb->s_dev;
__entry->ino = ac->ac_inode->i_ino;
__entry->pa_pstart = pa->pa_pstart;
__entry->pa_len = pa->pa_len;
__entry->pa_lstart = pa->pa_lstart;
),
TP_PROTO(struct ext4_allocation_context *ac,
struct ext4_prealloc_space *pa),
TP_printk("dev %s ino %lu pstart %llu len %u lstart %llu",
jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
__entry->pa_pstart, __entry->pa_len, __entry->pa_lstart)
TP_ARGS(ac, pa)
);
TRACE_EVENT(ext4_mb_release_inode_pa,
......@@ -790,7 +778,7 @@ TRACE_EVENT(ext4_mballoc_prealloc,
__entry->result_len, __entry->result_logical)
);
TRACE_EVENT(ext4_mballoc_discard,
DECLARE_EVENT_CLASS(ext4__mballoc,
TP_PROTO(struct ext4_allocation_context *ac),
TP_ARGS(ac),
......@@ -819,33 +807,18 @@ TRACE_EVENT(ext4_mballoc_discard,
__entry->result_len, __entry->result_logical)
);
TRACE_EVENT(ext4_mballoc_free,
DEFINE_EVENT(ext4__mballoc, ext4_mballoc_discard,
TP_PROTO(struct ext4_allocation_context *ac),
TP_ARGS(ac),
TP_ARGS(ac)
);
TP_STRUCT__entry(
__field( dev_t, dev )
__field( ino_t, ino )
__field( __u32, result_logical )
__field( int, result_start )
__field( __u32, result_group )
__field( int, result_len )
),
DEFINE_EVENT(ext4__mballoc, ext4_mballoc_free,
TP_fast_assign(
__entry->dev = ac->ac_inode->i_sb->s_dev;
__entry->ino = ac->ac_inode->i_ino;
__entry->result_logical = ac->ac_b_ex.fe_logical;
__entry->result_start = ac->ac_b_ex.fe_start;
__entry->result_group = ac->ac_b_ex.fe_group;
__entry->result_len = ac->ac_b_ex.fe_len;
),
TP_PROTO(struct ext4_allocation_context *ac),
TP_printk("dev %s inode %lu extent %u/%d/%u@%u ",
jbd2_dev_to_name(__entry->dev), (unsigned long) __entry->ino,
__entry->result_group, __entry->result_start,
__entry->result_len, __entry->result_logical)
TP_ARGS(ac)
);
TRACE_EVENT(ext4_forget,
......@@ -974,6 +947,39 @@ TRACE_EVENT(ext4_da_release_space,
__entry->reserved_meta_blocks, __entry->allocated_meta_blocks)
);
DECLARE_EVENT_CLASS(ext4__bitmap_load,
TP_PROTO(struct super_block *sb, unsigned long group),
TP_ARGS(sb, group),
TP_STRUCT__entry(
__field( dev_t, dev )
__field( __u32, group )
),
TP_fast_assign(
__entry->dev = sb->s_dev;
__entry->group = group;
),
TP_printk("dev %s group %u",
jbd2_dev_to_name(__entry->dev), __entry->group)
);
DEFINE_EVENT(ext4__bitmap_load, ext4_mb_bitmap_load,
TP_PROTO(struct super_block *sb, unsigned long group),
TP_ARGS(sb, group)
);
DEFINE_EVENT(ext4__bitmap_load, ext4_mb_buddy_bitmap_load,
TP_PROTO(struct super_block *sb, unsigned long group),
TP_ARGS(sb, group)
);
#endif /* _TRACE_EXT4_H */
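The DEFINE_EVENT()s above generate trace_ext4_mb_bitmap_load() and trace_ext4_mb_buddy_bitmap_load(); a hedged sketch of how they would be fired from the mballoc code (the exact call sites are an assumption):
	/* e.g. in ext4_mb_init_cache(), just before reading the on-disk
	 * block bitmap and before (re)building the buddy bitmap: */
	trace_ext4_mb_bitmap_load(sb, group);
	trace_ext4_mb_buddy_bitmap_load(sb, group);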
......