Commit 9a7259d5 authored by Linus Torvalds

Merge branch 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jack/linux-fs

Pull ext3, UDF, and quota fixes from Jan Kara:
 "A couple of ext3 & UDF fixes and also one improvement in quota
  locking."

* 'for_linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jack/linux-fs:
  ext3: fix start and len arguments handling in ext3_trim_fs()
  udf: Fix deadlock in udf_release_file()
  udf: Fix file entry logicalBlocksRecorded
  udf: Fix handling of i_blocks
  quota: Make quota code not call tty layer with dqptr_sem held
  udf: Init/maintain file entry checkpoint field
  ext3: Update ctime in ext3_splice_branch() only when needed
  ext3: Don't call dquot_free_block() if we don't update anything
  udf: Remove unnecessary OOM messages
parents e9c0f152 e703c206
......@@ -1743,8 +1743,11 @@ ext3_fsblk_t ext3_new_blocks(handle_t *handle, struct inode *inode,
*errp = 0;
brelse(bitmap_bh);
dquot_free_block(inode, *count-num);
*count = num;
if (num < *count) {
dquot_free_block(inode, *count-num);
*count = num;
}
trace_ext3_allocate_blocks(inode, goal, num,
(unsigned long long)ret_block);
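
Note: the hunk above (the "ext3: Don't call dquot_free_block() if we don't update anything" change from this pull) only touches the quota once the allocator actually returned fewer blocks than requested. A minimal user-space sketch of that pattern, with reserve()/release() as hypothetical stand-ins for the kernel's quota calls:

#include <stdio.h>

static unsigned long reserved;                 /* blocks currently charged to quota */

static void reserve(unsigned long n) { reserved += n; }
static void release(unsigned long n) { reserved -= n; }

static unsigned long allocate_blocks(unsigned long *count)
{
	unsigned long num;

	reserve(*count);                       /* charge the full request up front */
	num = *count > 3 ? 3 : *count;         /* pretend only 3 blocks were free */

	if (num < *count) {                    /* partial allocation: return the excess */
		release(*count - num);
		*count = num;
	}                                      /* full allocation: no quota call at all */
	return num;
}

int main(void)
{
	unsigned long count = 8;

	allocate_blocks(&count);
	printf("got %lu blocks, %lu charged to quota\n", count, reserved);
	return 0;
}
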
......@@ -1970,7 +1973,7 @@ static ext3_grpblk_t ext3_trim_all_free(struct super_block *sb,
sbi = EXT3_SB(sb);
/* Walk through the whole group */
while (start < max) {
while (start <= max) {
start = bitmap_search_next_usable_block(start, bitmap_bh, max);
if (start < 0)
break;
......@@ -1980,7 +1983,7 @@ static ext3_grpblk_t ext3_trim_all_free(struct super_block *sb,
* Allocate contiguous free extents by setting bits in the
* block bitmap
*/
while (next < max
while (next <= max
&& claim_block(sb_bgl_lock(sbi, group),
next, bitmap_bh)) {
next++;
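
Note: both "<" to "<=" changes above follow from max now naming the last usable block offset in the group (inclusive) rather than one past it. A small sketch of why an inclusive bound needs "<=", with a plain array standing in for the block bitmap:

#include <stdio.h>

#define NBLOCKS 16

static int in_use[NBLOCKS];                    /* 0 = free, 1 = allocated */

/* Return the first free block in [start, last], or -1; 'last' is inclusive. */
static int next_free(int start, int last)
{
	for (; start <= last; start++)         /* with '<' the last block is never tried */
		if (!in_use[start])
			return start;
	return -1;
}

int main(void)
{
	for (int i = 0; i < NBLOCKS - 1; i++)
		in_use[i] = 1;                 /* only the final block is free */

	printf("next free: %d\n", next_free(0, NBLOCKS - 1));
	return 0;
}
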
......@@ -2091,73 +2094,74 @@ static ext3_grpblk_t ext3_trim_all_free(struct super_block *sb,
*/
int ext3_trim_fs(struct super_block *sb, struct fstrim_range *range)
{
ext3_grpblk_t last_block, first_block, free_blocks;
unsigned long first_group, last_group;
unsigned long group, ngroups;
ext3_grpblk_t last_block, first_block;
unsigned long group, first_group, last_group;
struct ext3_group_desc *gdp;
struct ext3_super_block *es = EXT3_SB(sb)->s_es;
uint64_t start, len, minlen, trimmed;
uint64_t start, minlen, end, trimmed = 0;
ext3_fsblk_t first_data_blk =
le32_to_cpu(EXT3_SB(sb)->s_es->s_first_data_block);
ext3_fsblk_t max_blks = le32_to_cpu(es->s_blocks_count);
int ret = 0;
start = (range->start >> sb->s_blocksize_bits) +
le32_to_cpu(es->s_first_data_block);
len = range->len >> sb->s_blocksize_bits;
start = range->start >> sb->s_blocksize_bits;
end = start + (range->len >> sb->s_blocksize_bits) - 1;
minlen = range->minlen >> sb->s_blocksize_bits;
trimmed = 0;
if (unlikely(minlen > EXT3_BLOCKS_PER_GROUP(sb)))
if (unlikely(minlen > EXT3_BLOCKS_PER_GROUP(sb)) ||
unlikely(start >= max_blks))
return -EINVAL;
if (start >= max_blks)
return -EINVAL;
if (start + len > max_blks)
len = max_blks - start;
if (end >= max_blks)
end = max_blks - 1;
if (end <= first_data_blk)
goto out;
if (start < first_data_blk)
start = first_data_blk;
ngroups = EXT3_SB(sb)->s_groups_count;
smp_rmb();
/* Determine first and last group to examine based on start and len */
ext3_get_group_no_and_offset(sb, (ext3_fsblk_t) start,
&first_group, &first_block);
ext3_get_group_no_and_offset(sb, (ext3_fsblk_t) (start + len),
ext3_get_group_no_and_offset(sb, (ext3_fsblk_t) end,
&last_group, &last_block);
last_group = (last_group > ngroups - 1) ? ngroups - 1 : last_group;
last_block = EXT3_BLOCKS_PER_GROUP(sb);
if (first_group > last_group)
return -EINVAL;
/* end now represents the last block to discard in this group */
end = EXT3_BLOCKS_PER_GROUP(sb) - 1;
for (group = first_group; group <= last_group; group++) {
gdp = ext3_get_group_desc(sb, group, NULL);
if (!gdp)
break;
free_blocks = le16_to_cpu(gdp->bg_free_blocks_count);
if (free_blocks < minlen)
continue;
/*
* For all the groups except the last one, last block will
* always be EXT3_BLOCKS_PER_GROUP(sb), so we only need to
* change it for the last group in which case first_block +
* len < EXT3_BLOCKS_PER_GROUP(sb).
* always be EXT3_BLOCKS_PER_GROUP(sb)-1, so we only need to
* change it for the last group, note that last_block is
* already computed earlier by ext3_get_group_no_and_offset()
*/
if (first_block + len < EXT3_BLOCKS_PER_GROUP(sb))
last_block = first_block + len;
len -= last_block - first_block;
if (group == last_group)
end = last_block;
ret = ext3_trim_all_free(sb, group, first_block,
last_block, minlen);
if (ret < 0)
break;
if (le16_to_cpu(gdp->bg_free_blocks_count) >= minlen) {
ret = ext3_trim_all_free(sb, group, first_block,
end, minlen);
if (ret < 0)
break;
trimmed += ret;
}
trimmed += ret;
/*
* For every group except the first one, we are sure
* that the first block to discard will be block #0.
*/
first_block = 0;
}
if (ret >= 0)
if (ret > 0)
ret = 0;
range->len = trimmed * sb->s_blocksize;
out:
range->len = trimmed * sb->s_blocksize;
return ret;
}
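
Note: the rewritten ext3_trim_fs() above ("ext3: fix start and len arguments handling in ext3_trim_fs()") converts the byte range from struct fstrim_range into an inclusive block range [start, end] before clamping it. A user-space sketch of just that arithmetic; the variable names mirror the kernel code, but the values are made up:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned blocksize_bits = 12;                   /* 4 KiB blocks */
	uint64_t max_blks = 1000;                       /* s_blocks_count */
	uint64_t first_data_blk = 1;                    /* s_first_data_block */

	uint64_t range_start = 5000;                    /* bytes */
	uint64_t range_len = 123456789;                 /* bytes */

	uint64_t start = range_start >> blocksize_bits;
	uint64_t end = start + (range_len >> blocksize_bits) - 1;   /* inclusive */

	if (start >= max_blks)
		return 1;                               /* -EINVAL in the kernel */
	if (end >= max_blks)
		end = max_blks - 1;                     /* clamp instead of rejecting */
	if (end <= first_data_blk)
		return 0;                               /* nothing to trim */
	if (start < first_data_blk)
		start = first_data_blk;

	printf("trim blocks %llu..%llu\n",
	       (unsigned long long)start, (unsigned long long)end);
	return 0;
}
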
......@@ -756,6 +756,7 @@ static int ext3_splice_branch(handle_t *handle, struct inode *inode,
struct ext3_block_alloc_info *block_i;
ext3_fsblk_t current_block;
struct ext3_inode_info *ei = EXT3_I(inode);
struct timespec now;
block_i = ei->i_block_alloc_info;
/*
......@@ -795,9 +796,11 @@ static int ext3_splice_branch(handle_t *handle, struct inode *inode,
}
/* We are done with atomic stuff, now do the rest of housekeeping */
inode->i_ctime = CURRENT_TIME_SEC;
ext3_mark_inode_dirty(handle, inode);
now = CURRENT_TIME_SEC;
if (!timespec_equal(&inode->i_ctime, &now) || !where->bh) {
inode->i_ctime = now;
ext3_mark_inode_dirty(handle, inode);
}
/* ext3_mark_inode_dirty already updated i_sync_tid */
atomic_set(&ei->i_datasync_tid, handle->h_transaction->t_tid);
......
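
Note: the ext3_splice_branch() hunk above dirties the inode only when i_ctime actually changes, or when the new block pointer was stored in the inode itself (!where->bh), instead of on every single block allocation. A rough user-space sketch of the compare-before-dirty idea; the fake_inode struct and the splice_point_in_inode flag are illustrative stand-ins, not the kernel API:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

struct fake_inode {
	struct timespec ctime;
	bool dirty;
};

static bool timespec_equal(const struct timespec *a, const struct timespec *b)
{
	return a->tv_sec == b->tv_sec && a->tv_nsec == b->tv_nsec;
}

/* splice_point_in_inode plays the role of '!where->bh' in the hunk above. */
static void splice(struct fake_inode *inode, struct timespec now,
		   bool splice_point_in_inode)
{
	if (!timespec_equal(&inode->ctime, &now) || splice_point_in_inode) {
		inode->ctime = now;
		inode->dirty = true;        /* ext3_mark_inode_dirty() in the kernel */
	}
}

int main(void)
{
	struct fake_inode inode = { { 0, 0 }, false };
	struct timespec now = { 1333000000, 0 };   /* CURRENT_TIME_SEC: 1 s granularity */

	splice(&inode, now, false);         /* first block this second: inode dirtied */
	inode.dirty = false;
	splice(&inode, now, false);         /* same second, splice point in an indirect block: skipped */
	printf("second splice dirtied the inode: %s\n", inode.dirty ? "yes" : "no");
	return 0;
}
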
......@@ -105,7 +105,6 @@ static void udf_add_free_space(struct super_block *sb, u16 partition, u32 cnt)
}
static void udf_bitmap_free_blocks(struct super_block *sb,
struct inode *inode,
struct udf_bitmap *bitmap,
struct kernel_lb_addr *bloc,
uint32_t offset,
......@@ -172,7 +171,6 @@ static void udf_bitmap_free_blocks(struct super_block *sb,
}
static int udf_bitmap_prealloc_blocks(struct super_block *sb,
struct inode *inode,
struct udf_bitmap *bitmap,
uint16_t partition, uint32_t first_block,
uint32_t block_count)
......@@ -223,7 +221,6 @@ static int udf_bitmap_prealloc_blocks(struct super_block *sb,
}
static int udf_bitmap_new_block(struct super_block *sb,
struct inode *inode,
struct udf_bitmap *bitmap, uint16_t partition,
uint32_t goal, int *err)
{
......@@ -349,7 +346,6 @@ static int udf_bitmap_new_block(struct super_block *sb,
}
static void udf_table_free_blocks(struct super_block *sb,
struct inode *inode,
struct inode *table,
struct kernel_lb_addr *bloc,
uint32_t offset,
......@@ -581,7 +577,6 @@ static void udf_table_free_blocks(struct super_block *sb,
}
static int udf_table_prealloc_blocks(struct super_block *sb,
struct inode *inode,
struct inode *table, uint16_t partition,
uint32_t first_block, uint32_t block_count)
{
......@@ -643,7 +638,6 @@ static int udf_table_prealloc_blocks(struct super_block *sb,
}
static int udf_table_new_block(struct super_block *sb,
struct inode *inode,
struct inode *table, uint16_t partition,
uint32_t goal, int *err)
{
......@@ -743,18 +737,23 @@ void udf_free_blocks(struct super_block *sb, struct inode *inode,
struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];
if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP) {
udf_bitmap_free_blocks(sb, inode, map->s_uspace.s_bitmap,
udf_bitmap_free_blocks(sb, map->s_uspace.s_bitmap,
bloc, offset, count);
} else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE) {
udf_table_free_blocks(sb, inode, map->s_uspace.s_table,
udf_table_free_blocks(sb, map->s_uspace.s_table,
bloc, offset, count);
} else if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP) {
udf_bitmap_free_blocks(sb, inode, map->s_fspace.s_bitmap,
udf_bitmap_free_blocks(sb, map->s_fspace.s_bitmap,
bloc, offset, count);
} else if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE) {
udf_table_free_blocks(sb, inode, map->s_fspace.s_table,
udf_table_free_blocks(sb, map->s_fspace.s_table,
bloc, offset, count);
}
if (inode) {
inode_sub_bytes(inode,
((sector_t)count) << sb->s_blocksize_bits);
}
}
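
Note: the udf_free_blocks() hunk above moves the inode block accounting out of the per-backend free routines and into the dispatcher, so inode_sub_bytes() runs exactly once per free; this is part of the "udf: Fix handling of i_blocks" change. A simplified sketch of that shape, where the struct and helpers are stand-ins rather than the UDF code:

#include <stdint.h>
#include <stdio.h>

struct fake_inode { uint64_t bytes; };          /* stands in for the inode byte count */

static void bitmap_free(uint32_t count) { (void)count; /* clear bits in the bitmap */ }
static void table_free(uint32_t count)  { (void)count; /* grow a free-space extent */ }

static void free_blocks(struct fake_inode *inode, int use_bitmap,
			uint32_t count, unsigned blocksize_bits)
{
	if (use_bitmap)
		bitmap_free(count);             /* backends no longer see the inode */
	else
		table_free(count);

	if (inode)                              /* accounting happens in one place */
		inode->bytes -= (uint64_t)count << blocksize_bits;
}

int main(void)
{
	struct fake_inode inode = { 10 * 2048 };   /* ten 2 KiB blocks charged */

	free_blocks(&inode, 1, 3, 11);             /* free three of them */
	printf("bytes charged: %llu\n", (unsigned long long)inode.bytes);
	return 0;
}
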
inline int udf_prealloc_blocks(struct super_block *sb,
......@@ -763,29 +762,34 @@ inline int udf_prealloc_blocks(struct super_block *sb,
uint32_t block_count)
{
struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];
sector_t allocated;
if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP)
return udf_bitmap_prealloc_blocks(sb, inode,
map->s_uspace.s_bitmap,
partition, first_block,
block_count);
allocated = udf_bitmap_prealloc_blocks(sb,
map->s_uspace.s_bitmap,
partition, first_block,
block_count);
else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE)
return udf_table_prealloc_blocks(sb, inode,
map->s_uspace.s_table,
partition, first_block,
block_count);
allocated = udf_table_prealloc_blocks(sb,
map->s_uspace.s_table,
partition, first_block,
block_count);
else if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP)
return udf_bitmap_prealloc_blocks(sb, inode,
map->s_fspace.s_bitmap,
partition, first_block,
block_count);
allocated = udf_bitmap_prealloc_blocks(sb,
map->s_fspace.s_bitmap,
partition, first_block,
block_count);
else if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE)
return udf_table_prealloc_blocks(sb, inode,
map->s_fspace.s_table,
partition, first_block,
block_count);
allocated = udf_table_prealloc_blocks(sb,
map->s_fspace.s_table,
partition, first_block,
block_count);
else
return 0;
if (inode && allocated > 0)
inode_add_bytes(inode, allocated << sb->s_blocksize_bits);
return allocated;
}
inline int udf_new_block(struct super_block *sb,
......@@ -793,25 +797,29 @@ inline int udf_new_block(struct super_block *sb,
uint16_t partition, uint32_t goal, int *err)
{
struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];
int block;
if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP)
return udf_bitmap_new_block(sb, inode,
map->s_uspace.s_bitmap,
partition, goal, err);
block = udf_bitmap_new_block(sb,
map->s_uspace.s_bitmap,
partition, goal, err);
else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE)
return udf_table_new_block(sb, inode,
map->s_uspace.s_table,
partition, goal, err);
else if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP)
return udf_bitmap_new_block(sb, inode,
map->s_fspace.s_bitmap,
block = udf_table_new_block(sb,
map->s_uspace.s_table,
partition, goal, err);
else if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP)
block = udf_bitmap_new_block(sb,
map->s_fspace.s_bitmap,
partition, goal, err);
else if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE)
return udf_table_new_block(sb, inode,
map->s_fspace.s_table,
partition, goal, err);
block = udf_table_new_block(sb,
map->s_fspace.s_table,
partition, goal, err);
else {
*err = -EIO;
return 0;
}
if (inode && block)
inode_add_bytes(inode, sb->s_blocksize);
return block;
}
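
Note: the allocation side above is the mirror image: udf_prealloc_blocks() and udf_new_block() now call inode_add_bytes() themselves once they know how many blocks the backend handed out. For reference, this is roughly what that byte accounting does to i_blocks (which counts 512-byte sectors); a simplified user-space model, not the VFS implementation:

#include <stdint.h>
#include <stdio.h>

struct fake_inode {
	uint64_t i_blocks;    /* 512-byte sectors, as reported by stat() */
	uint16_t i_bytes;     /* remainder below one sector */
};

static void inode_add_bytes(struct fake_inode *inode, uint64_t bytes)
{
	inode->i_blocks += bytes >> 9;
	inode->i_bytes += bytes & 511;
	if (inode->i_bytes >= 512) {
		inode->i_bytes -= 512;
		inode->i_blocks++;
	}
}

int main(void)
{
	struct fake_inode inode = { 0, 0 };
	unsigned blocksize = 2048;                  /* a common UDF block size */

	for (int i = 0; i < 3; i++)                 /* three newly allocated blocks */
		inode_add_bytes(&inode, blocksize);

	printf("i_blocks = %llu sectors\n", (unsigned long long)inode.i_blocks);
	return 0;
}
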
......@@ -116,6 +116,7 @@ struct inode *udf_new_inode(struct inode *dir, umode_t mode, int *err)
iinfo->i_lenEAttr = 0;
iinfo->i_lenAlloc = 0;
iinfo->i_use = 0;
iinfo->i_checkpoint = 1;
if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_AD_IN_ICB))
iinfo->i_alloc_type = ICBTAG_FLAG_AD_IN_ICB;
else if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
......
......@@ -1358,6 +1358,7 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
iinfo->i_unique = le64_to_cpu(fe->uniqueID);
iinfo->i_lenEAttr = le32_to_cpu(fe->lengthExtendedAttr);
iinfo->i_lenAlloc = le32_to_cpu(fe->lengthAllocDescs);
iinfo->i_checkpoint = le32_to_cpu(fe->checkpoint);
offset = sizeof(struct fileEntry) + iinfo->i_lenEAttr;
} else {
inode->i_blocks = le64_to_cpu(efe->logicalBlocksRecorded) <<
......@@ -1379,6 +1380,7 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
iinfo->i_unique = le64_to_cpu(efe->uniqueID);
iinfo->i_lenEAttr = le32_to_cpu(efe->lengthExtendedAttr);
iinfo->i_lenAlloc = le32_to_cpu(efe->lengthAllocDescs);
iinfo->i_checkpoint = le32_to_cpu(efe->checkpoint);
offset = sizeof(struct extendedFileEntry) +
iinfo->i_lenEAttr;
}
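
Note: the udf_new_inode() and udf_fill_inode() hunks above, together with the udf_update_inode() changes that follow, give the file entry checkpoint field a full life cycle ("udf: Init/maintain file entry checkpoint field"): start at 1 for newly created inodes, keep the on-disk value when reading, and write it back unchanged. A compact sketch of that flow with simplified stand-in structs and identity endian helpers (valid only on a little-endian host):

#include <stdint.h>
#include <stdio.h>

struct on_disk_fe { uint32_t checkpoint; };     /* little-endian on the medium */
struct inode_info { uint32_t i_checkpoint; };

/* Identity on a little-endian host; the kernel uses le32_to_cpu/cpu_to_le32. */
static uint32_t le32_to_cpu(uint32_t v) { return v; }
static uint32_t cpu_to_le32(uint32_t v) { return v; }

static void new_inode(struct inode_info *ii)
{
	ii->i_checkpoint = 1;                   /* new files start at checkpoint 1 */
}

static void fill_inode(struct inode_info *ii, const struct on_disk_fe *fe)
{
	ii->i_checkpoint = le32_to_cpu(fe->checkpoint);
}

static void update_inode(const struct inode_info *ii, struct on_disk_fe *fe)
{
	fe->checkpoint = cpu_to_le32(ii->i_checkpoint);
}

int main(void)
{
	struct inode_info ii;
	struct on_disk_fe fe;

	new_inode(&ii);
	update_inode(&ii, &fe);                 /* a fresh file no longer writes 0 */
	printf("new file checkpoint: %u\n", le32_to_cpu(fe.checkpoint));

	fe.checkpoint = cpu_to_le32(5);
	fill_inode(&ii, &fe);                   /* existing value survives a rewrite */
	update_inode(&ii, &fe);
	printf("rewritten checkpoint: %u\n", le32_to_cpu(fe.checkpoint));
	return 0;
}
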
......@@ -1495,6 +1497,7 @@ static int udf_update_inode(struct inode *inode, int do_sync)
struct buffer_head *bh = NULL;
struct fileEntry *fe;
struct extendedFileEntry *efe;
uint64_t lb_recorded;
uint32_t udfperms;
uint16_t icbflags;
uint16_t crclen;
......@@ -1589,13 +1592,18 @@ static int udf_update_inode(struct inode *inode, int do_sync)
dsea->minorDeviceIdent = cpu_to_le32(iminor(inode));
}
if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
lb_recorded = 0; /* No extents => no blocks! */
else
lb_recorded =
(inode->i_blocks + (1 << (blocksize_bits - 9)) - 1) >>
(blocksize_bits - 9);
if (iinfo->i_efe == 0) {
memcpy(bh->b_data + sizeof(struct fileEntry),
iinfo->i_ext.i_data,
inode->i_sb->s_blocksize - sizeof(struct fileEntry));
fe->logicalBlocksRecorded = cpu_to_le64(
(inode->i_blocks + (1 << (blocksize_bits - 9)) - 1) >>
(blocksize_bits - 9));
fe->logicalBlocksRecorded = cpu_to_le64(lb_recorded);
udf_time_to_disk_stamp(&fe->accessTime, inode->i_atime);
udf_time_to_disk_stamp(&fe->modificationTime, inode->i_mtime);
......@@ -1607,6 +1615,7 @@ static int udf_update_inode(struct inode *inode, int do_sync)
fe->uniqueID = cpu_to_le64(iinfo->i_unique);
fe->lengthExtendedAttr = cpu_to_le32(iinfo->i_lenEAttr);
fe->lengthAllocDescs = cpu_to_le32(iinfo->i_lenAlloc);
fe->checkpoint = cpu_to_le32(iinfo->i_checkpoint);
fe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_FE);
crclen = sizeof(struct fileEntry);
} else {
......@@ -1615,9 +1624,7 @@ static int udf_update_inode(struct inode *inode, int do_sync)
inode->i_sb->s_blocksize -
sizeof(struct extendedFileEntry));
efe->objectSize = cpu_to_le64(inode->i_size);
efe->logicalBlocksRecorded = cpu_to_le64(
(inode->i_blocks + (1 << (blocksize_bits - 9)) - 1) >>
(blocksize_bits - 9));
efe->logicalBlocksRecorded = cpu_to_le64(lb_recorded);
if (iinfo->i_crtime.tv_sec > inode->i_atime.tv_sec ||
(iinfo->i_crtime.tv_sec == inode->i_atime.tv_sec &&
......@@ -1646,6 +1653,7 @@ static int udf_update_inode(struct inode *inode, int do_sync)
efe->uniqueID = cpu_to_le64(iinfo->i_unique);
efe->lengthExtendedAttr = cpu_to_le32(iinfo->i_lenEAttr);
efe->lengthAllocDescs = cpu_to_le32(iinfo->i_lenAlloc);
efe->checkpoint = cpu_to_le32(iinfo->i_checkpoint);
efe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_EFE);
crclen = sizeof(struct extendedFileEntry);
}
......
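
Note: the udf_update_inode() hunks above factor the logicalBlocksRecorded value into lb_recorded: zero when the data is embedded in the ICB (no extents at all), otherwise i_blocks (512-byte sectors) rounded up to whole filesystem blocks. The same arithmetic as a stand-alone sketch:

#include <stdint.h>
#include <stdio.h>

static uint64_t lb_recorded(uint64_t i_blocks, unsigned blocksize_bits,
			    int data_in_icb)
{
	unsigned sectors_per_block = 1u << (blocksize_bits - 9);

	if (data_in_icb)
		return 0;                       /* no extents => no blocks */
	return (i_blocks + sectors_per_block - 1) >> (blocksize_bits - 9);
}

int main(void)
{
	/* 5 sectors of file data on a 2 KiB-block filesystem -> 2 recorded blocks */
	printf("%llu\n", (unsigned long long)lb_recorded(5, 11, 0));
	/* data stored inside the ICB -> 0 recorded blocks, whatever i_blocks says */
	printf("%llu\n", (unsigned long long)lb_recorded(5, 11, 1));
	return 0;
}
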
......@@ -950,11 +950,8 @@ static struct udf_bitmap *udf_sb_alloc_bitmap(struct super_block *sb, u32 index)
else
bitmap = vzalloc(size); /* TODO: get rid of vzalloc */
if (bitmap == NULL) {
udf_err(sb, "Unable to allocate space for bitmap and %d buffer_head pointers\n",
nr_groups);
if (bitmap == NULL)
return NULL;
}
bitmap->s_block_bitmap = (struct buffer_head **)(bitmap + 1);
bitmap->s_nr_groups = nr_groups;
......
......@@ -23,6 +23,7 @@ struct udf_inode_info {
__u64 i_lenExtents;
__u32 i_next_alloc_block;
__u32 i_next_alloc_goal;
__u32 i_checkpoint;
unsigned i_alloc_type : 3;
unsigned i_efe : 1; /* extendedFileEntry */
unsigned i_use : 1; /* unallocSpaceEntry */
......