Commit 1832a6d5 authored by Chris Mason

Btrfs: Implement basic support for -ENOSPC

This is intended to prevent accidentally filling the drive.  A determined
user can still make things oops.

It includes some accounting of the current bytes under delayed allocation,
but this will change as things get optimized.
Signed-off-by: Chris Mason <chris.mason@oracle.com>
parent 01f46658
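
For intuition, here is a rough userspace sketch of the accounting idea this commit introduces: writes reserve their worst case against bytes already used plus bytes still pending under delayed allocation, and fail with -ENOSPC up front. The struct, the helper name, and the simple comparison below are illustrative assumptions only; the real check, btrfs_check_free_space(), lives in inode.c and is not part of the hunks shown here.

/*
 * Userspace sketch of the idea behind this commit, NOT the kernel code:
 * a write first checks its worst case against (bytes already used +
 * bytes still pending under delayed allocation) and fails with -ENOSPC
 * before any pages are dirtied.  All names and numbers are invented.
 */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>

struct toy_fs {
	uint64_t total_bytes;    /* size of the filesystem */
	uint64_t bytes_used;     /* already allocated on disk */
	uint64_t delalloc_bytes; /* dirty in ram, not yet allocated */
};

/* Fail if the worst-case allocation would not fit (illustrative only). */
static int toy_check_free_space(struct toy_fs *fs, uint64_t num_required)
{
	uint64_t committed = fs->bytes_used + fs->delalloc_bytes;

	if (committed + num_required > fs->total_bytes)
		return -ENOSPC;
	return 0;
}

int main(void)
{
	struct toy_fs fs = { .total_bytes = 1 << 20,      /* 1MB volume   */
			     .bytes_used = 768 * 1024,    /* 768k on disk */
			     .delalloc_bytes = 128 * 1024 /* 128k pending */ };

	/* 64k more still fits: 768k + 128k + 64k <= 1MB, expect 0. */
	printf("check(64k)  = %d\n", toy_check_free_space(&fs, 64 * 1024));

	/* 256k more would overcommit the volume, expect -ENOSPC (-28). */
	printf("check(256k) = %d\n", toy_check_free_space(&fs, 256 * 1024));
	return 0;
}
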
@@ -346,6 +346,8 @@ struct btrfs_fs_info {
 	int closing;
 	u64 total_pinned;
+	spinlock_t delalloc_lock;
+	u64 delalloc_bytes;
 };
 /*
  * in ram representation of the tree. extent_root is used for all allocations
@@ -1115,6 +1117,8 @@ int btrfs_csum_truncate(struct btrfs_trans_handle *trans,
 			struct btrfs_root *root, struct btrfs_path *path,
 			u64 isize);
 /* inode.c */
+int btrfs_check_free_space(struct btrfs_root *root, u64 num_required,
+			   int for_del);
 int btrfs_page_mkwrite(struct vm_area_struct *vma, struct page *page);
 int btrfs_readpage(struct file *file, struct page *page);
 void btrfs_delete_inode(struct inode *inode);
......
@@ -223,7 +223,8 @@ static int btree_writepages(struct address_space *mapping,
 	} else {
 		thresh = 8 * 1024 * 1024;
 	}
-	num_dirty = count_range_bits(tree, &start, thresh, EXTENT_DIRTY);
+	num_dirty = count_range_bits(tree, &start, (u64)-1,
+				     thresh, EXTENT_DIRTY);
 	if (num_dirty < thresh) {
 		return 0;
 	}
@@ -559,6 +560,7 @@ struct btrfs_root *open_ctree(struct super_block *sb)
 	INIT_LIST_HEAD(&fs_info->dead_roots);
 	INIT_LIST_HEAD(&fs_info->hashers);
 	spin_lock_init(&fs_info->hash_lock);
+	spin_lock_init(&fs_info->delalloc_lock);
 	memset(&fs_info->super_kobj, 0, sizeof(fs_info->super_kobj));
 	init_completion(&fs_info->kobj_unregister);
@@ -570,6 +572,7 @@ struct btrfs_root *open_ctree(struct super_block *sb)
 	fs_info->sb = sb;
 	fs_info->mount_opt = 0;
 	fs_info->max_extent = (u64)-1;
+	fs_info->delalloc_bytes = 0;
 	fs_info->btree_inode = new_inode(sb);
 	fs_info->btree_inode->i_ino = 1;
 	fs_info->btree_inode->i_nlink = 1;
......
@@ -1131,7 +1131,8 @@ u64 find_lock_delalloc_range(struct extent_map_tree *tree,
 }
 u64 count_range_bits(struct extent_map_tree *tree,
-		     u64 *start, u64 max_bytes, unsigned long bits)
+		     u64 *start, u64 search_end, u64 max_bytes,
+		     unsigned long bits)
 {
 	struct rb_node *node;
 	struct extent_state *state;
@@ -1139,9 +1140,14 @@ u64 count_range_bits(struct extent_map_tree *tree,
 	u64 total_bytes = 0;
 	int found = 0;
+	if (search_end <= cur_start) {
+		printk("search_end %Lu start %Lu\n", search_end, cur_start);
+		WARN_ON(1);
+		return 0;
+	}
 	write_lock_irq(&tree->lock);
-	if (bits == EXTENT_DIRTY) {
-		*start = 0;
+	if (cur_start == 0 && bits == EXTENT_DIRTY) {
 		total_bytes = tree->dirty_bytes;
 		goto out;
 	}
@@ -1156,8 +1162,11 @@ u64 count_range_bits(struct extent_map_tree *tree,
 	while(1) {
 		state = rb_entry(node, struct extent_state, rb_node);
-		if ((state->state & bits)) {
-			total_bytes += state->end - state->start + 1;
+		if (state->start > search_end)
+			break;
+		if (state->end >= cur_start && (state->state & bits)) {
+			total_bytes += min(search_end, state->end) + 1 -
+				       max(cur_start, state->start);
 			if (total_bytes >= max_bytes)
 				break;
 			if (!found) {
@@ -1173,7 +1182,6 @@ u64 count_range_bits(struct extent_map_tree *tree,
 	write_unlock_irq(&tree->lock);
 	return total_bytes;
 }
 /*
  * helper function to lock both pages and extents in the tree.
  * pages must be locked first.
......
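
Aside on the count_range_bits() change above: each matching extent_state is now clamped to the query window [cur_start, search_end] before being counted. Below is a tiny standalone illustration of that expression with made-up offsets; it is plain userspace C, not kernel code.

/*
 * Minimal illustration of the clamping math count_range_bits() now uses
 * for each matching extent_state: only the part of [state->start,
 * state->end] that overlaps the query window [cur_start, search_end]
 * is counted.  Invented values, not kernel code.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t min_u64(uint64_t a, uint64_t b) { return a < b ? a : b; }
static uint64_t max_u64(uint64_t a, uint64_t b) { return a > b ? a : b; }

int main(void)
{
	uint64_t cur_start = 8192, search_end = 20479;   /* query window    */
	uint64_t state_start = 4096, state_end = 16383;  /* one dirty state */

	/* Same shape as the patch: min(end) + 1 - max(start). */
	uint64_t counted = min_u64(search_end, state_end) + 1 -
			   max_u64(cur_start, state_start);

	/* Overlap is [8192, 16383], i.e. 8192 bytes. */
	printf("counted = %llu\n", (unsigned long long)counted);
	return 0;
}
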
@@ -115,7 +115,8 @@ int __init extent_map_init(void);
 void extent_map_exit(void);
 u64 count_range_bits(struct extent_map_tree *tree,
-		     u64 *start, u64 max_bytes, unsigned long bits);
+		     u64 *start, u64 search_end,
+		     u64 max_bytes, unsigned long bits);
 int test_range_bit(struct extent_map_tree *tree, u64 start, u64 end,
 		   int bits, int filled);
......
@@ -307,6 +307,7 @@ static int dirty_and_release_pages(struct btrfs_trans_handle *trans,
 	    inline_size > 32768 ||
 	    inline_size >= BTRFS_MAX_INLINE_DATA_SIZE(root)) {
 		u64 last_end;
+		u64 existing_delalloc = 0;
 		for (i = 0; i < num_pages; i++) {
 			struct page *p = pages[i];
@@ -316,8 +317,19 @@ static int dirty_and_release_pages(struct btrfs_trans_handle *trans,
 		last_end = (u64)(pages[num_pages -1]->index) <<
 			   PAGE_CACHE_SHIFT;
 		last_end += PAGE_CACHE_SIZE - 1;
+		if (start_pos < isize) {
+			u64 delalloc_start = start_pos;
+			existing_delalloc = count_range_bits(em_tree,
+					     &delalloc_start,
+					     end_of_last_block, (u64)-1,
+					     EXTENT_DELALLOC);
+		}
 		set_extent_delalloc(em_tree, start_pos, end_of_last_block,
 				    GFP_NOFS);
+		spin_lock(&root->fs_info->delalloc_lock);
+		root->fs_info->delalloc_bytes += (end_of_last_block + 1 -
+						  start_pos) - existing_delalloc;
+		spin_unlock(&root->fs_info->delalloc_lock);
 	} else {
 		u64 aligned_end;
 		/* step one, delete the existing extents in this range */
@@ -708,12 +720,12 @@ static ssize_t btrfs_file_write(struct file *file, const char __user *buf,
 	current->backing_dev_info = inode->i_mapping->backing_dev_info;
 	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
 	if (err)
-		goto out;
+		goto out_nolock;
 	if (count == 0)
-		goto out;
+		goto out_nolock;
 	err = remove_suid(fdentry(file));
 	if (err)
-		goto out;
+		goto out_nolock;
 	file_update_time(file);
 	pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL);
@@ -758,6 +770,13 @@ static ssize_t btrfs_file_write(struct file *file, const char __user *buf,
 		WARN_ON(num_pages > nrptrs);
 		memset(pages, 0, sizeof(pages));
+
+		mutex_lock(&root->fs_info->fs_mutex);
+		ret = btrfs_check_free_space(root, write_bytes, 0);
+		mutex_unlock(&root->fs_info->fs_mutex);
+		if (ret)
+			goto out;
+
 		ret = prepare_pages(root, file, pages, num_pages,
 				    pos, first_index, last_index,
 				    write_bytes);
@@ -787,8 +806,9 @@ static ssize_t btrfs_file_write(struct file *file, const char __user *buf,
 		btrfs_btree_balance_dirty(root, 1);
 		cond_resched();
 	}
-	mutex_unlock(&inode->i_mutex);
 out:
+	mutex_unlock(&inode->i_mutex);
+out_nolock:
 	kfree(pages);
 	if (pinned[0])
 		page_cache_release(pinned[0]);
......
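
One more note on the dirty_and_release_pages() hunk: the existing_delalloc lookup keeps fs_info->delalloc_bytes from being inflated when a write re-dirties a range that is already marked EXTENT_DELALLOC. A hedged userspace sketch of that bookkeeping with invented numbers follows; the real accounting is done under fs_info->delalloc_lock in the hunk above.

/*
 * Toy model of the delalloc_bytes bookkeeping: only the newly-delalloc
 * part of a written range is added to the global counter, so rewriting
 * an already-dirty range adds nothing.  Not kernel code.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t delalloc_bytes; /* stand-in for fs_info->delalloc_bytes */

static void account_write(uint64_t start, uint64_t end_of_last_block,
			  uint64_t existing_delalloc)
{
	/* Same shape as the patch: (range size) - (already counted). */
	delalloc_bytes += (end_of_last_block + 1 - start) - existing_delalloc;
}

int main(void)
{
	/* First write dirties 16k that was clean: all 16k is new delalloc. */
	account_write(0, 16383, 0);
	printf("after first write: %llu\n", (unsigned long long)delalloc_bytes);

	/* Rewriting the same 16k before it is flushed adds nothing. */
	account_write(0, 16383, 16384);
	printf("after rewrite:     %llu\n", (unsigned long long)delalloc_bytes);
	return 0;
}
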