Commit 72055425 authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs

Pull btrfs update from Chris Mason:
 "This is a large pull, with the bulk of the updates coming from:

   - Hole punching

   - send/receive fixes

   - fsync performance

   - Disk format extension allowing more hardlinks inside a single
     directory (btrfs-progs patch required to enable the compat bit for
     this one)

  I'm cooking more unrelated RAID code, but I wanted to make sure this
  original batch makes it in.  The largest updates here are relatively
  old and have been in testing for some time."

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs: (121 commits)
  btrfs: init ref_index to zero in add_inode_ref
  Btrfs: remove repeated eb->pages check in, disk-io.c/csum_dirty_buffer
  Btrfs: fix page leakage
  Btrfs: do not warn_on when we cannot alloc a page for an extent buffer
  Btrfs: don't bug on enomem in readpage
  Btrfs: cleanup pages properly when ENOMEM in compression
  Btrfs: make filesystem read-only when submitting barrier fails
  Btrfs: detect corrupted filesystem after write I/O errors
  Btrfs: make compress and nodatacow mount options mutually exclusive
  btrfs: fix message printing
  Btrfs: don't bother committing delayed inode updates when fsyncing
  btrfs: move inline function code to header file
  Btrfs: remove unnecessary IS_ERR in bio_readpage_error()
  btrfs: remove unused function btrfs_insert_some_items()
  Btrfs: don't commit instead of overcommitting
  Btrfs: confirmation of value is added before trace_btrfs_get_extent() is called
  Btrfs: be smarter about dropping things from the tree log
  Btrfs: don't lookup csums for prealloc extents
  Btrfs: cache extent state when writing out dirty metadata pages
  Btrfs: do not hold the file extent leaf locked when adding extent item
  ...
parents fc81c038 f46dbe3d
@@ -33,14 +33,13 @@ struct inode_fs_paths {
typedef int (iterate_extent_inodes_t)(u64 inum, u64 offset, u64 root,
void *ctx);
typedef int (iterate_irefs_t)(u64 parent, struct btrfs_inode_ref *iref,
struct extent_buffer *eb, void *ctx);
int inode_item_info(u64 inum, u64 ioff, struct btrfs_root *fs_root,
struct btrfs_path *path);
int extent_from_logical(struct btrfs_fs_info *fs_info, u64 logical,
struct btrfs_path *path, struct btrfs_key *found_key);
struct btrfs_path *path, struct btrfs_key *found_key,
u64 *flags);
int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
struct btrfs_extent_item *ei, u32 item_size,
@@ -69,4 +68,9 @@ struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
struct btrfs_path *path);
void free_ipath(struct inode_fs_paths *ipath);
int btrfs_find_one_extref(struct btrfs_root *root, u64 inode_objectid,
u64 start_off, struct btrfs_path *path,
struct btrfs_inode_extref **ret_extref,
u64 *found_off);
#endif
@@ -38,6 +38,7 @@
#define BTRFS_INODE_DELALLOC_META_RESERVED 4
#define BTRFS_INODE_HAS_ORPHAN_ITEM 5
#define BTRFS_INODE_HAS_ASYNC_EXTENT 6
#define BTRFS_INODE_NEEDS_FULL_SYNC 7
/* in memory btrfs inode */
struct btrfs_inode {
@@ -143,6 +144,9 @@ struct btrfs_inode {
/* flags field from the on disk inode */
u32 flags;
/* a local copy of root's last_log_commit */
unsigned long last_log_commit;
/*
* Counters to keep track of the number of extent item's we may use due
* to delalloc and such. outstanding_extents is the number of extent
@@ -202,15 +206,10 @@ static inline bool btrfs_is_free_space_inode(struct inode *inode)
static inline int btrfs_inode_in_log(struct inode *inode, u64 generation)
{
struct btrfs_root *root = BTRFS_I(inode)->root;
int ret = 0;
mutex_lock(&root->log_mutex);
if (BTRFS_I(inode)->logged_trans == generation &&
BTRFS_I(inode)->last_sub_trans <= root->last_log_commit)
ret = 1;
mutex_unlock(&root->log_mutex);
return ret;
BTRFS_I(inode)->last_sub_trans <= BTRFS_I(inode)->last_log_commit)
return 1;
return 0;
}
#endif
@@ -37,8 +37,9 @@
* the file system was mounted, (i.e., they have been
* referenced by the super block) or they have been
* written since then and the write completion callback
* was called and a FLUSH request to the device where
* these blocks are located was received and completed.
* was called and no write error was indicated and a
* FLUSH request to the device where these blocks are
* located was received and completed.
* 2b. All referenced blocks need to have a generation
* number which is equal to the parent's number.
*
@@ -2601,6 +2602,17 @@ static int btrfsic_check_all_ref_blocks(struct btrfsic_state *state,
(unsigned long long)l->block_ref_to->dev_bytenr,
l->block_ref_to->mirror_num);
ret = -1;
} else if (l->block_ref_to->iodone_w_error) {
printk(KERN_INFO "btrfs: attempt to write superblock"
" which references block %c @%llu (%s/%llu/%d)"
" which has write error!\n",
btrfsic_get_block_type(state, l->block_ref_to),
(unsigned long long)
l->block_ref_to->logical_bytenr,
l->block_ref_to->dev_state->name,
(unsigned long long)l->block_ref_to->dev_bytenr,
l->block_ref_to->mirror_num);
ret = -1;
} else if (l->parent_generation !=
l->block_ref_to->generation &&
BTRFSIC_GENERATION_UNKNOWN !=
...
@@ -577,6 +577,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
u64 em_start;
struct extent_map *em;
int ret = -ENOMEM;
int faili = 0;
u32 *sums;
tree = &BTRFS_I(inode)->io_tree;
@@ -626,9 +627,13 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
for (pg_index = 0; pg_index < nr_pages; pg_index++) {
cb->compressed_pages[pg_index] = alloc_page(GFP_NOFS |
__GFP_HIGHMEM);
if (!cb->compressed_pages[pg_index])
if (!cb->compressed_pages[pg_index]) {
faili = pg_index - 1;
ret = -ENOMEM;
goto fail2;
}
}
faili = nr_pages - 1;
cb->nr_pages = nr_pages;
add_ra_bio_pages(inode, em_start + em_len, cb);
@@ -713,8 +718,10 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
return 0;
fail2:
for (pg_index = 0; pg_index < nr_pages; pg_index++)
free_page((unsigned long)cb->compressed_pages[pg_index]);
while (faili >= 0) {
__free_page(cb->compressed_pages[faili]);
faili--;
}
kfree(cb->compressed_pages);
fail1:
...
@@ -4401,149 +4401,6 @@ void btrfs_extend_item(struct btrfs_trans_handle *trans,
}
}
/*
* Given a key and some data, insert items into the tree.
* This does all the path init required, making room in the tree if needed.
* Returns the number of keys that were inserted.
*/
int btrfs_insert_some_items(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path,
struct btrfs_key *cpu_key, u32 *data_size,
int nr)
{
struct extent_buffer *leaf;
struct btrfs_item *item;
int ret = 0;
int slot;
int i;
u32 nritems;
u32 total_data = 0;
u32 total_size = 0;
unsigned int data_end;
struct btrfs_disk_key disk_key;
struct btrfs_key found_key;
struct btrfs_map_token token;
btrfs_init_map_token(&token);
for (i = 0; i < nr; i++) {
if (total_size + data_size[i] + sizeof(struct btrfs_item) >
BTRFS_LEAF_DATA_SIZE(root)) {
break;
nr = i;
}
total_data += data_size[i];
total_size += data_size[i] + sizeof(struct btrfs_item);
}
BUG_ON(nr == 0);
ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
if (ret == 0)
return -EEXIST;
if (ret < 0)
goto out;
leaf = path->nodes[0];
nritems = btrfs_header_nritems(leaf);
data_end = leaf_data_end(root, leaf);
if (btrfs_leaf_free_space(root, leaf) < total_size) {
for (i = nr; i >= 0; i--) {
total_data -= data_size[i];
total_size -= data_size[i] + sizeof(struct btrfs_item);
if (total_size < btrfs_leaf_free_space(root, leaf))
break;
}
nr = i;
}
slot = path->slots[0];
BUG_ON(slot < 0);
if (slot != nritems) {
unsigned int old_data = btrfs_item_end_nr(leaf, slot);
item = btrfs_item_nr(leaf, slot);
btrfs_item_key_to_cpu(leaf, &found_key, slot);
/* figure out how many keys we can insert in here */
total_data = data_size[0];
for (i = 1; i < nr; i++) {
if (btrfs_comp_cpu_keys(&found_key, cpu_key + i) <= 0)
break;
total_data += data_size[i];
}
nr = i;
if (old_data < data_end) {
btrfs_print_leaf(root, leaf);
printk(KERN_CRIT "slot %d old_data %d data_end %d\n",
slot, old_data, data_end);
BUG_ON(1);
}
/*
* item0..itemN ... dataN.offset..dataN.size .. data0.size
*/
/* first correct the data pointers */
for (i = slot; i < nritems; i++) {
u32 ioff;
item = btrfs_item_nr(leaf, i);
ioff = btrfs_token_item_offset(leaf, item, &token);
btrfs_set_token_item_offset(leaf, item,
ioff - total_data, &token);
}
/* shift the items */
memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
btrfs_item_nr_offset(slot),
(nritems - slot) * sizeof(struct btrfs_item));
/* shift the data */
memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
data_end - total_data, btrfs_leaf_data(leaf) +
data_end, old_data - data_end);
data_end = old_data;
} else {
/*
* this sucks but it has to be done, if we are inserting at
* the end of the leaf only insert 1 of the items, since we
* have no way of knowing whats on the next leaf and we'd have
* to drop our current locks to figure it out
*/
nr = 1;
}
/* setup the item for the new data */
for (i = 0; i < nr; i++) {
btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
btrfs_set_item_key(leaf, &disk_key, slot + i);
item = btrfs_item_nr(leaf, slot + i);
btrfs_set_token_item_offset(leaf, item,
data_end - data_size[i], &token);
data_end -= data_size[i];
btrfs_set_token_item_size(leaf, item, data_size[i], &token);
}
btrfs_set_header_nritems(leaf, nritems + nr);
btrfs_mark_buffer_dirty(leaf);
ret = 0;
if (slot == 0) {
btrfs_cpu_key_to_disk(&disk_key, cpu_key);
fixup_low_keys(trans, root, path, &disk_key, 1);
}
if (btrfs_leaf_free_space(root, leaf) < 0) {
btrfs_print_leaf(root, leaf);
BUG();
}
out:
if (!ret)
ret = nr;
return ret;
}
/*
* this is a helper for btrfs_insert_empty_items, the main goal here is
* to save stack depth by doing the bulk of the work in a function
@@ -5073,6 +4930,7 @@ static void tree_move_down(struct btrfs_root *root,
struct btrfs_path *path,
int *level, int root_level)
{
BUG_ON(*level == 0);
path->nodes[*level - 1] = read_node_slot(root, path->nodes[*level],
path->slots[*level]);
path->slots[*level - 1] = 0;
@@ -5089,7 +4947,7 @@ static int tree_move_next_or_upnext(struct btrfs_root *root,
path->slots[*level]++;
while (path->slots[*level] == nritems) {
while (path->slots[*level] >= nritems) {
if (*level == root_level)
return -1;
@@ -5433,9 +5291,11 @@ int btrfs_compare_trees(struct btrfs_root *left_root,
goto out;
advance_right = ADVANCE;
} else {
WARN_ON(!extent_buffer_uptodate(left_path->nodes[0]));
ret = tree_compare_item(left_root, left_path,
right_path, tmp_buf);
if (ret) {
WARN_ON(!extent_buffer_uptodate(left_path->nodes[0]));
ret = changed_cb(left_root, right_root,
left_path, right_path,
&left_key,
...
@@ -154,6 +154,13 @@ struct btrfs_ordered_sum;
*/
#define BTRFS_NAME_LEN 255
/*
* Theoretical limit is larger, but we keep this down to a sane
* value. That should limit greatly the possibility of collisions on
* inode ref items.
*/
#define BTRFS_LINK_MAX 65535U
/* 32 bytes in various csum fields */
#define BTRFS_CSUM_SIZE 32
@@ -489,6 +496,8 @@ struct btrfs_super_block {
*/
#define BTRFS_FEATURE_INCOMPAT_BIG_METADATA (1ULL << 5)
#define BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF (1ULL << 6)
#define BTRFS_FEATURE_COMPAT_SUPP 0ULL
#define BTRFS_FEATURE_COMPAT_RO_SUPP 0ULL
#define BTRFS_FEATURE_INCOMPAT_SUPP \
@@ -496,7 +505,8 @@ struct btrfs_super_block {
BTRFS_FEATURE_INCOMPAT_DEFAULT_SUBVOL | \
BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS | \
BTRFS_FEATURE_INCOMPAT_BIG_METADATA | \
BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO)
BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO | \
BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF)
/*
* A leaf is full of items. offset and size tell us where to find
@@ -643,6 +653,14 @@ struct btrfs_inode_ref {
/* name goes here */
} __attribute__ ((__packed__));
struct btrfs_inode_extref {
__le64 parent_objectid;
__le64 index;
__le16 name_len;
__u8 name[0];
/* name goes here */
} __attribute__ ((__packed__));
struct btrfs_timespec {
__le64 sec;
__le32 nsec;
@@ -1028,12 +1046,22 @@ struct btrfs_space_info {
wait_queue_head_t wait;
};
#define BTRFS_BLOCK_RSV_GLOBAL 1
#define BTRFS_BLOCK_RSV_DELALLOC 2
#define BTRFS_BLOCK_RSV_TRANS 3
#define BTRFS_BLOCK_RSV_CHUNK 4
#define BTRFS_BLOCK_RSV_DELOPS 5
#define BTRFS_BLOCK_RSV_EMPTY 6
#define BTRFS_BLOCK_RSV_TEMP 7
struct btrfs_block_rsv {
u64 size;
u64 reserved;
struct btrfs_space_info *space_info;
spinlock_t lock;
unsigned int full;
unsigned short full;
unsigned short type;
unsigned short failfast;
};
/*
@@ -1127,6 +1155,9 @@ struct btrfs_block_group_cache {
* Today it will only have one thing on it, but that may change
*/
struct list_head cluster_list;
/* For delayed block group creation */
struct list_head new_bg_list;
};
/* delayed seq elem */
@@ -1240,7 +1271,6 @@ struct btrfs_fs_info {
struct mutex reloc_mutex;
struct list_head trans_list;
struct list_head hashers;
struct list_head dead_roots;
struct list_head caching_block_groups;
@@ -1366,9 +1396,6 @@ struct btrfs_fs_info {
struct rb_root defrag_inodes;
atomic_t defrag_running;
spinlock_t ref_cache_lock;
u64 total_ref_cache_size;
/*
* these three are in extended format (availability of single
* chunks is denoted by BTRFS_AVAIL_ALLOC_BIT_SINGLE bit, other
@@ -1441,6 +1468,8 @@ struct btrfs_fs_info {
/* next backup root to be overwritten */
int backup_root_index;
int num_tolerated_disk_barrier_failures;
};
/*
@@ -1481,9 +1510,9 @@ struct btrfs_root {
wait_queue_head_t log_commit_wait[2];
atomic_t log_writers;
atomic_t log_commit[2];
atomic_t log_batch;
unsigned long log_transid;
unsigned long last_log_commit;
unsigned long log_batch;
pid_t log_start_pid;
bool log_multiple_pids;
@@ -1592,6 +1621,7 @@ struct btrfs_ioctl_defrag_range_args {
*/
#define BTRFS_INODE_ITEM_KEY 1
#define BTRFS_INODE_REF_KEY 12
#define BTRFS_INODE_EXTREF_KEY 13
#define BTRFS_XATTR_ITEM_KEY 24
#define BTRFS_ORPHAN_ITEM_KEY 48
/* reserve 2-15 close to the inode for later flexibility */
@@ -1978,6 +2008,13 @@ BTRFS_SETGET_STACK_FUNCS(block_group_flags,
BTRFS_SETGET_FUNCS(inode_ref_name_len, struct btrfs_inode_ref, name_len, 16);
BTRFS_SETGET_FUNCS(inode_ref_index, struct btrfs_inode_ref, index, 64);
/* struct btrfs_inode_extref */
BTRFS_SETGET_FUNCS(inode_extref_parent, struct btrfs_inode_extref,
parent_objectid, 64);
BTRFS_SETGET_FUNCS(inode_extref_name_len, struct btrfs_inode_extref,
name_len, 16);
BTRFS_SETGET_FUNCS(inode_extref_index, struct btrfs_inode_extref, index, 64);
/* struct btrfs_inode_item */
BTRFS_SETGET_FUNCS(inode_generation, struct btrfs_inode_item, generation, 64);
BTRFS_SETGET_FUNCS(inode_sequence, struct btrfs_inode_item, sequence, 64);
@@ -2858,6 +2895,8 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
u64 size);
int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
struct btrfs_root *root, u64 group_start);
void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
struct btrfs_root *root);
u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags);
u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data);
void btrfs_clear_space_info_full(struct btrfs_fs_info *info);
@@ -2874,8 +2913,9 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes);
void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes);
int btrfs_delalloc_reserve_space(struct inode *inode, u64 num_bytes);
void btrfs_delalloc_release_space(struct inode *inode, u64 num_bytes);
void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv);
struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root);
void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, unsigned short type);
struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root,
unsigned short type);
void btrfs_free_block_rsv(struct btrfs_root *root,
struct btrfs_block_rsv *rsv);
int btrfs_block_rsv_add(struct btrfs_root *root,
@@ -3172,12 +3212,12 @@ int btrfs_del_inode_ref(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
const char *name, int name_len,
u64 inode_objectid, u64 ref_objectid, u64 *index);
struct btrfs_inode_ref *
btrfs_lookup_inode_ref(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path,
const char *name, int name_len,
u64 inode_objectid, u64 ref_objectid, int mod);
int btrfs_get_inode_ref_index(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path,
const char *name, int name_len,
u64 inode_objectid, u64 ref_objectid, int mod,
u64 *ret_index);
int btrfs_insert_empty_inode(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path, u64 objectid);
@@ -3185,6 +3225,19 @@ int btrfs_lookup_inode(struct btrfs_trans_handle *trans, struct btrfs_root
*root, struct btrfs_path *path,
struct btrfs_key *location, int mod);
struct btrfs_inode_extref *
btrfs_lookup_inode_extref(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path,
const char *name, int name_len,
u64 inode_objectid, u64 ref_objectid, int ins_len,
int cow);
int btrfs_find_name_in_ext_backref(struct btrfs_path *path,
u64 ref_objectid, const char *name,
int name_len,
struct btrfs_inode_extref **extref_ret);
/* file-item.c */
int btrfs_del_csums(struct btrfs_trans_handle *trans,
struct btrfs_root *root, u64 bytenr, u64 len);
@@ -3249,6 +3302,8 @@ int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct inode *dir, u64 objectid,
const char *name, int name_len);
int btrfs_truncate_page(struct inode *inode, loff_t from, loff_t len,
int front);
int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct inode *inode, u64 new_size,
@@ -3308,16 +3363,27 @@ void btrfs_inherit_iflags(struct inode *inode, struct inode *dir);
int btrfs_defrag_file(struct inode *inode, struct file *file,
struct btrfs_ioctl_defrag_range_args *range,
u64 newer_than, unsigned long max_pages);
void btrfs_get_block_group_info(struct list_head *groups_list,
struct btrfs_ioctl_space_info *space);
/* file.c */
int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
struct inode *inode);
int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info);
int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync);
int btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
void btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
int skip_pinned);
int btrfs_replace_extent_cache(struct inode *inode, struct extent_map *replace,
u64 start, u64 end, int skip_pinned,
int modified);
extern const struct file_operations btrfs_file_operations;
int btrfs_drop_extents(struct btrfs_trans_handle *trans, struct inode *inode,
u64 start, u64 end, u64 *hint_byte, int drop_cache);
int __btrfs_drop_extents(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct inode *inode,
struct btrfs_path *path, u64 start, u64 end,
u64 *drop_end, int drop_cache);
int btrfs_drop_extents(struct btrfs_trans_handle *trans,
struct btrfs_root *root, struct inode *inode, u64 start,
u64 end, int drop_cache);
int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
struct inode *inode, u64 start, u64 end);
int btrfs_release_file(struct inode *inode, struct file *file);
@@ -3378,6 +3444,11 @@ static inline void __btrfs_set_fs_incompat(struct btrfs_fs_info *fs_info,
}
}
/*
* Call btrfs_abort_transaction as early as possible when an error condition is
* detected, that way the exact line number is reported.
*/
#define btrfs_abort_transaction(trans, root, errno) \
do { \
__btrfs_abort_transaction(trans, root, __func__, \
...
@@ -29,7 +29,7 @@ static struct kmem_cache *delayed_node_cache;
int __init btrfs_delayed_inode_init(void)
{
delayed_node_cache = kmem_cache_create("delayed_node",
delayed_node_cache = kmem_cache_create("btrfs_delayed_node",
sizeof(struct btrfs_delayed_node),
0,
SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
@@ -650,7 +650,7 @@ static int btrfs_delayed_inode_reserve_metadata(
* we're accounted for.
*/
if (!src_rsv || (!trans->bytes_reserved &&
src_rsv != &root->fs_info->delalloc_block_rsv)) {
src_rsv->type != BTRFS_BLOCK_RSV_DELALLOC)) {
ret = btrfs_block_rsv_add_noflush(root, dst_rsv, num_bytes);
/*
* Since we're under a transaction reserve_metadata_bytes could
@@ -668,7 +668,7 @@ static int btrfs_delayed_inode_reserve_metadata(
num_bytes, 1);
}
return ret;
} else if (src_rsv == &root->fs_info->delalloc_block_rsv) {
} else if (src_rsv->type == BTRFS_BLOCK_RSV_DELALLOC) {
spin_lock(&BTRFS_I(inode)->lock);
if (test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
&BTRFS_I(inode)->runtime_flags)) {
...
@@ -95,6 +95,8 @@ struct btrfs_root *btrfs_create_tree(struct btrfs_trans_handle *trans,
u64 objectid);
int btree_lock_page_hook(struct page *page, void *data,
void (*flush_fn)(void *));
int btrfs_calc_num_tolerated_disk_barrier_failures(
struct btrfs_fs_info *fs_info);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
void btrfs_init_lockdep(void);
...
@@ -27,6 +27,7 @@
* type for this bio
*/
#define EXTENT_BIO_COMPRESSED 1
#define EXTENT_BIO_TREE_LOG 2
#define EXTENT_BIO_FLAG_SHIFT 16
/* these are bit numbers for test/set bit */
@@ -232,11 +233,15 @@ int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
int clear_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
gfp_t mask);
int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
int bits, int clear_bits, gfp_t mask);
int bits, int clear_bits,
struct extent_state **cached_state, gfp_t mask);
int set_extent_delalloc(struct extent_io_tree *tree, u64 start, u64 end,
struct extent_state **cached_state, gfp_t mask);
int set_extent_defrag(struct extent_io_tree *tree, u64 start, u64 end,
struct extent_state **cached_state, gfp_t mask);
int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
u64 *start_ret, u64 *end_ret, int bits);
u64 *start_ret, u64 *end_ret, int bits,
struct extent_state **cached_state);
struct extent_state *find_first_extent_bit_state(struct extent_io_tree *tree,
u64 start, int bits);
int extent_invalidatepage(struct extent_io_tree *tree,
@@ -277,8 +282,18 @@ void free_extent_buffer_stale(struct extent_buffer *eb);
int read_extent_buffer_pages(struct extent_io_tree *tree,
struct extent_buffer *eb, u64 start, int wait,
get_extent_t *get_extent, int mirror_num);
unsigned long num_extent_pages(u64 start, u64 len);
struct page *extent_buffer_page(struct extent_buffer *eb, unsigned long i);
static inline unsigned long num_extent_pages(u64 start, u64 len)
{
return ((start + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT) -
(start >> PAGE_CACHE_SHIFT);
}
static inline struct page *extent_buffer_page(struct extent_buffer *eb,
unsigned long i)
{
return eb->pages[i];
}
static inline void extent_buffer_get(struct extent_buffer *eb)
{
...
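(A quick arithmetic check of the new num_extent_pages() inline, assuming 4 KiB pages so PAGE_CACHE_SHIFT == 12: for start = 6144 and len = 8192 it returns ((6144 + 8192 + 4095) >> 12) - (6144 >> 12) = 4 - 1 = 3, which matches the three pages that byte range actually touches.)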
@@ -11,7 +11,7 @@ static struct kmem_cache *extent_map_cache;
int __init extent_map_init(void)
{
extent_map_cache = kmem_cache_create("extent_map",
extent_map_cache = kmem_cache_create("btrfs_extent_map",
sizeof(struct extent_map), 0,
SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD, NULL);
if (!extent_map_cache)
@@ -35,6 +35,7 @@ void extent_map_exit(void)
void extent_map_tree_init(struct extent_map_tree *tree)
{
tree->map = RB_ROOT;
INIT_LIST_HEAD(&tree->modified_extents);
rwlock_init(&tree->lock);
}
@@ -54,7 +55,9 @@ struct extent_map *alloc_extent_map(void)
em->in_tree = 0;
em->flags = 0;
em->compress_type = BTRFS_COMPRESS_NONE;
em->generation = 0;
atomic_set(&em->refs, 1);
INIT_LIST_HEAD(&em->list);
return em;
}
@@ -72,6 +75,7 @@ void free_extent_map(struct extent_map *em)
WARN_ON(atomic_read(&em->refs) == 0);
if (atomic_dec_and_test(&em->refs)) {
WARN_ON(em->in_tree);
WARN_ON(!list_empty(&em->list));
kmem_cache_free(extent_map_cache, em);
}
}
@@ -198,6 +202,14 @@ static void try_merge_map(struct extent_map_tree *tree, struct extent_map *em)
em->block_len += merge->block_len;
em->block_start = merge->block_start;
merge->in_tree = 0;
if (merge->generation > em->generation) {
em->mod_start = em->start;
em->mod_len = em->len;
em->generation = merge->generation;
list_move(&em->list, &tree->modified_extents);
}
list_del_init(&merge->list);
rb_erase(&merge->rb_node, &tree->map);
free_extent_map(merge);
}
@@ -211,14 +223,34 @@ static void try_merge_map(struct extent_map_tree *tree, struct extent_map *em)
em->block_len += merge->len;
rb_erase(&merge->rb_node, &tree->map);
merge->in_tree = 0;
if (merge->generation > em->generation) {
em->mod_len = em->len;
em->generation = merge->generation;
list_move(&em->list, &tree->modified_extents);
}
list_del_init(&merge->list);
free_extent_map(merge);
}
}
int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len)
/**
* unpint_extent_cache - unpin an extent from the cache
* @tree: tree to unpin the extent in
* @start: logical offset in the file
* @len: length of the extent
* @gen: generation that this extent has been modified in
* @prealloc: if this is set we need to clear the prealloc flag
*
* Called after an extent has been written to disk properly. Set the generation
* to the generation that actually added the file item to the inode so we know
* we need to sync this extent when we call fsync().
*/
int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len,
u64 gen)
{
int ret = 0;
struct extent_map *em;
bool prealloc = false;
write_lock(&tree->lock);
em = lookup_extent_mapping(tree, start, len);
@@ -228,10 +260,24 @@ int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len)
if (!em)
goto out;
list_move(&em->list, &tree->modified_extents);
em->generation = gen;
clear_bit(EXTENT_FLAG_PINNED, &em->flags);
em->mod_start = em->start;
em->mod_len = em->len;
if (test_bit(EXTENT_FLAG_PREALLOC, &em->flags)) {
prealloc = true;
clear_bit(EXTENT_FLAG_PREALLOC, &em->flags);
}
try_merge_map(tree, em);
if (prealloc) {
em->mod_start = em->start;
em->mod_len = em->len;
}
free_extent_map(em);
out:
write_unlock(&tree->lock);
@@ -269,6 +315,9 @@ int add_extent_mapping(struct extent_map_tree *tree,
}
atomic_inc(&em->refs);
em->mod_start = em->start;
em->mod_len = em->len;
try_merge_map(tree, em);
out:
return ret;
@@ -358,6 +407,8 @@ int remove_extent_mapping(struct extent_map_tree *tree, struct extent_map *em)
WARN_ON(test_bit(EXTENT_FLAG_PINNED, &em->flags));
rb_erase(&em->rb_node, &tree->map);
if (!test_bit(EXTENT_FLAG_LOGGING, &em->flags))
list_del_init(&em->list);
em->in_tree = 0;
return ret;
}
@@ -13,6 +13,7 @@
#define EXTENT_FLAG_COMPRESSED 1
#define EXTENT_FLAG_VACANCY 2 /* no file extent item found */
#define EXTENT_FLAG_PREALLOC 3 /* pre-allocated extent */
#define EXTENT_FLAG_LOGGING 4 /* Logging this extent */
struct extent_map {
struct rb_node rb_node;
@@ -20,18 +21,23 @@ struct extent_map {
/* all of these are in bytes */
u64 start;
u64 len;
u64 mod_start;
u64 mod_len;
u64 orig_start;
u64 block_start;
u64 block_len;
u64 generation;
unsigned long flags;
struct block_device *bdev;
atomic_t refs;
unsigned int in_tree;
unsigned int compress_type;
struct list_head list;
};
struct extent_map_tree {
struct rb_root map;
struct list_head modified_extents;
rwlock_t lock;
};
@@ -60,7 +66,7 @@ struct extent_map *alloc_extent_map(void);
void free_extent_map(struct extent_map *em);
int __init extent_map_init(void);
void extent_map_exit(void);
int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len);
int unpin_extent_cache(struct extent_map_tree *tree, u64 start, u64 len, u64 gen);
struct extent_map *search_extent_mapping(struct extent_map_tree *tree,
u64 start, u64 len);
#endif
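A minimal sketch of how a log-time consumer might walk the new modified_extents list (the function name and the generation cutoff below are illustrative only, not code from this pull): the per-map generation records which transaction last touched the extent, and EXTENT_FLAG_LOGGING keeps the map linked while it is being written to the tree log, since remove_extent_mapping() above only unlinks maps that do not carry that flag.

static void walk_modified_extents(struct extent_map_tree *tree, u64 last_logged_gen)
{
	struct extent_map *em, *tmp;

	write_lock(&tree->lock);
	list_for_each_entry_safe(em, tmp, &tree->modified_extents, list) {
		/* skip extents already covered by an earlier log commit */
		if (em->generation <= last_logged_gen)
			continue;
		/* keep the map alive and on the list while it is logged */
		set_bit(EXTENT_FLAG_LOGGING, &em->flags);
		atomic_inc(&em->refs);
	}
	write_unlock(&tree->lock);
}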
@@ -25,11 +25,12 @@
#include "transaction.h"
#include "print-tree.h"
#define __MAX_CSUM_ITEMS(r, size) ((((BTRFS_LEAF_DATA_SIZE(r) - \
#define __MAX_CSUM_ITEMS(r, size) ((unsigned long)(((BTRFS_LEAF_DATA_SIZE(r) - \
sizeof(struct btrfs_item) * 2) / \
size) - 1))
#define MAX_CSUM_ITEMS(r, size) (min(__MAX_CSUM_ITEMS(r, size), PAGE_CACHE_SIZE))
#define MAX_CSUM_ITEMS(r, size) (min_t(u32, __MAX_CSUM_ITEMS(r, size), \
PAGE_CACHE_SIZE))
#define MAX_ORDERED_SUM_BYTES(r) ((PAGE_SIZE - \
sizeof(struct btrfs_ordered_sum)) / \
...
@@ -966,7 +966,7 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
block_group->key.offset)) {
ret = find_first_extent_bit(unpin, start,
&extent_start, &extent_end,
EXTENT_DIRTY);
EXTENT_DIRTY, NULL);
if (ret) {
ret = 0;
break;
@@ -1454,9 +1454,7 @@ static int search_bitmap(struct btrfs_free_space_ctl *ctl,
max_t(u64, *offset, bitmap_info->offset));
bits = bytes_to_bits(*bytes, ctl->unit);
for (i = find_next_bit(bitmap_info->bitmap, BITS_PER_BITMAP, i);
i < BITS_PER_BITMAP;
i = find_next_bit(bitmap_info->bitmap, BITS_PER_BITMAP, i + 1)) {
for_each_set_bit_from(i, bitmap_info->bitmap, BITS_PER_BITMAP) {
next_zero = find_next_zero_bit(bitmap_info->bitmap,
BITS_PER_BITMAP, i);
if ((next_zero - i) >= bits) {
@@ -2307,9 +2305,7 @@ static int btrfs_bitmap_cluster(struct btrfs_block_group_cache *block_group,
again:
found_bits = 0;
for (i = find_next_bit(entry->bitmap, BITS_PER_BITMAP, i);
i < BITS_PER_BITMAP;
i = find_next_bit(entry->bitmap, BITS_PER_BITMAP, i + 1)) {
for_each_set_bit_from(i, entry->bitmap, BITS_PER_BITMAP) {
next_zero = find_next_zero_bit(entry->bitmap,
BITS_PER_BITMAP, i);
if (next_zero - i >= min_bits) {
...
@@ -24,4 +24,14 @@ static inline u64 btrfs_name_hash(const char *name, int len)
{
return crc32c((u32)~1, name, len);
}
/*
* Figure the key offset of an extended inode ref
*/
static inline u64 btrfs_extref_hash(u64 parent_objectid, const char *name,
int len)
{
return (u64) crc32c(parent_objectid, name, len);
}
#endif
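For reference, the hash above becomes the key offset of the extended ref item; the lookup and delete paths later in this pull (btrfs_lookup_inode_extref() and btrfs_del_inode_extref()) build the key like this:

struct btrfs_key key;

key.objectid = inode_objectid;	/* the inode that owns the reference */
key.type = BTRFS_INODE_EXTREF_KEY;
key.offset = btrfs_extref_hash(ref_objectid, name, name_len);	/* parent dir objectid + name */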
@@ -18,6 +18,7 @@
#include "ctree.h"
#include "disk-io.h"
#include "hash.h"
#include "transaction.h"
#include "print-tree.h"
@@ -50,18 +51,57 @@ static int find_name_in_backref(struct btrfs_path *path, const char *name,
return 0;
}
struct btrfs_inode_ref *
int btrfs_find_name_in_ext_backref(struct btrfs_path *path, u64 ref_objectid,
const char *name, int name_len,
struct btrfs_inode_extref **extref_ret)
{
struct extent_buffer *leaf;
struct btrfs_inode_extref *extref;
unsigned long ptr;
unsigned long name_ptr;
u32 item_size;
u32 cur_offset = 0;
int ref_name_len;
leaf = path->nodes[0];
item_size = btrfs_item_size_nr(leaf, path->slots[0]);
ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
/*
* Search all extended backrefs in this item. We're only
* looking through any collisions so most of the time this is
* just going to compare against one buffer. If all is well,
* we'll return success and the inode ref object.
*/
while (cur_offset < item_size) {
extref = (struct btrfs_inode_extref *) (ptr + cur_offset);
name_ptr = (unsigned long)(&extref->name);
ref_name_len = btrfs_inode_extref_name_len(leaf, extref);
if (ref_name_len == name_len &&
btrfs_inode_extref_parent(leaf, extref) == ref_objectid &&
(memcmp_extent_buffer(leaf, name, name_ptr, name_len) == 0)) {
if (extref_ret)
*extref_ret = extref;
return 1;
}
cur_offset += ref_name_len + sizeof(*extref);
}
return 0;
}
static struct btrfs_inode_ref *
btrfs_lookup_inode_ref(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path,
const char *name, int name_len,
u64 inode_objectid, u64 ref_objectid, int mod)
u64 inode_objectid, u64 ref_objectid, int ins_len,
int cow)
{
int ret;
struct btrfs_key key;
struct btrfs_inode_ref *ref;
int ins_len = mod < 0 ? -1 : 0;
int cow = mod != 0;
int ret;
key.objectid = inode_objectid;
key.type = BTRFS_INODE_REF_KEY;
@@ -77,10 +117,147 @@ btrfs_lookup_inode_ref(struct btrfs_trans_handle *trans,
return ref;
}
int btrfs_del_inode_ref(struct btrfs_trans_handle *trans,
/* Returns NULL if no extref found */
struct btrfs_inode_extref *
btrfs_lookup_inode_extref(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path,
const char *name, int name_len,
u64 inode_objectid, u64 ref_objectid, int ins_len,
int cow)
{
int ret;
struct btrfs_key key;
struct btrfs_inode_extref *extref;
key.objectid = inode_objectid;
key.type = BTRFS_INODE_EXTREF_KEY;
key.offset = btrfs_extref_hash(ref_objectid, name, name_len);
ret = btrfs_search_slot(trans, root, &key, path, ins_len, cow);
if (ret < 0)
return ERR_PTR(ret);
if (ret > 0)
return NULL;
if (!btrfs_find_name_in_ext_backref(path, ref_objectid, name, name_len, &extref))
return NULL;
return extref;
}
int btrfs_get_inode_ref_index(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
struct btrfs_path *path,
const char *name, int name_len,
u64 inode_objectid, u64 ref_objectid, int mod,
u64 *ret_index)
{
struct btrfs_inode_ref *ref;
struct btrfs_inode_extref *extref;
int ins_len = mod < 0 ? -1 : 0;
int cow = mod != 0;
ref = btrfs_lookup_inode_ref(trans, root, path, name, name_len,
inode_objectid, ref_objectid, ins_len,
cow);
if (IS_ERR(ref))
return PTR_ERR(ref);
if (ref != NULL) {
*ret_index = btrfs_inode_ref_index(path->nodes[0], ref);
return 0;
}
btrfs_release_path(path);
extref = btrfs_lookup_inode_extref(trans, root, path, name,
name_len, inode_objectid,
ref_objectid, ins_len, cow);
if (IS_ERR(extref))
return PTR_ERR(extref);
if (extref) {
*ret_index = btrfs_inode_extref_index(path->nodes[0], extref);
return 0;
}
return -ENOENT;
}
int btrfs_del_inode_extref(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
const char *name, int name_len,
u64 inode_objectid, u64 ref_objectid, u64 *index)
{
struct btrfs_path *path;
struct btrfs_key key;
struct btrfs_inode_extref *extref;
struct extent_buffer *leaf;
int ret;
int del_len = name_len + sizeof(*extref);
unsigned long ptr;
unsigned long item_start;
u32 item_size;
key.objectid = inode_objectid;
btrfs_set_key_type(&key, BTRFS_INODE_EXTREF_KEY);
key.offset = btrfs_extref_hash(ref_objectid, name, name_len);
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
path->leave_spinning = 1;
ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
if (ret > 0)
ret = -ENOENT;
if (ret < 0)
goto out;
/*
* Sanity check - did we find the right item for this name?
* This should always succeed so error here will make the FS
* readonly.
*/
if (!btrfs_find_name_in_ext_backref(path, ref_objectid,
name, name_len, &extref)) {
btrfs_std_error(root->fs_info, -ENOENT);
ret = -EROFS;
goto out;
}
leaf = path->nodes[0];
item_size = btrfs_item_size_nr(leaf, path->slots[0]);
if (index)
*index = btrfs_inode_extref_index(leaf, extref);
if (del_len == item_size) {
/*
* Common case only one ref in the item, remove the
* whole item.
*/
ret = btrfs_del_item(trans, root, path);
goto out;
}
ptr = (unsigned long)extref;
item_start = btrfs_item_ptr_offset(leaf, path->slots[0]);
memmove_extent_buffer(leaf, ptr, ptr + del_len,
item_size - (ptr + del_len - item_start));
btrfs_truncate_item(trans, root, path, item_size - del_len, 1);
out:
btrfs_free_path(path);
return ret;
}
int btrfs_del_inode_ref(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
const char *name, int name_len,
u64 inode_objectid, u64 ref_objectid, u64 *index)
{
struct btrfs_path *path;
struct btrfs_key key;
@@ -91,6 +268,7 @@ int btrfs_del_inode_ref(struct btrfs_trans_handle *trans,
u32 item_size;
u32 sub_item_len;
int ret;
int search_ext_refs = 0;
int del_len = name_len + sizeof(*ref);
key.objectid = inode_objectid;
@@ -106,12 +284,14 @@ int btrfs_del_inode_ref(struct btrfs_trans_handle *trans,
ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
if (ret > 0) {
ret = -ENOENT;
search_ext_refs = 1;
goto out;
} else if (ret < 0) {
goto out;
}
if (!find_name_in_backref(path, name, name_len, &ref)) {
ret = -ENOENT;
search_ext_refs = 1;
goto out;
}
leaf = path->nodes[0];
@@ -129,8 +309,78 @@ int btrfs_del_inode_ref(struct btrfs_trans_handle *trans,
item_start = btrfs_item_ptr_offset(leaf, path->slots[0]);
memmove_extent_buffer(leaf, ptr, ptr + sub_item_len,
item_size - (ptr + sub_item_len - item_start));
btrfs_truncate_item(trans, root, path,
item_size - sub_item_len, 1);
btrfs_truncate_item(trans, root, path, item_size - sub_item_len, 1);
out:
btrfs_free_path(path);
if (search_ext_refs) {
/*
* No refs were found, or we could not find the
* name in our ref array. Find and remove the extended
* inode ref then.
*/
return btrfs_del_inode_extref(trans, root, name, name_len,
inode_objectid, ref_objectid, index);
}
return ret;
}
/*
* btrfs_insert_inode_extref() - Inserts an extended inode ref into a tree.
*
* The caller must have checked against BTRFS_LINK_MAX already.
*/
static int btrfs_insert_inode_extref(struct btrfs_trans_handle *trans,
struct btrfs_root *root,
const char *name, int name_len,
u64 inode_objectid, u64 ref_objectid, u64 index)
{
struct btrfs_inode_extref *extref;
int ret;
int ins_len = name_len + sizeof(*extref);
unsigned long ptr;
struct btrfs_path *path;
struct btrfs_key key;
struct extent_buffer *leaf;
struct btrfs_item *item;
key.objectid = inode_objectid;
key.type = BTRFS_INODE_EXTREF_KEY;
key.offset = btrfs_extref_hash(ref_objectid, name, name_len);
path = btrfs_alloc_path();
if (!path)
return -ENOMEM;
path->leave_spinning = 1;
ret = btrfs_insert_empty_item(trans, root, path, &key,
ins_len);
if (ret == -EEXIST) {
if (btrfs_find_name_in_ext_backref(path, ref_objectid,
name, name_len, NULL))
goto out;
btrfs_extend_item(trans, root, path, ins_len);
ret = 0;
}
if (ret < 0)
goto out;
leaf = path->nodes[0];
item = btrfs_item_nr(leaf, path->slots[0]);
ptr = (unsigned long)btrfs_item_ptr(leaf, path->slots[0], char);
ptr += btrfs_item_size(leaf, item) - ins_len;
extref = (struct btrfs_inode_extref *)ptr;
btrfs_set_inode_extref_name_len(path->nodes[0], extref, name_len);
btrfs_set_inode_extref_index(path->nodes[0], extref, index);
btrfs_set_inode_extref_parent(path->nodes[0], extref, ref_objectid);
ptr = (unsigned long)&extref->name;
write_extent_buffer(path->nodes[0], name, ptr, name_len);
btrfs_mark_buffer_dirty(path->nodes[0]);
out:
btrfs_free_path(path);
return ret;
@@ -191,6 +441,19 @@ int btrfs_insert_inode_ref(struct btrfs_trans_handle *trans,
out:
btrfs_free_path(path);
if (ret == -EMLINK) {
struct btrfs_super_block *disk_super = root->fs_info->super_copy;
/* We ran out of space in the ref array. Need to
* add an extended ref. */
if (btrfs_super_incompat_flags(disk_super)
& BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF)
ret = btrfs_insert_inode_extref(trans, root, name,
name_len,
inode_objectid,
ref_objectid, index);
}
return ret;
}
...
@@ -181,6 +181,7 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
int ret;
u64 ip_oldflags;
unsigned int i_oldflags;
umode_t mode;
if (btrfs_root_readonly(root))
return -EROFS;
@@ -203,6 +204,7 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
ip_oldflags = ip->flags;
i_oldflags = inode->i_flags;
mode = inode->i_mode;
flags = btrfs_mask_flags(inode->i_mode, flags);
oldflags = btrfs_flags_to_ioctl(ip->flags);
@@ -237,10 +239,31 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
ip->flags |= BTRFS_INODE_DIRSYNC;
else
ip->flags &= ~BTRFS_INODE_DIRSYNC;
if (flags & FS_NOCOW_FL)
ip->flags |= BTRFS_INODE_NODATACOW;
else
ip->flags &= ~BTRFS_INODE_NODATACOW;
if (flags & FS_NOCOW_FL) {
if (S_ISREG(mode)) {
/*
* It's safe to turn csums off here, no extents exist.
* Otherwise we want the flag to reflect the real COW
* status of the file and will not set it.
*/
if (inode->i_size == 0)
ip->flags |= BTRFS_INODE_NODATACOW
| BTRFS_INODE_NODATASUM;
} else {
ip->flags |= BTRFS_INODE_NODATACOW;
}
} else {
/*
* Revert back under same assuptions as above
*/
if (S_ISREG(mode)) {
if (inode->i_size == 0)
ip->flags &= ~(BTRFS_INODE_NODATACOW
| BTRFS_INODE_NODATASUM);
} else {
ip->flags &= ~BTRFS_INODE_NODATACOW;
}
}
/*
* The COMPRESS flag can only be changed by users, while the NOCOMPRESS
@@ -516,7 +539,8 @@ static int create_snapshot(struct btrfs_root *root, struct dentry *dentry,
if (!pending_snapshot)
return -ENOMEM;
btrfs_init_block_rsv(&pending_snapshot->block_rsv);
btrfs_init_block_rsv(&pending_snapshot->block_rsv,
BTRFS_BLOCK_RSV_TEMP);
pending_snapshot->dentry = dentry;
pending_snapshot->root = root;
pending_snapshot->readonly = readonly;
@@ -525,7 +549,7 @@ static int create_snapshot(struct btrfs_root *root, struct dentry *dentry,
*inherit = NULL; /* take responsibility to free it */
}
trans = btrfs_start_transaction(root->fs_info->extent_root, 5);
trans = btrfs_start_transaction(root->fs_info->extent_root, 6);
if (IS_ERR(trans)) {
ret = PTR_ERR(trans);
goto fail;
...@@ -1022,8 +1046,8 @@ static int cluster_pages_for_defrag(struct inode *inode, ...@@ -1022,8 +1046,8 @@ static int cluster_pages_for_defrag(struct inode *inode,
page_start, page_end - 1, 0, &cached_state); page_start, page_end - 1, 0, &cached_state);
clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start, clear_extent_bit(&BTRFS_I(inode)->io_tree, page_start,
page_end - 1, EXTENT_DIRTY | EXTENT_DELALLOC | page_end - 1, EXTENT_DIRTY | EXTENT_DELALLOC |
EXTENT_DO_ACCOUNTING, 0, 0, &cached_state, EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG, 0, 0,
GFP_NOFS); &cached_state, GFP_NOFS);
if (i_done != page_cnt) { if (i_done != page_cnt) {
spin_lock(&BTRFS_I(inode)->lock); spin_lock(&BTRFS_I(inode)->lock);
...@@ -1034,8 +1058,8 @@ static int cluster_pages_for_defrag(struct inode *inode, ...@@ -1034,8 +1058,8 @@ static int cluster_pages_for_defrag(struct inode *inode,
} }
btrfs_set_extent_delalloc(inode, page_start, page_end - 1, set_extent_defrag(&BTRFS_I(inode)->io_tree, page_start, page_end - 1,
&cached_state); &cached_state, GFP_NOFS);
unlock_extent_cached(&BTRFS_I(inode)->io_tree, unlock_extent_cached(&BTRFS_I(inode)->io_tree,
page_start, page_end - 1, &cached_state, page_start, page_end - 1, &cached_state,
...@@ -2351,7 +2375,6 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd, ...@@ -2351,7 +2375,6 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
int ret; int ret;
u64 len = olen; u64 len = olen;
u64 bs = root->fs_info->sb->s_blocksize; u64 bs = root->fs_info->sb->s_blocksize;
u64 hint_byte;
/* /*
* TODO: * TODO:
...@@ -2456,13 +2479,13 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd, ...@@ -2456,13 +2479,13 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
another, and lock file content */ another, and lock file content */
while (1) { while (1) {
struct btrfs_ordered_extent *ordered; struct btrfs_ordered_extent *ordered;
lock_extent(&BTRFS_I(src)->io_tree, off, off+len); lock_extent(&BTRFS_I(src)->io_tree, off, off + len - 1);
ordered = btrfs_lookup_first_ordered_extent(src, off+len); ordered = btrfs_lookup_first_ordered_extent(src, off + len - 1);
if (!ordered && if (!ordered &&
!test_range_bit(&BTRFS_I(src)->io_tree, off, off+len, !test_range_bit(&BTRFS_I(src)->io_tree, off, off + len - 1,
EXTENT_DELALLOC, 0, NULL)) EXTENT_DELALLOC, 0, NULL))
break; break;
unlock_extent(&BTRFS_I(src)->io_tree, off, off+len); unlock_extent(&BTRFS_I(src)->io_tree, off, off + len - 1);
if (ordered) if (ordered)
btrfs_put_ordered_extent(ordered); btrfs_put_ordered_extent(ordered);
btrfs_wait_ordered_range(src, off, len); btrfs_wait_ordered_range(src, off, len);
...@@ -2536,7 +2559,7 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd, ...@@ -2536,7 +2559,7 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
btrfs_release_path(path); btrfs_release_path(path);
if (key.offset + datal <= off || if (key.offset + datal <= off ||
key.offset >= off+len) key.offset >= off + len - 1)
goto next; goto next;
memcpy(&new_key, &key, sizeof(new_key)); memcpy(&new_key, &key, sizeof(new_key));
...@@ -2574,10 +2597,10 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd, ...@@ -2574,10 +2597,10 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
datal -= off - key.offset; datal -= off - key.offset;
} }
ret = btrfs_drop_extents(trans, inode, ret = btrfs_drop_extents(trans, root, inode,
new_key.offset, new_key.offset,
new_key.offset + datal, new_key.offset + datal,
&hint_byte, 1); 1);
if (ret) { if (ret) {
btrfs_abort_transaction(trans, root, btrfs_abort_transaction(trans, root,
ret); ret);
...@@ -2637,8 +2660,8 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd, ...@@ -2637,8 +2660,8 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
new_key.offset += skip; new_key.offset += skip;
} }
if (key.offset + datal > off+len) if (key.offset + datal > off + len)
trim = key.offset + datal - (off+len); trim = key.offset + datal - (off + len);
if (comp && (skip || trim)) { if (comp && (skip || trim)) {
ret = -EINVAL; ret = -EINVAL;
...@@ -2648,10 +2671,10 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd, ...@@ -2648,10 +2671,10 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
size -= skip + trim; size -= skip + trim;
datal -= skip + trim; datal -= skip + trim;
ret = btrfs_drop_extents(trans, inode, ret = btrfs_drop_extents(trans, root, inode,
new_key.offset, new_key.offset,
new_key.offset + datal, new_key.offset + datal,
&hint_byte, 1); 1);
if (ret) { if (ret) {
btrfs_abort_transaction(trans, root, btrfs_abort_transaction(trans, root,
ret); ret);
...@@ -2715,7 +2738,7 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd, ...@@ -2715,7 +2738,7 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
ret = 0; ret = 0;
out: out:
btrfs_release_path(path); btrfs_release_path(path);
unlock_extent(&BTRFS_I(src)->io_tree, off, off+len); unlock_extent(&BTRFS_I(src)->io_tree, off, off + len - 1);
out_unlock: out_unlock:
mutex_unlock(&src->i_mutex); mutex_unlock(&src->i_mutex);
mutex_unlock(&inode->i_mutex); mutex_unlock(&inode->i_mutex);
...@@ -2850,8 +2873,8 @@ static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp) ...@@ -2850,8 +2873,8 @@ static long btrfs_ioctl_default_subvol(struct file *file, void __user *argp)
return 0; return 0;
} }
static void get_block_group_info(struct list_head *groups_list, void btrfs_get_block_group_info(struct list_head *groups_list,
struct btrfs_ioctl_space_info *space) struct btrfs_ioctl_space_info *space)
{ {
struct btrfs_block_group_cache *block_group; struct btrfs_block_group_cache *block_group;
...@@ -2959,8 +2982,8 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg) ...@@ -2959,8 +2982,8 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
down_read(&info->groups_sem); down_read(&info->groups_sem);
for (c = 0; c < BTRFS_NR_RAID_TYPES; c++) { for (c = 0; c < BTRFS_NR_RAID_TYPES; c++) {
if (!list_empty(&info->block_groups[c])) { if (!list_empty(&info->block_groups[c])) {
get_block_group_info(&info->block_groups[c], btrfs_get_block_group_info(
&space); &info->block_groups[c], &space);
memcpy(dest, &space, sizeof(space)); memcpy(dest, &space, sizeof(space));
dest++; dest++;
space_args.total_spaces++; space_args.total_spaces++;
...@@ -3208,11 +3231,9 @@ static long btrfs_ioctl_logical_to_ino(struct btrfs_root *root, ...@@ -3208,11 +3231,9 @@ static long btrfs_ioctl_logical_to_ino(struct btrfs_root *root,
{ {
int ret = 0; int ret = 0;
int size; int size;
u64 extent_item_pos;
struct btrfs_ioctl_logical_ino_args *loi; struct btrfs_ioctl_logical_ino_args *loi;
struct btrfs_data_container *inodes = NULL; struct btrfs_data_container *inodes = NULL;
struct btrfs_path *path = NULL; struct btrfs_path *path = NULL;
struct btrfs_key key;
if (!capable(CAP_SYS_ADMIN)) if (!capable(CAP_SYS_ADMIN))
return -EPERM; return -EPERM;
...@@ -3230,7 +3251,7 @@ static long btrfs_ioctl_logical_to_ino(struct btrfs_root *root, ...@@ -3230,7 +3251,7 @@ static long btrfs_ioctl_logical_to_ino(struct btrfs_root *root,
goto out; goto out;
} }
size = min_t(u32, loi->size, 4096); size = min_t(u32, loi->size, 64 * 1024);
inodes = init_data_container(size); inodes = init_data_container(size);
if (IS_ERR(inodes)) { if (IS_ERR(inodes)) {
ret = PTR_ERR(inodes); ret = PTR_ERR(inodes);
...@@ -3238,22 +3259,13 @@ static long btrfs_ioctl_logical_to_ino(struct btrfs_root *root, ...@@ -3238,22 +3259,13 @@ static long btrfs_ioctl_logical_to_ino(struct btrfs_root *root,
goto out; goto out;
} }
ret = extent_from_logical(root->fs_info, loi->logical, path, &key); ret = iterate_inodes_from_logical(loi->logical, root->fs_info, path,
btrfs_release_path(path); build_ino_list, inodes);
if (ret == -EINVAL)
if (ret & BTRFS_EXTENT_FLAG_TREE_BLOCK)
ret = -ENOENT; ret = -ENOENT;
if (ret < 0) if (ret < 0)
goto out; goto out;
extent_item_pos = loi->logical - key.objectid;
ret = iterate_extent_inodes(root->fs_info, key.objectid,
extent_item_pos, 0, build_ino_list,
inodes);
if (ret < 0)
goto out;
ret = copy_to_user((void *)(unsigned long)loi->inodes, ret = copy_to_user((void *)(unsigned long)loi->inodes,
(void *)(unsigned long)inodes, size); (void *)(unsigned long)inodes, size);
if (ret) if (ret)
...@@ -3261,7 +3273,7 @@ static long btrfs_ioctl_logical_to_ino(struct btrfs_root *root, ...@@ -3261,7 +3273,7 @@ static long btrfs_ioctl_logical_to_ino(struct btrfs_root *root,
out: out:
btrfs_free_path(path); btrfs_free_path(path);
kfree(inodes); vfree(inodes);
kfree(loi); kfree(loi);
return ret; return ret;
......
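Aside (illustrative, not from the commit): the NOCOW hunk at the top of this file is reachable from userspace through the ordinary inode-flags ioctls. For a regular file the flag now only sticks while the file is still empty, since existing extents would already carry checksums and a COW status the flag would misrepresent. A minimal sketch (the file name is arbitrary):

/* Illustration only. Create an empty file, set FS_NOCOW_FL through the
 * standard FS_IOC_SETFLAGS path, and read the flags back. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/fs.h>
#include <unistd.h>

int main(void)
{
	int fd = open("empty-nocow-file", O_CREAT | O_RDWR, 0644);
	int flags = 0;

	if (fd < 0 || ioctl(fd, FS_IOC_GETFLAGS, &flags) < 0) {
		perror("open/getflags");
		return 1;
	}
	flags |= FS_NOCOW_FL;
	if (ioctl(fd, FS_IOC_SETFLAGS, &flags) < 0) {
		perror("FS_IOC_SETFLAGS");
		return 1;
	}
	if (ioctl(fd, FS_IOC_GETFLAGS, &flags) == 0)
		printf("FS_NOCOW_FL is %s\n",
		       (flags & FS_NOCOW_FL) ? "set" : "not set");
	close(fd);
	return 0;
}

If the target already has data, the read-back at the end is expected to show the bit still clear, which is exactly the behaviour the new S_ISREG()/i_size check enforces.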
@@ -25,6 +25,8 @@
 #include "btrfs_inode.h"
 #include "extent_io.h"
+static struct kmem_cache *btrfs_ordered_extent_cache;
 static u64 entry_end(struct btrfs_ordered_extent *entry)
 {
 	if (entry->file_offset + entry->len < entry->file_offset)
@@ -187,7 +189,7 @@ static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
 	struct btrfs_ordered_extent *entry;
 	tree = &BTRFS_I(inode)->ordered_tree;
-	entry = kzalloc(sizeof(*entry), GFP_NOFS);
+	entry = kmem_cache_zalloc(btrfs_ordered_extent_cache, GFP_NOFS);
 	if (!entry)
 		return -ENOMEM;
@@ -421,7 +423,7 @@ void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
 			list_del(&sum->list);
 			kfree(sum);
 		}
-		kfree(entry);
+		kmem_cache_free(btrfs_ordered_extent_cache, entry);
 	}
 }
@@ -466,8 +468,7 @@ void btrfs_remove_ordered_extent(struct inode *inode,
  * wait for all the ordered extents in a root.  This is done when balancing
  * space between drives.
  */
-void btrfs_wait_ordered_extents(struct btrfs_root *root,
-				int nocow_only, int delay_iput)
+void btrfs_wait_ordered_extents(struct btrfs_root *root, int delay_iput)
 {
 	struct list_head splice;
 	struct list_head *cur;
@@ -482,15 +483,6 @@ void btrfs_wait_ordered_extents(struct btrfs_root *root,
 		cur = splice.next;
 		ordered = list_entry(cur, struct btrfs_ordered_extent,
 				     root_extent_list);
-		if (nocow_only &&
-		    !test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags) &&
-		    !test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags)) {
-			list_move(&ordered->root_extent_list,
-				  &root->fs_info->ordered_extents);
-			cond_resched_lock(&root->fs_info->ordered_extent_lock);
-			continue;
-		}
 		list_del_init(&ordered->root_extent_list);
 		atomic_inc(&ordered->refs);
@@ -775,7 +767,6 @@ int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
 	struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
 	u64 disk_i_size;
 	u64 new_i_size;
-	u64 i_size_test;
 	u64 i_size = i_size_read(inode);
 	struct rb_node *node;
 	struct rb_node *prev = NULL;
@@ -835,55 +826,30 @@ int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
 			break;
 		if (test->file_offset >= i_size)
 			break;
-		if (test->file_offset >= disk_i_size)
+		if (test->file_offset >= disk_i_size) {
+			/*
+			 * we don't update disk_i_size now, so record this
+			 * undealt i_size. Or we will not know the real
+			 * i_size.
+			 */
+			if (test->outstanding_isize < offset)
+				test->outstanding_isize = offset;
+			if (ordered &&
+			    ordered->outstanding_isize >
+			    test->outstanding_isize)
+				test->outstanding_isize =
+					ordered->outstanding_isize;
 			goto out;
+		}
 	}
-	new_i_size = min_t(u64, offset, i_size);
-	/*
-	 * at this point, we know we can safely update i_size to at least
-	 * the offset from this ordered extent.  But, we need to
-	 * walk forward and see if ios from higher up in the file have
-	 * finished.
-	 */
-	if (ordered) {
-		node = rb_next(&ordered->rb_node);
-	} else {
-		if (prev)
-			node = rb_next(prev);
-		else
-			node = rb_first(&tree->tree);
-	}
-	/*
-	 * We are looking for an area between our current extent and the next
-	 * ordered extent to update the i_size to.  There are 3 cases here
-	 *
-	 * 1) We don't actually have anything and we can update to i_size.
-	 * 2) We have stuff but they already did their i_size update so again we
-	 * can just update to i_size.
-	 * 3) We have an outstanding ordered extent so the most we can update
-	 * our disk_i_size to is the start of the next offset.
-	 */
-	i_size_test = i_size;
-	for (; node; node = rb_next(node)) {
-		test = rb_entry(node, struct btrfs_ordered_extent, rb_node);
-		if (test_bit(BTRFS_ORDERED_UPDATED_ISIZE, &test->flags))
-			continue;
-		if (test->file_offset > offset) {
-			i_size_test = test->file_offset;
-			break;
-		}
-	}
+	new_i_size = min_t(u64, offset, i_size);
 	/*
-	 * i_size_test is the end of a region after this ordered
-	 * extent where there are no ordered extents, we can safely set
-	 * disk_i_size to this.
+	 * Some ordered extents may completed before the current one, and
+	 * we hold the real i_size in ->outstanding_isize.
 	 */
-	if (i_size_test > offset)
-		new_i_size = min_t(u64, i_size_test, i_size);
+	if (ordered && ordered->outstanding_isize > new_i_size)
+		new_i_size = min_t(u64, ordered->outstanding_isize, i_size);
 	BTRFS_I(inode)->disk_i_size = new_i_size;
 	ret = 0;
 out:
@@ -984,3 +950,20 @@ void btrfs_add_ordered_operation(struct btrfs_trans_handle *trans,
 	}
 	spin_unlock(&root->fs_info->ordered_extent_lock);
 }
+
+int __init ordered_data_init(void)
+{
+	btrfs_ordered_extent_cache = kmem_cache_create("btrfs_ordered_extent",
+				     sizeof(struct btrfs_ordered_extent), 0,
+				     SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
+				     NULL);
+	if (!btrfs_ordered_extent_cache)
+		return -ENOMEM;
+	return 0;
+}
+
+void ordered_data_exit(void)
+{
+	if (btrfs_ordered_extent_cache)
+		kmem_cache_destroy(btrfs_ordered_extent_cache);
+}
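Aside (a sketch, not from the commit): the hunks above move btrfs_ordered_extent allocation from kzalloc() onto a dedicated slab cache created at init and torn down at exit. The same pattern in a self-contained module, with invented names, looks roughly like this:

/* Sketch only -- dedicated slab-cache lifecycle; all names are invented. */
#include <linux/module.h>
#include <linux/slab.h>

struct demo_extent {
	u64 file_offset;
	u64 len;
};

static struct kmem_cache *demo_extent_cache;

static int __init demo_init(void)
{
	struct demo_extent *de;

	demo_extent_cache = kmem_cache_create("demo_extent",
					sizeof(struct demo_extent), 0,
					SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD,
					NULL);
	if (!demo_extent_cache)
		return -ENOMEM;

	/* objects come back zeroed and must be freed to the same cache */
	de = kmem_cache_zalloc(demo_extent_cache, GFP_NOFS);
	if (de)
		kmem_cache_free(demo_extent_cache, de);
	return 0;
}

static void __exit demo_exit(void)
{
	kmem_cache_destroy(demo_extent_cache);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

A named cache gives /proc/slabinfo visibility and tighter packing for an object that is allocated and freed at high rates during writeback.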
@@ -96,6 +96,13 @@ struct btrfs_ordered_extent {
 	/* number of bytes that still need writing */
 	u64 bytes_left;
+	/*
+	 * the end of the ordered extent which is behind it but
+	 * didn't update disk_i_size. Please see the comment of
+	 * btrfs_ordered_update_i_size();
+	 */
+	u64 outstanding_isize;
 	/* flags (described above) */
 	unsigned long flags;
@@ -183,6 +190,7 @@ void btrfs_run_ordered_operations(struct btrfs_root *root, int wait);
 void btrfs_add_ordered_operation(struct btrfs_trans_handle *trans,
 				 struct btrfs_root *root,
 				 struct inode *inode);
-void btrfs_wait_ordered_extents(struct btrfs_root *root,
-				int nocow_only, int delay_iput);
+void btrfs_wait_ordered_extents(struct btrfs_root *root, int delay_iput);
+int __init ordered_data_init(void);
+void ordered_data_exit(void);
 #endif
@@ -1145,12 +1145,12 @@ int btrfs_qgroup_account_ref(struct btrfs_trans_handle *trans,
 			ulist_reinit(tmp);
 			/* XXX id not needed */
-			ulist_add(tmp, qg->qgroupid, (unsigned long)qg, GFP_ATOMIC);
+			ulist_add(tmp, qg->qgroupid, (u64)(uintptr_t)qg, GFP_ATOMIC);
 			ULIST_ITER_INIT(&tmp_uiter);
 			while ((tmp_unode = ulist_next(tmp, &tmp_uiter))) {
 				struct btrfs_qgroup_list *glist;
-				qg = (struct btrfs_qgroup *)tmp_unode->aux;
+				qg = (struct btrfs_qgroup *)(uintptr_t)tmp_unode->aux;
 				if (qg->refcnt < seq)
 					qg->refcnt = seq + 1;
 				else
@@ -1158,7 +1158,7 @@ int btrfs_qgroup_account_ref(struct btrfs_trans_handle *trans,
 				list_for_each_entry(glist, &qg->groups, next_group) {
 					ulist_add(tmp, glist->group->qgroupid,
-						  (unsigned long)glist->group,
+						  (u64)(uintptr_t)glist->group,
 						  GFP_ATOMIC);
 				}
 			}
@@ -1168,13 +1168,13 @@ int btrfs_qgroup_account_ref(struct btrfs_trans_handle *trans,
 	 * step 2: walk from the new root
 	 */
 	ulist_reinit(tmp);
-	ulist_add(tmp, qgroup->qgroupid, (unsigned long)qgroup, GFP_ATOMIC);
+	ulist_add(tmp, qgroup->qgroupid, (uintptr_t)qgroup, GFP_ATOMIC);
 	ULIST_ITER_INIT(&uiter);
 	while ((unode = ulist_next(tmp, &uiter))) {
 		struct btrfs_qgroup *qg;
 		struct btrfs_qgroup_list *glist;
-		qg = (struct btrfs_qgroup *)unode->aux;
+		qg = (struct btrfs_qgroup *)(uintptr_t)unode->aux;
 		if (qg->refcnt < seq) {
 			/* not visited by step 1 */
 			qg->rfer += sgn * node->num_bytes;
@@ -1190,7 +1190,7 @@ int btrfs_qgroup_account_ref(struct btrfs_trans_handle *trans,
 		list_for_each_entry(glist, &qg->groups, next_group) {
 			ulist_add(tmp, glist->group->qgroupid,
-				  (unsigned long)glist->group, GFP_ATOMIC);
+				  (uintptr_t)glist->group, GFP_ATOMIC);
 		}
 	}
@@ -1208,12 +1208,12 @@ int btrfs_qgroup_account_ref(struct btrfs_trans_handle *trans,
 			continue;
 		ulist_reinit(tmp);
-		ulist_add(tmp, qg->qgroupid, (unsigned long)qg, GFP_ATOMIC);
+		ulist_add(tmp, qg->qgroupid, (uintptr_t)qg, GFP_ATOMIC);
 		ULIST_ITER_INIT(&tmp_uiter);
 		while ((tmp_unode = ulist_next(tmp, &tmp_uiter))) {
 			struct btrfs_qgroup_list *glist;
-			qg = (struct btrfs_qgroup *)tmp_unode->aux;
+			qg = (struct btrfs_qgroup *)(uintptr_t)tmp_unode->aux;
 			if (qg->tag == seq)
 				continue;
@@ -1225,7 +1225,7 @@ int btrfs_qgroup_account_ref(struct btrfs_trans_handle *trans,
 			list_for_each_entry(glist, &qg->groups, next_group) {
 				ulist_add(tmp, glist->group->qgroupid,
-					  (unsigned long)glist->group,
+					  (uintptr_t)glist->group,
 					  GFP_ATOMIC);
 			}
 		}
@@ -1469,13 +1469,17 @@ int btrfs_qgroup_reserve(struct btrfs_root *root, u64 num_bytes)
 	 * be exceeded
 	 */
 	ulist = ulist_alloc(GFP_ATOMIC);
-	ulist_add(ulist, qgroup->qgroupid, (unsigned long)qgroup, GFP_ATOMIC);
+	if (!ulist) {
+		ret = -ENOMEM;
+		goto out;
+	}
+	ulist_add(ulist, qgroup->qgroupid, (uintptr_t)qgroup, GFP_ATOMIC);
 	ULIST_ITER_INIT(&uiter);
 	while ((unode = ulist_next(ulist, &uiter))) {
 		struct btrfs_qgroup *qg;
 		struct btrfs_qgroup_list *glist;
-		qg = (struct btrfs_qgroup *)unode->aux;
+		qg = (struct btrfs_qgroup *)(uintptr_t)unode->aux;
 		if ((qg->lim_flags & BTRFS_QGROUP_LIMIT_MAX_RFER) &&
 		    qg->reserved + qg->rfer + num_bytes >
@@ -1489,7 +1493,7 @@ int btrfs_qgroup_reserve(struct btrfs_root *root, u64 num_bytes)
 		list_for_each_entry(glist, &qg->groups, next_group) {
 			ulist_add(ulist, glist->group->qgroupid,
-				  (unsigned long)glist->group, GFP_ATOMIC);
+				  (uintptr_t)glist->group, GFP_ATOMIC);
 		}
 	}
 	if (ret)
@@ -1502,7 +1506,7 @@ int btrfs_qgroup_reserve(struct btrfs_root *root, u64 num_bytes)
 	while ((unode = ulist_next(ulist, &uiter))) {
 		struct btrfs_qgroup *qg;
-		qg = (struct btrfs_qgroup *)unode->aux;
+		qg = (struct btrfs_qgroup *)(uintptr_t)unode->aux;
 		qg->reserved += num_bytes;
 	}
@@ -1541,19 +1545,23 @@ void btrfs_qgroup_free(struct btrfs_root *root, u64 num_bytes)
 		goto out;
 	ulist = ulist_alloc(GFP_ATOMIC);
-	ulist_add(ulist, qgroup->qgroupid, (unsigned long)qgroup, GFP_ATOMIC);
+	if (!ulist) {
+		btrfs_std_error(fs_info, -ENOMEM);
+		goto out;
+	}
+	ulist_add(ulist, qgroup->qgroupid, (uintptr_t)qgroup, GFP_ATOMIC);
 	ULIST_ITER_INIT(&uiter);
 	while ((unode = ulist_next(ulist, &uiter))) {
 		struct btrfs_qgroup *qg;
 		struct btrfs_qgroup_list *glist;
-		qg = (struct btrfs_qgroup *)unode->aux;
+		qg = (struct btrfs_qgroup *)(uintptr_t)unode->aux;
 		qg->reserved -= num_bytes;
 		list_for_each_entry(glist, &qg->groups, next_group) {
 			ulist_add(ulist, glist->group->qgroupid,
-				  (unsigned long)glist->group, GFP_ATOMIC);
+				  (uintptr_t)glist->group, GFP_ATOMIC);
 		}
 	}
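Aside (illustrative only): most of the qgroup churn above is the ulist aux value becoming a fixed-width u64, so pointers now pass through (uintptr_t) on the way in and out instead of being cast straight to unsigned long. The round-trip is lossless on both 32-bit and 64-bit targets, which a few lines of ordinary userspace C can demonstrate (struct and variable names invented):

/* Illustration only: stash a pointer in a u64 via uintptr_t and get it back. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct qgroup_like {
	uint64_t qgroupid;
};

int main(void)
{
	struct qgroup_like qg = { .qgroupid = 5 };

	/* store: pointer -> uintptr_t -> u64 (what the ulist now carries) */
	uint64_t aux = (uint64_t)(uintptr_t)&qg;

	/* load: u64 -> uintptr_t -> pointer (what the iteration loops do) */
	struct qgroup_like *back = (struct qgroup_like *)(uintptr_t)aux;

	assert(back == &qg);
	printf("qgroupid via round-tripped pointer: %llu\n",
	       (unsigned long long)back->qgroupid);
	return 0;
}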
@@ -130,4 +130,5 @@ enum {
 #ifdef __KERNEL__
 long btrfs_ioctl_send(struct file *mnt_file, void __user *arg);
+int write_buf(struct file *filp, const void *buf, u32 len, loff_t *off);
 #endif