Commit 5c80c71b authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/btrfs-unstable

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/btrfs-unstable:
  Btrfs: set i_size properly when fallocating and we already have extents
  btrfs: unlock on error in btrfs_file_llseek()
  btrfs: btrfs_permission's RO check shouldn't apply to device nodes
  Btrfs: truncate pages from clone ioctl target range
  Btrfs: fix uninitialized sync_pending
  Btrfs: fix wrong free space information
  btrfs: memory leak in btrfs_add_inode_defrag()
  Btrfs: use plain page_address() in header fields setget functions
  Btrfs: forced readonly when btrfs_drop_snapshot() fails
  Btrfs: check if there is enough space for balancing smarter
  Btrfs: fix a bug of balance on full multi-disk partitions
  Btrfs: fix an oops of log replay
  Btrfs: detect whether a device supports discard
  Btrfs: force unplugs when switching from high to regular priority bios
parents 01fa4ba5 81d86e1b
fs/btrfs/ctree.h

@@ -1415,17 +1415,15 @@ void btrfs_set_##name(struct extent_buffer *eb, type *s, u##bits val);
 #define BTRFS_SETGET_HEADER_FUNCS(name, type, member, bits) \
 static inline u##bits btrfs_##name(struct extent_buffer *eb) \
 { \
-        type *p = kmap_atomic(eb->first_page, KM_USER0); \
+        type *p = page_address(eb->first_page); \
         u##bits res = le##bits##_to_cpu(p->member); \
-        kunmap_atomic(p, KM_USER0); \
         return res; \
 } \
 static inline void btrfs_set_##name(struct extent_buffer *eb, \
                                     u##bits val) \
 { \
-        type *p = kmap_atomic(eb->first_page, KM_USER0); \
+        type *p = page_address(eb->first_page); \
         p->member = cpu_to_le##bits(val); \
-        kunmap_atomic(p, KM_USER0); \
 }
 
 #define BTRFS_SETGET_STACK_FUNCS(name, type, member, bits) \
@@ -2367,8 +2365,8 @@ static inline int btrfs_insert_empty_item(struct btrfs_trans_handle *trans,
 int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path);
 int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path);
 int btrfs_leaf_free_space(struct btrfs_root *root, struct extent_buffer *leaf);
-int btrfs_drop_snapshot(struct btrfs_root *root,
-                        struct btrfs_block_rsv *block_rsv, int update_ref);
+void btrfs_drop_snapshot(struct btrfs_root *root,
+                         struct btrfs_block_rsv *block_rsv, int update_ref);
 int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
                        struct btrfs_root *root,
                        struct extent_buffer *node,
fs/btrfs/extent-tree.c

@@ -1782,6 +1782,9 @@ static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
 
 
                 for (i = 0; i < multi->num_stripes; i++, stripe++) {
+                        if (!stripe->dev->can_discard)
+                                continue;
+
                         ret = btrfs_issue_discard(stripe->dev->bdev,
                                                   stripe->physical,
                                                   stripe->length);
@@ -1789,11 +1792,16 @@ static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
                                 discarded_bytes += stripe->length;
                         else if (ret != -EOPNOTSUPP)
                                 break;
+
+                        /*
+                         * Just in case we get back EOPNOTSUPP for some reason,
+                         * just ignore the return value so we don't screw up
+                         * people calling discard_extent.
+                         */
+                        ret = 0;
                 }
                 kfree(multi);
         }
-        if (discarded_bytes && ret == -EOPNOTSUPP)
-                ret = 0;
 
         if (actual_bytes)
                 *actual_bytes = discarded_bytes;
@@ -6269,8 +6277,8 @@ static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
  * also make sure backrefs for the shared block and all lower level
  * blocks are properly updated.
  */
-int btrfs_drop_snapshot(struct btrfs_root *root,
-                        struct btrfs_block_rsv *block_rsv, int update_ref)
+void btrfs_drop_snapshot(struct btrfs_root *root,
+                         struct btrfs_block_rsv *block_rsv, int update_ref)
 {
         struct btrfs_path *path;
         struct btrfs_trans_handle *trans;
@@ -6283,13 +6291,16 @@ int btrfs_drop_snapshot(struct btrfs_root *root,
         int level;
 
         path = btrfs_alloc_path();
-        if (!path)
-                return -ENOMEM;
+        if (!path) {
+                err = -ENOMEM;
+                goto out;
+        }
 
         wc = kzalloc(sizeof(*wc), GFP_NOFS);
         if (!wc) {
                 btrfs_free_path(path);
-                return -ENOMEM;
+                err = -ENOMEM;
+                goto out;
         }
 
         trans = btrfs_start_transaction(tree_root, 0);
@@ -6318,7 +6329,7 @@ int btrfs_drop_snapshot(struct btrfs_root *root,
                 path->lowest_level = 0;
                 if (ret < 0) {
                         err = ret;
-                        goto out;
+                        goto out_free;
                 }
                 WARN_ON(ret > 0);
 
@@ -6425,11 +6436,14 @@ int btrfs_drop_snapshot(struct btrfs_root *root,
                 free_extent_buffer(root->commit_root);
                 kfree(root);
         }
-out:
+out_free:
         btrfs_end_transaction_throttle(trans, tree_root);
         kfree(wc);
         btrfs_free_path(path);
-        return err;
+out:
+        if (err)
+                btrfs_std_error(root->fs_info, err);
+        return;
 }
 
 /*
@@ -6720,6 +6734,10 @@ int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
         struct btrfs_space_info *space_info;
         struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
         struct btrfs_device *device;
+        u64 min_free;
+        int index;
+        int dev_nr = 0;
+        int dev_min = 1;
         int full = 0;
         int ret = 0;
 
@@ -6729,8 +6747,10 @@ int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
         if (!block_group)
                 return -1;
 
+        min_free = btrfs_block_group_used(&block_group->item);
+
         /* no bytes used, we're good */
-        if (!btrfs_block_group_used(&block_group->item))
+        if (!min_free)
                 goto out;
 
         space_info = block_group->space_info;
@@ -6746,10 +6766,9 @@ int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
          * all of the extents from this block group.  If we can, we're good
          */
         if ((space_info->total_bytes != block_group->key.offset) &&
             (space_info->bytes_used + space_info->bytes_reserved +
              space_info->bytes_pinned + space_info->bytes_readonly +
-             btrfs_block_group_used(&block_group->item) <
-             space_info->total_bytes)) {
+             min_free < space_info->total_bytes)) {
                 spin_unlock(&space_info->lock);
                 goto out;
         }
@@ -6766,9 +6785,29 @@ int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
         if (full)
                 goto out;
 
+        /*
+         * index:
+         *      0: raid10
+         *      1: raid1
+         *      2: dup
+         *      3: raid0
+         *      4: single
+         */
+        index = get_block_group_index(block_group);
+        if (index == 0) {
+                dev_min = 4;
+                min_free /= 2;
+        } else if (index == 1) {
+                dev_min = 2;
+        } else if (index == 2) {
+                min_free *= 2;
+        } else if (index == 3) {
+                dev_min = fs_devices->rw_devices;
+                min_free /= dev_min;
+        }
+
         mutex_lock(&root->fs_info->chunk_mutex);
         list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
-                u64 min_free = btrfs_block_group_used(&block_group->item);
                 u64 dev_offset;
 
                 /*
@@ -6779,7 +6818,11 @@ int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
                         ret = find_free_dev_extent(NULL, device, min_free,
                                                    &dev_offset, NULL);
                         if (!ret)
+                                dev_nr++;
+
+                        if (dev_nr >= dev_min)
                                 break;
+
                         ret = -1;
                 }
         }
fs/btrfs/file.c

@@ -150,6 +150,8 @@ int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
         spin_lock(&root->fs_info->defrag_inodes_lock);
         if (!BTRFS_I(inode)->in_defrag)
                 __btrfs_add_inode_defrag(inode, defrag);
+        else
+                kfree(defrag);
         spin_unlock(&root->fs_info->defrag_inodes_lock);
         return 0;
 }
@@ -1638,11 +1640,15 @@ static long btrfs_fallocate(struct file *file, int mode,
 
         cur_offset = alloc_start;
         while (1) {
+                u64 actual_end;
+
                 em = btrfs_get_extent(inode, NULL, 0, cur_offset,
                                       alloc_end - cur_offset, 0);
                 BUG_ON(IS_ERR_OR_NULL(em));
                 last_byte = min(extent_map_end(em), alloc_end);
+                actual_end = min_t(u64, extent_map_end(em), offset + len);
                 last_byte = (last_byte + mask) & ~mask;
+
                 if (em->block_start == EXTENT_MAP_HOLE ||
                     (cur_offset >= inode->i_size &&
                      !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
@@ -1655,6 +1661,16 @@ static long btrfs_fallocate(struct file *file, int mode,
                                 free_extent_map(em);
                                 break;
                         }
+                } else if (actual_end > inode->i_size &&
+                           !(mode & FALLOC_FL_KEEP_SIZE)) {
+                        /*
+                         * We didn't need to allocate any more space, but we
+                         * still extended the size of the file so we need to
+                         * update i_size.
+                         */
+                        inode->i_ctime = CURRENT_TIME;
+                        i_size_write(inode, actual_end);
+                        btrfs_ordered_update_i_size(inode, actual_end, NULL);
                 }
                 free_extent_map(em);
@@ -1804,10 +1820,14 @@ static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int origin)
                 }
         }
 
-        if (offset < 0 && !(file->f_mode & FMODE_UNSIGNED_OFFSET))
-                return -EINVAL;
-        if (offset > inode->i_sb->s_maxbytes)
-                return -EINVAL;
+        if (offset < 0 && !(file->f_mode & FMODE_UNSIGNED_OFFSET)) {
+                ret = -EINVAL;
+                goto out;
+        }
+        if (offset > inode->i_sb->s_maxbytes) {
+                ret = -EINVAL;
+                goto out;
+        }
 
         /* Special lock needed here? */
         if (offset != file->f_pos) {
fs/btrfs/free-space-cache.c

@@ -1168,9 +1168,9 @@ static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
                         div64_u64(extent_bytes, (sizeof(struct btrfs_free_space)));
 }
 
-static void bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
-                              struct btrfs_free_space *info, u64 offset,
-                              u64 bytes)
+static inline void __bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
+                                       struct btrfs_free_space *info,
+                                       u64 offset, u64 bytes)
 {
         unsigned long start, count;
 
@@ -1181,6 +1181,13 @@ static void bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
         bitmap_clear(info->bitmap, start, count);
 
         info->bytes -= bytes;
+}
+
+static void bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
+                              struct btrfs_free_space *info, u64 offset,
+                              u64 bytes)
+{
+        __bitmap_clear_bits(ctl, info, offset, bytes);
         ctl->free_space -= bytes;
 }
 
@@ -1984,7 +1991,7 @@ static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group,
                 return 0;
 
         ret = search_start;
-        bitmap_clear_bits(ctl, entry, ret, bytes);
+        __bitmap_clear_bits(ctl, entry, ret, bytes);
 
         return ret;
 }
@@ -2039,7 +2046,6 @@ u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
                         continue;
                 }
         } else {
-
                 ret = entry->offset;
 
                 entry->offset += bytes;
fs/btrfs/inode.c

@@ -7354,11 +7354,15 @@ static int btrfs_set_page_dirty(struct page *page)
 static int btrfs_permission(struct inode *inode, int mask)
 {
         struct btrfs_root *root = BTRFS_I(inode)->root;
+        umode_t mode = inode->i_mode;
 
-        if (btrfs_root_readonly(root) && (mask & MAY_WRITE))
-                return -EROFS;
-        if ((BTRFS_I(inode)->flags & BTRFS_INODE_READONLY) && (mask & MAY_WRITE))
-                return -EACCES;
+        if (mask & MAY_WRITE &&
+            (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))) {
+                if (btrfs_root_readonly(root))
+                        return -EROFS;
+                if (BTRFS_I(inode)->flags & BTRFS_INODE_READONLY)
+                        return -EACCES;
+        }
         return generic_permission(inode, mask);
 }
 
fs/btrfs/ioctl.c

@@ -2236,6 +2236,10 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
                 btrfs_wait_ordered_range(src, off, len);
         }
 
+        /* truncate page cache pages from target inode range */
+        truncate_inode_pages_range(&inode->i_data, off,
+                                   ALIGN(off + len, PAGE_CACHE_SIZE) - 1);
+
         /* clone data */
         key.objectid = btrfs_ino(src);
         key.type = BTRFS_EXTENT_DATA_KEY;
fs/btrfs/tree-log.c

@@ -799,14 +799,15 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
                                   struct extent_buffer *eb, int slot,
                                   struct btrfs_key *key)
 {
-        struct inode *dir;
-        int ret;
         struct btrfs_inode_ref *ref;
+        struct btrfs_dir_item *di;
+        struct inode *dir;
         struct inode *inode;
-        char *name;
-        int namelen;
         unsigned long ref_ptr;
         unsigned long ref_end;
+        char *name;
+        int namelen;
+        int ret;
         int search_done = 0;
 
         /*
@@ -909,6 +910,25 @@ static noinline int add_inode_ref(struct btrfs_trans_handle *trans,
         }
         btrfs_release_path(path);
 
+        /* look for a conflicting sequence number */
+        di = btrfs_lookup_dir_index_item(trans, root, path, btrfs_ino(dir),
+                                         btrfs_inode_ref_index(eb, ref),
+                                         name, namelen, 0);
+        if (di && !IS_ERR(di)) {
+                ret = drop_one_dir_item(trans, root, path, dir, di);
+                BUG_ON(ret);
+        }
+        btrfs_release_path(path);
+
+        /* look for a conflicing name */
+        di = btrfs_lookup_dir_item(trans, root, path, btrfs_ino(dir),
+                                   name, namelen, 0);
+        if (di && !IS_ERR(di)) {
+                ret = drop_one_dir_item(trans, root, path, dir, di);
+                BUG_ON(ret);
+        }
+        btrfs_release_path(path);
+
 insert:
         /* insert our name */
         ret = btrfs_add_link(trans, dir, inode, name, namelen, 0,
fs/btrfs/volumes.c

@@ -142,6 +142,7 @@ static noinline int run_scheduled_bios(struct btrfs_device *device)
         unsigned long limit;
         unsigned long last_waited = 0;
         int force_reg = 0;
+        int sync_pending = 0;
         struct blk_plug plug;
 
         /*
@@ -229,6 +230,22 @@ static noinline int run_scheduled_bios(struct btrfs_device *device)
 
                 BUG_ON(atomic_read(&cur->bi_cnt) == 0);
 
+                /*
+                 * if we're doing the sync list, record that our
+                 * plug has some sync requests on it
+                 *
+                 * If we're doing the regular list and there are
+                 * sync requests sitting around, unplug before
+                 * we add more
+                 */
+                if (pending_bios == &device->pending_sync_bios) {
+                        sync_pending = 1;
+                } else if (sync_pending) {
+                        blk_finish_plug(&plug);
+                        blk_start_plug(&plug);
+                        sync_pending = 0;
+                }
+
                 submit_bio(cur->bi_rw, cur);
                 num_run++;
                 batch_run++;
@@ -500,6 +517,9 @@ static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
                         fs_devices->rw_devices--;
                 }
 
+                if (device->can_discard)
+                        fs_devices->num_can_discard--;
+
                 new_device = kmalloc(sizeof(*new_device), GFP_NOFS);
                 BUG_ON(!new_device);
                 memcpy(new_device, device, sizeof(*new_device));
@@ -508,6 +528,7 @@ static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
                 new_device->bdev = NULL;
                 new_device->writeable = 0;
                 new_device->in_fs_metadata = 0;
+                new_device->can_discard = 0;
                 list_replace_rcu(&device->dev_list, &new_device->dev_list);
 
                 call_rcu(&device->rcu, free_device);
@@ -547,6 +568,7 @@ int btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
 static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
                                 fmode_t flags, void *holder)
 {
+        struct request_queue *q;
         struct block_device *bdev;
         struct list_head *head = &fs_devices->devices;
         struct btrfs_device *device;
@@ -603,6 +625,12 @@ static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
                         seeding = 0;
                 }
 
+                q = bdev_get_queue(bdev);
+                if (blk_queue_discard(q)) {
+                        device->can_discard = 1;
+                        fs_devices->num_can_discard++;
+                }
+
                 device->bdev = bdev;
                 device->in_fs_metadata = 0;
                 device->mode = flags;
@@ -835,6 +863,7 @@ int find_free_dev_extent(struct btrfs_trans_handle *trans,
 
         max_hole_start = search_start;
         max_hole_size = 0;
+        hole_size = 0;
 
         if (search_start >= search_end) {
                 ret = -ENOSPC;
@@ -917,7 +946,14 @@ int find_free_dev_extent(struct btrfs_trans_handle *trans,
                 cond_resched();
         }
 
-        hole_size = search_end- search_start;
+        /*
+         * At this point, search_start should be the end of
+         * allocated dev extents, and when shrinking the device,
+         * search_end may be smaller than search_start.
+         */
+        if (search_end > search_start)
+                hole_size = search_end - search_start;
+
         if (hole_size > max_hole_size) {
                 max_hole_start = search_start;
                 max_hole_size = hole_size;
@@ -1543,6 +1579,7 @@ static int btrfs_finish_sprout(struct btrfs_trans_handle *trans,
 
 int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
 {
+        struct request_queue *q;
         struct btrfs_trans_handle *trans;
         struct btrfs_device *device;
         struct block_device *bdev;
@@ -1612,6 +1649,9 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
 
         lock_chunks(root);
 
+        q = bdev_get_queue(bdev);
+        if (blk_queue_discard(q))
+                device->can_discard = 1;
         device->writeable = 1;
         device->work.func = pending_bios_fn;
         generate_random_uuid(device->uuid);
@@ -1647,6 +1687,8 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
         root->fs_info->fs_devices->num_devices++;
         root->fs_info->fs_devices->open_devices++;
         root->fs_info->fs_devices->rw_devices++;
+        if (device->can_discard)
+                root->fs_info->fs_devices->num_can_discard++;
         root->fs_info->fs_devices->total_rw_bytes += device->total_bytes;
 
         if (!blk_queue_nonrot(bdev_get_queue(bdev)))
@@ -2413,9 +2455,10 @@ static int __btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
                         total_avail = device->total_bytes - device->bytes_used;
                 else
                         total_avail = 0;
-                /* avail is off by max(alloc_start, 1MB), but that is the same
-                 * for all devices, so it doesn't hurt the sorting later on
-                 */
+
+                /* If there is no space on this device, skip it. */
+                if (total_avail == 0)
+                        continue;
 
                 ret = find_free_dev_extent(trans, device,
                                            max_stripe_size * dev_stripes,
fs/btrfs/volumes.h

@@ -48,6 +48,7 @@ struct btrfs_device {
         int writeable;
         int in_fs_metadata;
         int missing;
+        int can_discard;
 
         spinlock_t io_lock;
@@ -104,6 +105,7 @@ struct btrfs_fs_devices {
         u64 rw_devices;
         u64 missing_devices;
         u64 total_rw_bytes;
+        u64 num_can_discard;
         struct block_device *latest_bdev;
 
         /* all of the devices in the FS, protected by a mutex