Commit 97d0bf96 authored by Linus Torvalds

Merge tag 'for-5.5-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux

Pull btrfs updates from David Sterba:
 "User visible changes:
   - new block group profiles: RAID1 with 3 and 4 copies
        - RAID1 in btrfs has always had 2 copies, now add support for 3
          and 4
        - this is an incompat feature (named RAID1C34)
        - the recommended use of RAID1C3 is as a replacement for the
          RAID6 profile on metadata; this brings more reliable resiliency
          against the loss or damage of 2 devices

   - support for new checksums
       - per-filesystem, set at mkfs time
       - fast hash (crc32c successor): xxhash, 64bit digest
       - strong hashes (both 256bit): sha256 (slower, FIPS), blake2b
         (faster)
       - the blake2b module goes via the crypto tree, btrfs.ko has a
         soft dependency
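
     A minimal sketch of the mechanism, assuming nothing btrfs-specific
     beyond the driver names ("crc32c", "xxhash64", "sha256",
     "blake2b-256"): the filesystem resolves the configured hash through
     the kernel crypto API and computes a digest with it. csum_one_block()
     is a hypothetical helper, not code from this pull.

        #include <crypto/hash.h>
        #include <linux/err.h>

        /* Hypothetical helper: hash one block with the named driver. */
        static int csum_one_block(const char *driver, const u8 *data,
                                  size_t len, u8 *digest)
        {
                struct crypto_shash *tfm = crypto_alloc_shash(driver, 0, 0);
                int ret;

                if (IS_ERR(tfm))
                        return PTR_ERR(tfm);

                {
                        SHASH_DESC_ON_STACK(desc, tfm);

                        desc->tfm = tfm;
                        ret = crypto_shash_digest(desc, data, len, digest);
                }
                crypto_free_shash(tfm);
                return ret;
        }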

   - speed up lseek: don't take the inode lock unnecessarily; this can
     speed up parallel SEEK_CUR/SEEK_SET/SEEK_END by 80% (see the sketch
     below)
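
     The idea, as a hedged sketch (the shape and the find_desired_extent()
     helper name are assumptions, not the verbatim patch): only
     SEEK_DATA/SEEK_HOLE have to inspect the extent layout under the inode
     lock; SEEK_SET/SEEK_CUR/SEEK_END can be served locklessly from i_size
     via generic_file_llseek().

        static loff_t sketch_llseek(struct file *file, loff_t offset, int whence)
        {
                struct inode *inode = file->f_mapping->host;
                loff_t ret;

                switch (whence) {
                case SEEK_DATA:
                case SEEK_HOLE:
                        /* Extent lookups still need the lock (shared is enough). */
                        inode_lock_shared(inode);
                        ret = find_desired_extent(inode, offset, whence);
                        inode_unlock_shared(inode);
                        return ret;
                default:
                        /* Lockless: generic_file_llseek() only reads i_size. */
                        return generic_file_llseek(file, offset, whence);
                }
        }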

   - send:
       - allow clone operations within the same file
       - limit maximum number of sent clone references to avoid slow
         backref walking

   - error message improvements: device scan prints process name and PID

  Core changes:
   - cleanups
        - remove the unique workqueue helpers, which were used to provide
          a way to avoid deadlocks in the workqueue code; this is now done
          in a simpler way
       - remove lots of indirect function calls in compression code
       - extent IO tree code moved out of extent_io.c
       - cleanup backup superblock handling at mount time
       - transaction life cycle documentation and cleanups
       - locking code cleanups, annotations and documentation
       - add more cold, const, pure function attributes
       - removal of unused or redundant struct members or variables

   - new tree-checker sanity tests
       - try to detect missing INODE_ITEM, cross-reference checks of
         DIR_ITEM, DIR_INDEX, INODE_REF, and XATTR_* items
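
     A hypothetical illustration of the cross-referencing idea
     (leaf_has_key() is an invented helper and the shape is simplified;
     the real checks live in fs/btrfs/tree-checker.c): a DIR_ITEM implies
     an INODE_ITEM with the same objectid, and a leaf where that item is
     missing is rejected as corrupted.

        /* Hypothetical sketch, not the kernel code: reject a leaf whose
         * DIR_ITEM has no matching INODE_ITEM. */
        static int sketch_check_dir_item(struct extent_buffer *leaf,
                                         const struct btrfs_key *key)
        {
                struct btrfs_key inode_key = {
                        .objectid = key->objectid,  /* the owning directory */
                        .type = BTRFS_INODE_ITEM_KEY,
                        .offset = 0,
                };

                if (!leaf_has_key(leaf, &inode_key))    /* invented helper */
                        return -EUCLEAN;        /* "filesystem corrupted" */
                return 0;
        }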

   - remove btrfs' own bio scheduling code (used to avoid checksum
     submissions being stuck behind other IO); it is replaced by cgroup
     controller-based code that allows better control and avoids priority
     inversions in cases where the custom and the cgroup scheduling
     disagreed (a sketch of the mechanism follows below)
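
     A sketch of the replacement mechanism, under assumptions
     (REQ_CGROUP_PUNT and bio_associate_blkg_from_css() are existing block
     layer facilities; the snippet is illustrative, not the exact btrfs
     submission path): async writeback bios are tagged so the block layer
     issues them from a per-cgroup punt workqueue, keeping the IO
     attributed to, and throttled by, the right cgroup. This is also why
     btrfs_submit_compressed_write() grows a blkcg_css parameter in this
     pull (see the compression.h hunk below).

        #include <linux/bio.h>
        #include <linux/blk-cgroup.h>

        /* Illustrative only: punt an async write to the blkcg workqueue. */
        static void submit_punted_write(struct bio *bio,
                                        struct cgroup_subsys_state *blkcg_css)
        {
                bio->bi_opf = REQ_OP_WRITE | REQ_CGROUP_PUNT;
                bio_associate_blkg_from_css(bio, blkcg_css);
                submit_bio(bio);
        }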

  Fixes:
   - avoid getting stuck during cyclic writebacks

   - fix trimming of ranges crossing block group boundaries

   - fix rename exchange on subvolumes; all involved subvolumes need to
     be recorded in the transaction"

* tag 'for-5.5-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux: (137 commits)
  btrfs: drop bdev argument from submit_extent_page
  btrfs: remove extent_map::bdev
  btrfs: drop bio_set_dev where not needed
  btrfs: get bdev directly from fs_devices in submit_extent_page
  btrfs: record all roots for rename exchange on a subvol
  Btrfs: fix block group remaining RO forever after error during device replace
  btrfs: scrub: Don't check free space before marking a block group RO
  btrfs: change btrfs_fs_devices::rotating to bool
  btrfs: change btrfs_fs_devices::seeding to bool
  btrfs: rename btrfs_block_group_cache
  btrfs: block-group: Reuse the item key from caller of read_one_block_group()
  btrfs: block-group: Refactor btrfs_read_block_groups()
  btrfs: document extent buffer locking
  btrfs: access eb::blocking_writers according to ACCESS_ONCE policies
  btrfs: set blocking_writers directly, no increment or decrement
  btrfs: merge blocking_writers branches in btrfs_tree_read_lock
  btrfs: drop incompat bit for raid1c34 after last block group is gone
  btrfs: add incompat for raid1 with 3, 4 copies
  btrfs: add support for 4-copy replication (raid1c4)
  btrfs: add support for 3-copy replication (raid1c3)
  ...
parents 1b88176b fa17ed06
@@ -5,6 +5,8 @@ config BTRFS_FS
 	select CRYPTO
 	select CRYPTO_CRC32C
 	select LIBCRC32C
+	select CRYPTO_XXHASH
+	select CRYPTO_SHA256
 	select ZLIB_INFLATE
 	select ZLIB_DEFLATE
 	select LZO_COMPRESS
...
@@ -53,24 +53,12 @@ struct btrfs_workqueue {
 	struct __btrfs_workqueue *high;
 };
 
-static void normal_work_helper(struct btrfs_work *work);
-
-#define BTRFS_WORK_HELPER(name)					\
-noinline_for_stack void btrfs_##name(struct work_struct *arg)	\
-{								\
-	struct btrfs_work *work = container_of(arg, struct btrfs_work,	\
-					       normal_work);	\
-	normal_work_helper(work);				\
-}
-
-struct btrfs_fs_info *
-btrfs_workqueue_owner(const struct __btrfs_workqueue *wq)
+struct btrfs_fs_info * __pure btrfs_workqueue_owner(const struct __btrfs_workqueue *wq)
 {
 	return wq->fs_info;
 }
 
-struct btrfs_fs_info *
-btrfs_work_owner(const struct btrfs_work *work)
+struct btrfs_fs_info * __pure btrfs_work_owner(const struct btrfs_work *work)
 {
 	return work->wq->fs_info;
 }
@@ -89,29 +77,6 @@ bool btrfs_workqueue_normal_congested(const struct btrfs_workqueue *wq)
 	return atomic_read(&wq->normal->pending) > wq->normal->thresh * 2;
 }
 
-BTRFS_WORK_HELPER(worker_helper);
-BTRFS_WORK_HELPER(delalloc_helper);
-BTRFS_WORK_HELPER(flush_delalloc_helper);
-BTRFS_WORK_HELPER(cache_helper);
-BTRFS_WORK_HELPER(submit_helper);
-BTRFS_WORK_HELPER(fixup_helper);
-BTRFS_WORK_HELPER(endio_helper);
-BTRFS_WORK_HELPER(endio_meta_helper);
-BTRFS_WORK_HELPER(endio_meta_write_helper);
-BTRFS_WORK_HELPER(endio_raid56_helper);
-BTRFS_WORK_HELPER(endio_repair_helper);
-BTRFS_WORK_HELPER(rmw_helper);
-BTRFS_WORK_HELPER(endio_write_helper);
-BTRFS_WORK_HELPER(freespace_write_helper);
-BTRFS_WORK_HELPER(delayed_meta_helper);
-BTRFS_WORK_HELPER(readahead_helper);
-BTRFS_WORK_HELPER(qgroup_rescan_helper);
-BTRFS_WORK_HELPER(extent_refs_helper);
-BTRFS_WORK_HELPER(scrub_helper);
-BTRFS_WORK_HELPER(scrubwrc_helper);
-BTRFS_WORK_HELPER(scrubnc_helper);
-BTRFS_WORK_HELPER(scrubparity_helper);
-
 static struct __btrfs_workqueue *
 __btrfs_alloc_workqueue(struct btrfs_fs_info *fs_info, const char *name,
 			unsigned int flags, int limit_active, int thresh)
@@ -252,16 +217,16 @@ static inline void thresh_exec_hook(struct __btrfs_workqueue *wq)
 	}
 }
 
-static void run_ordered_work(struct __btrfs_workqueue *wq)
+static void run_ordered_work(struct __btrfs_workqueue *wq,
+			     struct btrfs_work *self)
 {
 	struct list_head *list = &wq->ordered_list;
 	struct btrfs_work *work;
 	spinlock_t *lock = &wq->list_lock;
 	unsigned long flags;
+	bool free_self = false;
 
 	while (1) {
-		void *wtag;
-
 		spin_lock_irqsave(lock, flags);
 		if (list_empty(list))
 			break;
@@ -287,22 +252,53 @@ static void run_ordered_work(struct __btrfs_workqueue *wq)
 		list_del(&work->ordered_list);
 		spin_unlock_irqrestore(lock, flags);
 
-		/*
-		 * We don't want to call the ordered free functions with the
-		 * lock held though. Save the work as tag for the trace event,
-		 * because the callback could free the structure.
-		 */
-		wtag = work;
-		work->ordered_free(work);
-		trace_btrfs_all_work_done(wq->fs_info, wtag);
+		if (work == self) {
+			/*
+			 * This is the work item that the worker is currently
+			 * executing.
+			 *
+			 * The kernel workqueue code guarantees non-reentrancy
+			 * of work items. I.e., if a work item with the same
+			 * address and work function is queued twice, the second
+			 * execution is blocked until the first one finishes. A
+			 * work item may be freed and recycled with the same
+			 * work function; the workqueue code assumes that the
+			 * original work item cannot depend on the recycled work
+			 * item in that case (see find_worker_executing_work()).
+			 *
+			 * Note that different types of Btrfs work can depend on
+			 * each other, and one type of work on one Btrfs
+			 * filesystem may even depend on the same type of work
+			 * on another Btrfs filesystem via, e.g., a loop device.
+			 * Therefore, we must not allow the current work item to
+			 * be recycled until we are really done, otherwise we
+			 * break the above assumption and can deadlock.
+			 */
+			free_self = true;
+		} else {
+			/*
+			 * We don't want to call the ordered free functions with
+			 * the lock held.
+			 */
+			work->ordered_free(work);
+			/* NB: work must not be dereferenced past this point. */
+			trace_btrfs_all_work_done(wq->fs_info, work);
+		}
 	}
 	spin_unlock_irqrestore(lock, flags);
+
+	if (free_self) {
+		self->ordered_free(self);
+		/* NB: self must not be dereferenced past this point. */
+		trace_btrfs_all_work_done(wq->fs_info, self);
+	}
 }
 
-static void normal_work_helper(struct btrfs_work *work)
+static void btrfs_work_helper(struct work_struct *normal_work)
 {
+	struct btrfs_work *work = container_of(normal_work, struct btrfs_work,
+					       normal_work);
 	struct __btrfs_workqueue *wq;
-	void *wtag;
 	int need_order = 0;
 
 	/*
@@ -316,29 +312,26 @@ static void normal_work_helper(struct btrfs_work *work)
 	if (work->ordered_func)
 		need_order = 1;
 	wq = work->wq;
-	/* Safe for tracepoints in case work gets freed by the callback */
-	wtag = work;
 
 	trace_btrfs_work_sched(work);
 	thresh_exec_hook(wq);
 	work->func(work);
 	if (need_order) {
 		set_bit(WORK_DONE_BIT, &work->flags);
-		run_ordered_work(wq);
+		run_ordered_work(wq, work);
+	} else {
+		/* NB: work must not be dereferenced past this point. */
+		trace_btrfs_all_work_done(wq->fs_info, work);
 	}
-	if (!need_order)
-		trace_btrfs_all_work_done(wq->fs_info, wtag);
 }
 
-void btrfs_init_work(struct btrfs_work *work, btrfs_work_func_t uniq_func,
-		     btrfs_func_t func,
-		     btrfs_func_t ordered_func,
-		     btrfs_func_t ordered_free)
+void btrfs_init_work(struct btrfs_work *work, btrfs_func_t func,
		     btrfs_func_t ordered_func, btrfs_func_t ordered_free)
 {
 	work->func = func;
 	work->ordered_func = ordered_func;
 	work->ordered_free = ordered_free;
-	INIT_WORK(&work->normal_work, uniq_func);
+	INIT_WORK(&work->normal_work, btrfs_work_helper);
 	INIT_LIST_HEAD(&work->ordered_list);
 	work->flags = 0;
 }
...
@@ -29,49 +29,20 @@ struct btrfs_work {
 	unsigned long flags;
 };
 
-#define BTRFS_WORK_HELPER_PROTO(name)					\
-void btrfs_##name(struct work_struct *arg)
-
-BTRFS_WORK_HELPER_PROTO(worker_helper);
-BTRFS_WORK_HELPER_PROTO(delalloc_helper);
-BTRFS_WORK_HELPER_PROTO(flush_delalloc_helper);
-BTRFS_WORK_HELPER_PROTO(cache_helper);
-BTRFS_WORK_HELPER_PROTO(submit_helper);
-BTRFS_WORK_HELPER_PROTO(fixup_helper);
-BTRFS_WORK_HELPER_PROTO(endio_helper);
-BTRFS_WORK_HELPER_PROTO(endio_meta_helper);
-BTRFS_WORK_HELPER_PROTO(endio_meta_write_helper);
-BTRFS_WORK_HELPER_PROTO(endio_raid56_helper);
-BTRFS_WORK_HELPER_PROTO(endio_repair_helper);
-BTRFS_WORK_HELPER_PROTO(rmw_helper);
-BTRFS_WORK_HELPER_PROTO(endio_write_helper);
-BTRFS_WORK_HELPER_PROTO(freespace_write_helper);
-BTRFS_WORK_HELPER_PROTO(delayed_meta_helper);
-BTRFS_WORK_HELPER_PROTO(readahead_helper);
-BTRFS_WORK_HELPER_PROTO(qgroup_rescan_helper);
-BTRFS_WORK_HELPER_PROTO(extent_refs_helper);
-BTRFS_WORK_HELPER_PROTO(scrub_helper);
-BTRFS_WORK_HELPER_PROTO(scrubwrc_helper);
-BTRFS_WORK_HELPER_PROTO(scrubnc_helper);
-BTRFS_WORK_HELPER_PROTO(scrubparity_helper);
-
 struct btrfs_workqueue *btrfs_alloc_workqueue(struct btrfs_fs_info *fs_info,
 					      const char *name,
 					      unsigned int flags,
 					      int limit_active,
 					      int thresh);
-void btrfs_init_work(struct btrfs_work *work, btrfs_work_func_t helper,
-		     btrfs_func_t func,
-		     btrfs_func_t ordered_func,
-		     btrfs_func_t ordered_free);
+void btrfs_init_work(struct btrfs_work *work, btrfs_func_t func,
+		     btrfs_func_t ordered_func, btrfs_func_t ordered_free);
 void btrfs_queue_work(struct btrfs_workqueue *wq,
 		      struct btrfs_work *work);
 void btrfs_destroy_workqueue(struct btrfs_workqueue *wq);
 void btrfs_workqueue_set_max(struct btrfs_workqueue *wq, int max);
 void btrfs_set_work_high_priority(struct btrfs_work *work);
-struct btrfs_fs_info *btrfs_work_owner(const struct btrfs_work *work);
-struct btrfs_fs_info *btrfs_workqueue_owner(const struct __btrfs_workqueue *wq);
+struct btrfs_fs_info * __pure btrfs_work_owner(const struct btrfs_work *work);
+struct btrfs_fs_info * __pure btrfs_workqueue_owner(const struct __btrfs_workqueue *wq);
 bool btrfs_workqueue_normal_congested(const struct btrfs_workqueue *wq);
 
 #endif
...
@@ -34,7 +34,7 @@ struct btrfs_caching_control {
 	struct mutex mutex;
 	wait_queue_head_t wait;
 	struct btrfs_work work;
-	struct btrfs_block_group_cache *block_group;
+	struct btrfs_block_group *block_group;
 	u64 progress;
 	refcount_t count;
 };
@@ -42,14 +42,15 @@ struct btrfs_caching_control {
 /* Once caching_thread() finds this much free space, it will wake up waiters. */
 #define CACHING_CTL_WAKE_UP	SZ_2M
 
-struct btrfs_block_group_cache {
-	struct btrfs_key key;
-	struct btrfs_block_group_item item;
+struct btrfs_block_group {
 	struct btrfs_fs_info *fs_info;
 	struct inode *inode;
 	spinlock_t lock;
+	u64 start;
+	u64 length;
 	u64 pinned;
 	u64 reserved;
+	u64 used;
 	u64 delalloc_bytes;
 	u64 bytes_super;
 	u64 flags;
@@ -159,7 +160,7 @@ struct btrfs_block_group_cache {
 #ifdef CONFIG_BTRFS_DEBUG
 static inline int btrfs_should_fragment_free_space(
-		struct btrfs_block_group_cache *block_group)
+		struct btrfs_block_group *block_group)
 {
 	struct btrfs_fs_info *fs_info = block_group->fs_info;
@@ -170,29 +171,29 @@ static inline int btrfs_should_fragment_free_space(
 }
 #endif
 
-struct btrfs_block_group_cache *btrfs_lookup_first_block_group(
+struct btrfs_block_group *btrfs_lookup_first_block_group(
 		struct btrfs_fs_info *info, u64 bytenr);
-struct btrfs_block_group_cache *btrfs_lookup_block_group(
+struct btrfs_block_group *btrfs_lookup_block_group(
 		struct btrfs_fs_info *info, u64 bytenr);
-struct btrfs_block_group_cache *btrfs_next_block_group(
-		struct btrfs_block_group_cache *cache);
-void btrfs_get_block_group(struct btrfs_block_group_cache *cache);
-void btrfs_put_block_group(struct btrfs_block_group_cache *cache);
+struct btrfs_block_group *btrfs_next_block_group(
+		struct btrfs_block_group *cache);
+void btrfs_get_block_group(struct btrfs_block_group *cache);
+void btrfs_put_block_group(struct btrfs_block_group *cache);
 void btrfs_dec_block_group_reservations(struct btrfs_fs_info *fs_info,
 					const u64 start);
-void btrfs_wait_block_group_reservations(struct btrfs_block_group_cache *bg);
+void btrfs_wait_block_group_reservations(struct btrfs_block_group *bg);
 bool btrfs_inc_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr);
 void btrfs_dec_nocow_writers(struct btrfs_fs_info *fs_info, u64 bytenr);
-void btrfs_wait_nocow_writers(struct btrfs_block_group_cache *bg);
-void btrfs_wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
+void btrfs_wait_nocow_writers(struct btrfs_block_group *bg);
+void btrfs_wait_block_group_cache_progress(struct btrfs_block_group *cache,
 					   u64 num_bytes);
-int btrfs_wait_block_group_cache_done(struct btrfs_block_group_cache *cache);
-int btrfs_cache_block_group(struct btrfs_block_group_cache *cache,
+int btrfs_wait_block_group_cache_done(struct btrfs_block_group *cache);
+int btrfs_cache_block_group(struct btrfs_block_group *cache,
 			    int load_cache_only);
 void btrfs_put_caching_control(struct btrfs_caching_control *ctl);
 struct btrfs_caching_control *btrfs_get_caching_control(
-		struct btrfs_block_group_cache *cache);
-u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
+		struct btrfs_block_group *cache);
+u64 add_new_free_space(struct btrfs_block_group *block_group,
 		       u64 start, u64 end);
 struct btrfs_trans_handle *btrfs_start_trans_remove_block_group(
 		struct btrfs_fs_info *fs_info,
@@ -200,21 +201,22 @@ struct btrfs_trans_handle *btrfs_start_trans_remove_block_group(
 int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
 			     u64 group_start, struct extent_map *em);
 void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info);
-void btrfs_mark_bg_unused(struct btrfs_block_group_cache *bg);
+void btrfs_mark_bg_unused(struct btrfs_block_group *bg);
 int btrfs_read_block_groups(struct btrfs_fs_info *info);
 int btrfs_make_block_group(struct btrfs_trans_handle *trans, u64 bytes_used,
 			   u64 type, u64 chunk_offset, u64 size);
 void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans);
-int btrfs_inc_block_group_ro(struct btrfs_block_group_cache *cache);
-void btrfs_dec_block_group_ro(struct btrfs_block_group_cache *cache);
+int btrfs_inc_block_group_ro(struct btrfs_block_group *cache,
+			     bool do_chunk_alloc);
+void btrfs_dec_block_group_ro(struct btrfs_block_group *cache);
 int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans);
 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans);
 int btrfs_setup_space_cache(struct btrfs_trans_handle *trans);
 int btrfs_update_block_group(struct btrfs_trans_handle *trans,
 			     u64 bytenr, u64 num_bytes, int alloc);
-int btrfs_add_reserved_bytes(struct btrfs_block_group_cache *cache,
+int btrfs_add_reserved_bytes(struct btrfs_block_group *cache,
 			     u64 ram_bytes, u64 num_bytes, int delalloc);
-void btrfs_free_reserved_bytes(struct btrfs_block_group_cache *cache,
+void btrfs_free_reserved_bytes(struct btrfs_block_group *cache,
 			       u64 num_bytes, int delalloc);
 int btrfs_chunk_alloc(struct btrfs_trans_handle *trans, u64 flags,
 		      enum btrfs_chunk_alloc_enum force);
@@ -239,8 +241,7 @@ static inline u64 btrfs_system_alloc_profile(struct btrfs_fs_info *fs_info)
 	return btrfs_get_alloc_profile(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
 }
 
-static inline int btrfs_block_group_cache_done(
-		struct btrfs_block_group_cache *cache)
+static inline int btrfs_block_group_done(struct btrfs_block_group *cache)
 {
 	smp_mb();
 	return cache->cached == BTRFS_CACHE_FINISHED ||
...
@@ -63,9 +63,6 @@ struct btrfs_inode {
 	/* held while logging the inode in tree-log.c */
 	struct mutex log_mutex;
 
-	/* held while doing delalloc reservations */
-	struct mutex delalloc_mutex;
-
 	/* used to order data wrt metadata */
 	struct btrfs_ordered_inode_tree ordered_tree;
...
@@ -93,7 +93,8 @@ blk_status_t btrfs_submit_compressed_write(struct inode *inode, u64 start,
 				  unsigned long compressed_len,
 				  struct page **compressed_pages,
 				  unsigned long nr_pages,
-				  unsigned int write_flags);
+				  unsigned int write_flags,
+				  struct cgroup_subsys_state *blkcg_css);
 blk_status_t btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
 				 int mirror_num, unsigned long bio_flags);
@@ -104,11 +105,10 @@ enum btrfs_compression_type {
 	BTRFS_COMPRESS_ZLIB = 1,
 	BTRFS_COMPRESS_LZO = 2,
 	BTRFS_COMPRESS_ZSTD = 3,
-	BTRFS_COMPRESS_TYPES = 3,
+	BTRFS_NR_COMPRESS_TYPES = 4,
 };
 
 struct workspace_manager {
-	const struct btrfs_compress_op *ops;
 	struct list_head idle_ws;
 	spinlock_t ws_lock;
 	/* Number of free workspaces */
@@ -119,50 +119,18 @@ struct workspace_manager {
 	wait_queue_head_t ws_wait;
 };
 
-void btrfs_init_workspace_manager(struct workspace_manager *wsm,
-				  const struct btrfs_compress_op *ops);
-struct list_head *btrfs_get_workspace(struct workspace_manager *wsm,
-				      unsigned int level);
-void btrfs_put_workspace(struct workspace_manager *wsm, struct list_head *ws);
-void btrfs_cleanup_workspace_manager(struct workspace_manager *wsm);
+struct list_head *btrfs_get_workspace(int type, unsigned int level);
+void btrfs_put_workspace(int type, struct list_head *ws);
 
 struct btrfs_compress_op {
-	void (*init_workspace_manager)(void);
-	void (*cleanup_workspace_manager)(void);
-	struct list_head *(*get_workspace)(unsigned int level);
-	void (*put_workspace)(struct list_head *ws);
-	struct list_head *(*alloc_workspace)(unsigned int level);
-	void (*free_workspace)(struct list_head *workspace);
-	int (*compress_pages)(struct list_head *workspace,
-			      struct address_space *mapping,
-			      u64 start,
-			      struct page **pages,
-			      unsigned long *out_pages,
-			      unsigned long *total_in,
-			      unsigned long *total_out);
-	int (*decompress_bio)(struct list_head *workspace,
-			      struct compressed_bio *cb);
-	int (*decompress)(struct list_head *workspace,
-			  unsigned char *data_in,
-			  struct page *dest_page,
-			  unsigned long start_byte,
-			  size_t srclen, size_t destlen);
+	struct workspace_manager *workspace_manager;
 	/* Maximum level supported by the compression algorithm */
 	unsigned int max_level;
 	unsigned int default_level;
 };
 
 /* The heuristic workspaces are managed via the 0th workspace manager */
-#define BTRFS_NR_WORKSPACE_MANAGERS	(BTRFS_COMPRESS_TYPES + 1)
+#define BTRFS_NR_WORKSPACE_MANAGERS	BTRFS_NR_COMPRESS_TYPES
 
 extern const struct btrfs_compress_op btrfs_heuristic_compress;
 extern const struct btrfs_compress_op btrfs_zlib_compress;
...
@@ -32,8 +32,13 @@ static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
 static const struct btrfs_csums {
 	u16		size;
 	const char	*name;
+	const char	*driver;
 } btrfs_csums[] = {
 	[BTRFS_CSUM_TYPE_CRC32] = { .size = 4, .name = "crc32c" },
+	[BTRFS_CSUM_TYPE_XXHASH] = { .size = 8, .name = "xxhash64" },
+	[BTRFS_CSUM_TYPE_SHA256] = { .size = 32, .name = "sha256" },
+	[BTRFS_CSUM_TYPE_BLAKE2] = { .size = 32, .name = "blake2b",
+				     .driver = "blake2b-256" },
 };
 
 int btrfs_super_csum_size(const struct btrfs_super_block *s)
@@ -51,34 +56,25 @@ const char *btrfs_super_csum_name(u16 csum_type)
 	return btrfs_csums[csum_type].name;
 }
 
-struct btrfs_path *btrfs_alloc_path(void)
+/*
+ * Return driver name if defined, otherwise the name that's also a valid driver
+ * name
+ */
+const char *btrfs_super_csum_driver(u16 csum_type)
 {
-	return kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
+	/* csum type is validated at mount time */
+	return btrfs_csums[csum_type].driver ?:
+	       btrfs_csums[csum_type].name;
 }
 
-/*
- * set all locked nodes in the path to blocking locks.  This should
- * be done before scheduling
- */
-noinline void btrfs_set_path_blocking(struct btrfs_path *p)
+size_t __const btrfs_get_num_csums(void)
 {
-	int i;
-
-	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
-		if (!p->nodes[i] || !p->locks[i])
-			continue;
-		/*
-		 * If we currently have a spinning reader or writer lock this
-		 * will bump the count of blocking holders and drop the
-		 * spinlock.
-		 */
-		if (p->locks[i] == BTRFS_READ_LOCK) {
-			btrfs_set_lock_blocking_read(p->nodes[i]);
-			p->locks[i] = BTRFS_READ_LOCK_BLOCKING;
-		} else if (p->locks[i] == BTRFS_WRITE_LOCK) {
-			btrfs_set_lock_blocking_write(p->nodes[i]);
-			p->locks[i] = BTRFS_WRITE_LOCK_BLOCKING;
-		}
-	}
+	return ARRAY_SIZE(btrfs_csums);
+}
+
+struct btrfs_path *btrfs_alloc_path(void)
+{
+	return kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
 }
 
 /* this also releases the path */
@@ -1125,7 +1121,7 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
 		    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
 			parent_start = buf->start;
 
-		extent_buffer_get(cow);
+		atomic_inc(&cow->refs);
 		ret = tree_mod_log_insert_root(root->node, cow, 1);
 		BUG_ON(ret < 0);
 		rcu_assign_pointer(root->node, cow);
@@ -1563,7 +1559,7 @@ static int comp_keys(const struct btrfs_disk_key *disk,
 /*
  * same as comp_keys only with two btrfs_key's
  */
-int btrfs_comp_cpu_keys(const struct btrfs_key *k1, const struct btrfs_key *k2)
+int __pure btrfs_comp_cpu_keys(const struct btrfs_key *k1, const struct btrfs_key *k2)
 {
 	if (k1->objectid > k2->objectid)
 		return 1;
@@ -2036,7 +2032,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
 	/* update the path */
 	if (left) {
 		if (btrfs_header_nritems(left) > orig_slot) {
-			extent_buffer_get(left);
+			atomic_inc(&left->refs);
 			/* left was locked after cow */
 			path->nodes[level] = left;
 			path->slots[level + 1] -= 1;
@@ -2378,32 +2374,6 @@ static noinline void unlock_up(struct btrfs_path *path, int level,
 	}
 }
 
-/*
- * This releases any locks held in the path starting at level and
- * going all the way up to the root.
- *
- * btrfs_search_slot will keep the lock held on higher nodes in a few
- * corner cases, such as COW of the block at slot zero in the node. This
- * ignores those rules, and it should only be called when there are no
- * more updates to be done higher up in the tree.
- */
-noinline void btrfs_unlock_up_safe(struct btrfs_path *path, int level)
-{
-	int i;
-
-	if (path->keep_locks)
-		return;
-
-	for (i = level; i < BTRFS_MAX_LEVEL; i++) {
-		if (!path->nodes[i])
-			continue;
-		if (!path->locks[i])
-			continue;
-		btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]);
-		path->locks[i] = 0;
-	}
-}
-
 /*
  * helper function for btrfs_search_slot.  The goal is to find a block
  * in cache without setting the path to blocking.  If we find the block
@@ -2652,7 +2622,7 @@ static struct extent_buffer *btrfs_search_slot_get_root(struct btrfs_root *root,
 	} else {
 		b = root->commit_root;
-		extent_buffer_get(b);
+		atomic_inc(&b->refs);
 	}
 	level = btrfs_header_level(b);
 	/*
@@ -2785,12 +2755,10 @@ int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root *root,
 	}
 
 	while (b) {
+		int dec = 0;
+
 		level = btrfs_header_level(b);
 
-		/*
-		 * setup the path here so we can release it under lock
-		 * contention with the cow code
-		 */
 		if (cow) {
 			bool last_level = (level == (BTRFS_MAX_LEVEL - 1));
@@ -2861,15 +2829,38 @@ int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root *root,
 		if (ret < 0)
 			goto done;
 
-		if (level != 0) {
-			int dec = 0;
+		if (level == 0) {
+			p->slots[level] = slot;
+			if (ins_len > 0 &&
+			    btrfs_leaf_free_space(b) < ins_len) {
+				if (write_lock_level < 1) {
+					write_lock_level = 1;
+					btrfs_release_path(p);
+					goto again;
+				}
+
+				btrfs_set_path_blocking(p);
+				err = split_leaf(trans, root, key,
+						 p, ins_len, ret == 0);
+
+				BUG_ON(err > 0);
+				if (err) {
+					ret = err;
+					goto done;
+				}
+			}
+
+			if (!p->search_for_split)
+				unlock_up(p, level, lowest_unlock,
+					  min_write_lock_level, NULL);
+			goto done;
+		}
 
 		if (ret && slot > 0) {
 			dec = 1;
-			slot -= 1;
+			slot--;
 		}
 		p->slots[level] = slot;
-		err = setup_nodes_for_search(trans, root, p, b, level,
-				ins_len, &write_lock_level);
+		err = setup_nodes_for_search(trans, root, p, b, level, ins_len,
+					     &write_lock_level);
 		if (err == -EAGAIN)
 			goto again;
 		if (err) {
@@ -2880,20 +2871,18 @@ int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root *root,
 		slot = p->slots[level];
 
 		/*
-		 * slot 0 is special, if we change the key
-		 * we have to update the parent pointer
-		 * which means we must have a write lock
-		 * on the parent
+		 * Slot 0 is special, if we change the key we have to update
+		 * the parent pointer which means we must have a write lock on
+		 * the parent
 		 */
-		if (slot == 0 && ins_len &&
-		    write_lock_level < level + 1) {
+		if (slot == 0 && ins_len && write_lock_level < level + 1) {
 			write_lock_level = level + 1;
 			btrfs_release_path(p);
 			goto again;
 		}
 
-		unlock_up(p, level, lowest_unlock,
-			  min_write_lock_level, &write_lock_level);
+		unlock_up(p, level, lowest_unlock, min_write_lock_level,
+			  &write_lock_level);
 
 		if (level == lowest_level) {
 			if (dec)
@@ -2901,8 +2890,7 @@ int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root *root,
 			goto done;
 		}
 
-		err = read_block_for_search(root, p, &b, level,
-					    slot, key);
+		err = read_block_for_search(root, p, &b, level, slot, key);
 		if (err == -EAGAIN)
 			goto again;
 		if (err) {
@@ -2927,31 +2915,6 @@ int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root *root,
 		}
 
 		p->nodes[level] = b;
-		} else {
-			p->slots[level] = slot;
-			if (ins_len > 0 &&
-			    btrfs_leaf_free_space(b) < ins_len) {
-				if (write_lock_level < 1) {
-					write_lock_level = 1;
-					btrfs_release_path(p);
-					goto again;
-				}
-
-				btrfs_set_path_blocking(p);
-				err = split_leaf(trans, root, key,
-						 p, ins_len, ret == 0);
-
-				BUG_ON(err > 0);
-				if (err) {
-					ret = err;
-					goto done;
-				}
-			}
-			if (!p->search_for_split)
-				unlock_up(p, level, lowest_unlock,
-					  min_write_lock_level, NULL);
-			goto done;
-		}
 	}
 	ret = 1;
 done:
@@ -3008,6 +2971,8 @@ int btrfs_search_old_slot(struct btrfs_root *root, const struct btrfs_key *key,
 	p->locks[level] = BTRFS_READ_LOCK;
 
 	while (b) {
+		int dec = 0;
+
 		level = btrfs_header_level(b);
 		p->nodes[level] = b;
 
@@ -3028,11 +2993,15 @@ int btrfs_search_old_slot(struct btrfs_root *root, const struct btrfs_key *key,
 		if (ret < 0)
 			goto done;
 
-		if (level != 0) {
-			int dec = 0;
+		if (level == 0) {
+			p->slots[level] = slot;
+			unlock_up(p, level, lowest_unlock, 0, NULL);
+			goto done;
+		}
 
 		if (ret && slot > 0) {
 			dec = 1;
-			slot -= 1;
+			slot--;
 		}
 		p->slots[level] = slot;
 		unlock_up(p, level, lowest_unlock, 0, NULL);
@@ -3043,8 +3012,7 @@ int btrfs_search_old_slot(struct btrfs_root *root, const struct btrfs_key *key,
 			goto done;
 		}
 
-		err = read_block_for_search(root, p, &b, level,
-					    slot, key);
+		err = read_block_for_search(root, p, &b, level, slot, key);
 		if (err == -EAGAIN)
 			goto again;
 		if (err) {
@@ -3064,11 +3032,6 @@ int btrfs_search_old_slot(struct btrfs_root *root, const struct btrfs_key *key,
 		}
 
 		p->locks[level] = BTRFS_READ_LOCK;
 		p->nodes[level] = b;
-		} else {
-			p->slots[level] = slot;
-			unlock_up(p, level, lowest_unlock, 0, NULL);
-			goto done;
-		}
 	}
 	ret = 1;
 done:
@@ -3433,7 +3396,7 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans,
 	free_extent_buffer(old);
 
 	add_root_to_dirty_list(root);
-	extent_buffer_get(c);
+	atomic_inc(&c->refs);
 	path->nodes[level] = c;
 	path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
 	path->slots[level] = 0;
@@ -4966,7 +4929,7 @@ static noinline void btrfs_del_leaf(struct btrfs_trans_handle *trans,
 	root_sub_used(root, leaf->len);
 
-	extent_buffer_get(leaf);
+	atomic_inc(&leaf->refs);
 	btrfs_free_tree_block(trans, root, leaf, 0, 1);
 	free_extent_buffer_stale(leaf);
 }
@@ -5047,7 +5010,7 @@ int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
 			 * for possible call to del_ptr below
 			 */
 			slot = path->slots[1];
-			extent_buffer_get(leaf);
+			atomic_inc(&leaf->refs);
 
 			btrfs_set_path_blocking(path);
 			wret = push_leaf_left(trans, root, path, 1, 1,
...
@@ -28,6 +28,7 @@
 #include <linux/dynamic_debug.h>
 #include <linux/refcount.h>
 #include <linux/crc32c.h>
+#include "extent-io-tree.h"
 #include "extent_io.h"
 #include "extent_map.h"
 #include "async-thread.h"
@@ -38,7 +39,7 @@ struct btrfs_transaction;
 struct btrfs_pending_snapshot;
 struct btrfs_delayed_ref_root;
 struct btrfs_space_info;
-struct btrfs_block_group_cache;
+struct btrfs_block_group;
 extern struct kmem_cache *btrfs_trans_handle_cachep;
 extern struct kmem_cache *btrfs_bit_radix_cachep;
 extern struct kmem_cache *btrfs_path_cachep;
@@ -56,9 +57,9 @@ struct btrfs_ref;
  * filesystem data as well that can be used to read data in order to repair
  * read errors on other disks.
  *
- * Current value is derived from RAID1 with 2 copies.
+ * Current value is derived from RAID1C4 with 4 copies.
  */
-#define BTRFS_MAX_MIRRORS (2 + 1)
+#define BTRFS_MAX_MIRRORS (4 + 1)
 
 #define BTRFS_MAX_LEVEL 8
@@ -291,7 +292,8 @@ struct btrfs_super_block {
 	 BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF |		\
 	 BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA |	\
 	 BTRFS_FEATURE_INCOMPAT_NO_HOLES	|	\
-	 BTRFS_FEATURE_INCOMPAT_METADATA_UUID)
+	 BTRFS_FEATURE_INCOMPAT_METADATA_UUID	|	\
+	 BTRFS_FEATURE_INCOMPAT_RAID1C34)
 
 #define BTRFS_FEATURE_INCOMPAT_SAFE_SET			\
 	(BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF)
@@ -413,7 +415,7 @@ struct btrfs_free_cluster {
 	/* We did a full search and couldn't create a cluster */
 	bool fragmented;
 
-	struct btrfs_block_group_cache *block_group;
+	struct btrfs_block_group *block_group;
 	/*
 	 * when a cluster is allocated from a block group, we put the
 	 * cluster onto a list in the block group so that it can
@@ -476,8 +478,8 @@ struct btrfs_swapfile_pin {
 	void *ptr;
 	struct inode *inode;
 	/*
-	 * If true, ptr points to a struct btrfs_block_group_cache. Otherwise,
-	 * ptr points to a struct btrfs_device.
+	 * If true, ptr points to a struct btrfs_block_group. Otherwise, ptr
+	 * points to a struct btrfs_device.
	 */
 	bool is_block_group;
 };
@@ -722,7 +724,6 @@ struct btrfs_fs_info {
 	struct btrfs_workqueue *endio_meta_write_workers;
 	struct btrfs_workqueue *endio_write_workers;
 	struct btrfs_workqueue *endio_freespace_worker;
-	struct btrfs_workqueue *submit_workers;
 	struct btrfs_workqueue *caching_workers;
 	struct btrfs_workqueue *readahead_workers;
@@ -1519,18 +1520,18 @@ static inline u64 btrfs_stripe_devid_nr(struct extent_buffer *eb,
 }
 
 /* struct btrfs_block_group_item */
-BTRFS_SETGET_STACK_FUNCS(block_group_used, struct btrfs_block_group_item,
+BTRFS_SETGET_STACK_FUNCS(stack_block_group_used, struct btrfs_block_group_item,
			 used, 64);
-BTRFS_SETGET_FUNCS(disk_block_group_used, struct btrfs_block_group_item,
+BTRFS_SETGET_FUNCS(block_group_used, struct btrfs_block_group_item,
		   used, 64);
-BTRFS_SETGET_STACK_FUNCS(block_group_chunk_objectid,
+BTRFS_SETGET_STACK_FUNCS(stack_block_group_chunk_objectid,
			 struct btrfs_block_group_item, chunk_objectid, 64);
-BTRFS_SETGET_FUNCS(disk_block_group_chunk_objectid,
+BTRFS_SETGET_FUNCS(block_group_chunk_objectid,
		   struct btrfs_block_group_item, chunk_objectid, 64);
-BTRFS_SETGET_FUNCS(disk_block_group_flags,
+BTRFS_SETGET_FUNCS(block_group_flags,
		   struct btrfs_block_group_item, flags, 64);
-BTRFS_SETGET_STACK_FUNCS(block_group_flags,
+BTRFS_SETGET_STACK_FUNCS(stack_block_group_flags,
			 struct btrfs_block_group_item, flags, 64);
 
 /* struct btrfs_free_space_info */
@@ -2163,6 +2164,9 @@ BTRFS_SETGET_STACK_FUNCS(super_uuid_tree_generation, struct btrfs_super_block,
 
 int btrfs_super_csum_size(const struct btrfs_super_block *s);
 const char *btrfs_super_csum_name(u16 csum_type);
+const char *btrfs_super_csum_driver(u16 csum_type);
+size_t __const btrfs_get_num_csums(void);
 
 /*
  * The leaf data grows from end-to-front in the node.
@@ -2397,7 +2401,7 @@ static inline u64 btrfs_calc_metadata_size(struct btrfs_fs_info *fs_info,
 int btrfs_add_excluded_extent(struct btrfs_fs_info *fs_info,
			      u64 start, u64 num_bytes);
-void btrfs_free_excluded_extents(struct btrfs_block_group_cache *cache);
+void btrfs_free_excluded_extents(struct btrfs_block_group *cache);
 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
			   unsigned long count);
 void btrfs_cleanup_ref_head_accounting(struct btrfs_fs_info *fs_info,
@@ -2453,8 +2457,8 @@ int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
			 struct btrfs_ref *generic_ref);
 
 int btrfs_extent_readonly(struct btrfs_fs_info *fs_info, u64 bytenr);
-void btrfs_get_block_group_trimming(struct btrfs_block_group_cache *cache);
-void btrfs_put_block_group_trimming(struct btrfs_block_group_cache *cache);
+void btrfs_get_block_group_trimming(struct btrfs_block_group *cache);
+void btrfs_put_block_group_trimming(struct btrfs_block_group *cache);
 void btrfs_clear_space_info_full(struct btrfs_fs_info *info);
 
 enum btrfs_reserve_flush_enum {
@@ -2507,7 +2511,7 @@ void btrfs_wait_for_snapshot_creation(struct btrfs_root *root);
 /* ctree.c */
 int btrfs_bin_search(struct extent_buffer *eb, const struct btrfs_key *key,
		     int level, int *slot);
-int btrfs_comp_cpu_keys(const struct btrfs_key *k1, const struct btrfs_key *k2);
+int __pure btrfs_comp_cpu_keys(const struct btrfs_key *k1, const struct btrfs_key *k2);
 int btrfs_previous_item(struct btrfs_root *root,
			struct btrfs_path *path, u64 min_objectid,
			int type);
@@ -2567,8 +2571,6 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans,
 void btrfs_release_path(struct btrfs_path *p);
 struct btrfs_path *btrfs_alloc_path(void);
 void btrfs_free_path(struct btrfs_path *p);
-void btrfs_set_path_blocking(struct btrfs_path *p);
-void btrfs_unlock_up_safe(struct btrfs_path *p, int level);
 int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		    struct btrfs_path *path, int slot, int nr);
@@ -2870,10 +2872,9 @@ int btrfs_drop_inode(struct inode *inode);
 int __init btrfs_init_cachep(void);
 void __cold btrfs_destroy_cachep(void);
 struct inode *btrfs_iget_path(struct super_block *s, struct btrfs_key *location,
-			      struct btrfs_root *root, int *new,
-			      struct btrfs_path *path);
+			      struct btrfs_root *root, struct btrfs_path *path);
 struct inode *btrfs_iget(struct super_block *s, struct btrfs_key *location,
-			 struct btrfs_root *root, int *was_new);
+			 struct btrfs_root *root);
 struct extent_map *btrfs_get_extent(struct btrfs_inode *inode,
				    struct page *page, size_t pg_offset,
				    u64 start, u64 end, int create);
@@ -2909,7 +2910,7 @@ long btrfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
 long btrfs_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
 int btrfs_ioctl_get_supported_features(void __user *arg);
 void btrfs_sync_inode_flags_to_i_flags(struct inode *inode);
-int btrfs_is_empty_uuid(u8 *uuid);
+int __pure btrfs_is_empty_uuid(u8 *uuid);
 int btrfs_defrag_file(struct inode *inode, struct file *file,
		      struct btrfs_ioctl_defrag_range_args *range,
		      u64 newer_than, unsigned long max_pages);
@@ -3143,7 +3144,7 @@ __cold
 void __btrfs_handle_fs_error(struct btrfs_fs_info *fs_info, const char *function,
			     unsigned int line, int errno, const char *fmt, ...);
 
-const char *btrfs_decode_error(int errno);
+const char * __attribute_const__ btrfs_decode_error(int errno);
 
 __cold
 void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
...
@@ -307,7 +307,6 @@ int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes)
 	unsigned nr_extents;
 	enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_FLUSH_ALL;
 	int ret = 0;
-	bool delalloc_lock = true;
 
 	/*
 	 * If we are a free space inode we need to not flush since we will be in
@@ -320,7 +319,6 @@ int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes)
	 */
 	if (btrfs_is_free_space_inode(inode)) {
 		flush = BTRFS_RESERVE_NO_FLUSH;
-		delalloc_lock = false;
 	} else {
 		if (current->journal_info)
 			flush = BTRFS_RESERVE_FLUSH_LIMIT;
@@ -329,9 +327,6 @@ int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes)
			schedule_timeout(1);
 	}
 
-	if (delalloc_lock)
-		mutex_lock(&inode->delalloc_mutex);
-
 	num_bytes = ALIGN(num_bytes, fs_info->sectorsize);
 
 	/*
@@ -348,10 +343,12 @@ int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes)
					  &qgroup_reserve);
 	ret = btrfs_qgroup_reserve_meta_prealloc(root, qgroup_reserve, true);
 	if (ret)
-		goto out_fail;
+		return ret;
 	ret = btrfs_reserve_metadata_bytes(root, block_rsv, meta_reserve, flush);
-	if (ret)
-		goto out_qgroup;
+	if (ret) {
+		btrfs_qgroup_free_meta_prealloc(root, qgroup_reserve);
+		return ret;
+	}
 
 	/*
 	 * Now we need to update our outstanding extents and csum bytes _first_
@@ -375,15 +372,7 @@ int btrfs_delalloc_reserve_metadata(struct btrfs_inode *inode, u64 num_bytes)
 	block_rsv->qgroup_rsv_reserved += qgroup_reserve;
 	spin_unlock(&block_rsv->lock);
 
-	if (delalloc_lock)
-		mutex_unlock(&inode->delalloc_mutex);
-
 	return 0;
-out_qgroup:
-	btrfs_qgroup_free_meta_prealloc(root, qgroup_reserve);
-out_fail:
-	if (delalloc_lock)
-		mutex_unlock(&inode->delalloc_mutex);
-	return ret;
 }
 
 /**
...
@@ -12,6 +12,7 @@
 #include "transaction.h"
 #include "ctree.h"
 #include "qgroup.h"
+#include "locking.h"
 
 #define BTRFS_DELAYED_WRITEBACK		512
 #define BTRFS_DELAYED_BACKGROUND	128
@@ -1367,8 +1368,8 @@ static int btrfs_wq_run_delayed_node(struct btrfs_delayed_root *delayed_root,
 		return -ENOMEM;
 
 	async_work->delayed_root = delayed_root;
-	btrfs_init_work(&async_work->work, btrfs_delayed_meta_helper,
-			btrfs_async_run_delayed_root, NULL, NULL);
+	btrfs_init_work(&async_work->work, btrfs_async_run_delayed_root, NULL,
+			NULL);
 	async_work->nr = nr;
 
 	btrfs_queue_work(fs_info->delayed_workers, &async_work->work);
@@ -1949,12 +1950,19 @@ void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
 		}
 
 		inode_id = delayed_nodes[n - 1]->inode_id + 1;
-
-		for (i = 0; i < n; i++)
-			refcount_inc(&delayed_nodes[i]->refs);
+		for (i = 0; i < n; i++) {
+			/*
+			 * Don't increase refs in case the node is dead and
+			 * about to be removed from the tree in the loop below
+			 */
+			if (!refcount_inc_not_zero(&delayed_nodes[i]->refs))
+				delayed_nodes[i] = NULL;
+		}
 		spin_unlock(&root->inode_lock);
 
 		for (i = 0; i < n; i++) {
+			if (!delayed_nodes[i])
+				continue;
 			__btrfs_kill_delayed_node(delayed_nodes[i]);
 			btrfs_release_delayed_node(delayed_nodes[i]);
 		}
...
@@ -986,7 +986,7 @@ static int btrfs_dev_replace_kthread(void *data)
 	return 0;
 }
 
-int btrfs_dev_replace_is_ongoing(struct btrfs_dev_replace *dev_replace)
+int __pure btrfs_dev_replace_is_ongoing(struct btrfs_dev_replace *dev_replace)
 {
 	if (!dev_replace->is_valid)
 		return 0;
...
@@ -17,6 +17,6 @@ void btrfs_dev_replace_status(struct btrfs_fs_info *fs_info,
 int btrfs_dev_replace_cancel(struct btrfs_fs_info *fs_info);
 void btrfs_dev_replace_suspend_for_unmount(struct btrfs_fs_info *fs_info);
 int btrfs_resume_dev_replace_async(struct btrfs_fs_info *fs_info);
-int btrfs_dev_replace_is_ongoing(struct btrfs_dev_replace *dev_replace);
+int __pure btrfs_dev_replace_is_ongoing(struct btrfs_dev_replace *dev_replace);
 
 #endif
...
...@@ -49,10 +49,10 @@ struct extent_buffer *btrfs_find_create_tree_block(
 						struct btrfs_fs_info *fs_info,
 						u64 bytenr);
 void btrfs_clean_tree_block(struct extent_buffer *buf);
-int open_ctree(struct super_block *sb,
+int __cold open_ctree(struct super_block *sb,
 	       struct btrfs_fs_devices *fs_devices,
 	       char *options);
-void close_ctree(struct btrfs_fs_info *fs_info);
+void __cold close_ctree(struct btrfs_fs_info *fs_info);
 int write_all_supers(struct btrfs_fs_info *fs_info, int max_mirrors);
 struct buffer_head *btrfs_read_dev_super(struct block_device *bdev);
 int btrfs_read_dev_one_super(struct block_device *bdev, int copy_num,
...
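__cold likewise maps to the compiler's cold attribute: open_ctree()/close_ctree() run once per mount and unmount, so marking them cold lets the compiler move them out of the hot text and treat branches leading to them as unlikely. A minimal sketch of the attribute (illustrative, not kernel code):

#include <stdio.h>
#include <stdlib.h>

/*
 * Cold: rarely executed (setup/teardown/error paths); the compiler
 * places it away from hot code and predicts calls to it as unlikely.
 */
__attribute__((cold))
static void mount_error(const char *msg)
{
	fprintf(stderr, "mount failed: %s\n", msg);
	exit(1);
}

int main(void)
{
	int ok = 1;	/* pretend the superblock checked out */

	if (!ok)
		mount_error("bad superblock");	/* treated as unlikely */
	puts("mounted");
	return 0;
}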
...@@ -87,7 +87,7 @@ static struct dentry *btrfs_get_dentry(struct super_block *sb, u64 objectid,
 	key.type = BTRFS_INODE_ITEM_KEY;
 	key.offset = 0;
-	inode = btrfs_iget(sb, &key, root, NULL);
+	inode = btrfs_iget(sb, &key, root);
 	if (IS_ERR(inode)) {
 		err = PTR_ERR(inode);
 		goto fail;
...@@ -214,7 +214,7 @@ static struct dentry *btrfs_get_parent(struct dentry *child)
 	key.type = BTRFS_INODE_ITEM_KEY;
 	key.offset = 0;
-	return d_obtain_alias(btrfs_iget(fs_info->sb, &key, root, NULL));
+	return d_obtain_alias(btrfs_iget(fs_info->sb, &key, root));
 fail:
 	btrfs_free_path(path);
 	return ERR_PTR(ret);
...
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef BTRFS_EXTENT_IO_TREE_H
#define BTRFS_EXTENT_IO_TREE_H
struct extent_changeset;
struct io_failure_record;
/* Bits for the extent state */
#define EXTENT_DIRTY (1U << 0)
#define EXTENT_UPTODATE (1U << 1)
#define EXTENT_LOCKED (1U << 2)
#define EXTENT_NEW (1U << 3)
#define EXTENT_DELALLOC (1U << 4)
#define EXTENT_DEFRAG (1U << 5)
#define EXTENT_BOUNDARY (1U << 6)
#define EXTENT_NODATASUM (1U << 7)
#define EXTENT_CLEAR_META_RESV (1U << 8)
#define EXTENT_NEED_WAIT (1U << 9)
#define EXTENT_DAMAGED (1U << 10)
#define EXTENT_NORESERVE (1U << 11)
#define EXTENT_QGROUP_RESERVED (1U << 12)
#define EXTENT_CLEAR_DATA_RESV (1U << 13)
#define EXTENT_DELALLOC_NEW (1U << 14)
#define EXTENT_DO_ACCOUNTING (EXTENT_CLEAR_META_RESV | \
EXTENT_CLEAR_DATA_RESV)
#define EXTENT_CTLBITS (EXTENT_DO_ACCOUNTING)
/*
* Redefined bits above which are used only in the device allocation tree,
* shouldn't be using EXTENT_LOCKED / EXTENT_BOUNDARY / EXTENT_CLEAR_META_RESV
* / EXTENT_CLEAR_DATA_RESV because they have special meaning to the bit
* manipulation functions
*/
#define CHUNK_ALLOCATED EXTENT_DIRTY
#define CHUNK_TRIMMED EXTENT_DEFRAG
enum {
IO_TREE_FS_INFO_FREED_EXTENTS0,
IO_TREE_FS_INFO_FREED_EXTENTS1,
IO_TREE_INODE_IO,
IO_TREE_INODE_IO_FAILURE,
IO_TREE_RELOC_BLOCKS,
IO_TREE_TRANS_DIRTY_PAGES,
IO_TREE_ROOT_DIRTY_LOG_PAGES,
IO_TREE_SELFTEST,
};
struct extent_io_tree {
struct rb_root state;
struct btrfs_fs_info *fs_info;
void *private_data;
u64 dirty_bytes;
bool track_uptodate;
/* Who owns this io tree, should be one of IO_TREE_* */
u8 owner;
spinlock_t lock;
const struct extent_io_ops *ops;
};
struct extent_state {
u64 start;
u64 end; /* inclusive */
struct rb_node rb_node;
/* ADD NEW ELEMENTS AFTER THIS */
wait_queue_head_t wq;
refcount_t refs;
unsigned state;
struct io_failure_record *failrec;
#ifdef CONFIG_BTRFS_DEBUG
struct list_head leak_list;
#endif
};
int __init extent_state_cache_init(void);
void __cold extent_state_cache_exit(void);
void extent_io_tree_init(struct btrfs_fs_info *fs_info,
struct extent_io_tree *tree, unsigned int owner,
void *private_data);
void extent_io_tree_release(struct extent_io_tree *tree);
int lock_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
struct extent_state **cached);
static inline int lock_extent(struct extent_io_tree *tree, u64 start, u64 end)
{
return lock_extent_bits(tree, start, end, NULL);
}
int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end);
int __init extent_io_init(void);
void __cold extent_io_exit(void);
u64 count_range_bits(struct extent_io_tree *tree,
u64 *start, u64 search_end,
u64 max_bytes, unsigned bits, int contig);
void free_extent_state(struct extent_state *state);
int test_range_bit(struct extent_io_tree *tree, u64 start, u64 end,
unsigned bits, int filled,
struct extent_state *cached_state);
int clear_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
unsigned bits, struct extent_changeset *changeset);
int clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
unsigned bits, int wake, int delete,
struct extent_state **cached);
int __clear_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
unsigned bits, int wake, int delete,
struct extent_state **cached, gfp_t mask,
struct extent_changeset *changeset);
static inline int unlock_extent(struct extent_io_tree *tree, u64 start, u64 end)
{
return clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, NULL);
}
static inline int unlock_extent_cached(struct extent_io_tree *tree, u64 start,
u64 end, struct extent_state **cached)
{
return __clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
GFP_NOFS, NULL);
}
static inline int unlock_extent_cached_atomic(struct extent_io_tree *tree,
u64 start, u64 end, struct extent_state **cached)
{
return __clear_extent_bit(tree, start, end, EXTENT_LOCKED, 1, 0, cached,
GFP_ATOMIC, NULL);
}
static inline int clear_extent_bits(struct extent_io_tree *tree, u64 start,
u64 end, unsigned bits)
{
int wake = 0;
if (bits & EXTENT_LOCKED)
wake = 1;
return clear_extent_bit(tree, start, end, bits, wake, 0, NULL);
}
int set_record_extent_bits(struct extent_io_tree *tree, u64 start, u64 end,
unsigned bits, struct extent_changeset *changeset);
int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
unsigned bits, u64 *failed_start,
struct extent_state **cached_state, gfp_t mask);
int set_extent_bits_nowait(struct extent_io_tree *tree, u64 start, u64 end,
unsigned bits);
static inline int set_extent_bits(struct extent_io_tree *tree, u64 start,
u64 end, unsigned bits)
{
return set_extent_bit(tree, start, end, bits, NULL, NULL, GFP_NOFS);
}
static inline int clear_extent_uptodate(struct extent_io_tree *tree, u64 start,
u64 end, struct extent_state **cached_state)
{
return __clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0,
cached_state, GFP_NOFS, NULL);
}
static inline int set_extent_dirty(struct extent_io_tree *tree, u64 start,
u64 end, gfp_t mask)
{
return set_extent_bit(tree, start, end, EXTENT_DIRTY, NULL,
NULL, mask);
}
static inline int clear_extent_dirty(struct extent_io_tree *tree, u64 start,
u64 end, struct extent_state **cached)
{
return clear_extent_bit(tree, start, end,
EXTENT_DIRTY | EXTENT_DELALLOC |
EXTENT_DO_ACCOUNTING, 0, 0, cached);
}
int convert_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
unsigned bits, unsigned clear_bits,
struct extent_state **cached_state);
static inline int set_extent_delalloc(struct extent_io_tree *tree, u64 start,
u64 end, unsigned int extra_bits,
struct extent_state **cached_state)
{
return set_extent_bit(tree, start, end,
EXTENT_DELALLOC | EXTENT_UPTODATE | extra_bits,
NULL, cached_state, GFP_NOFS);
}
static inline int set_extent_defrag(struct extent_io_tree *tree, u64 start,
u64 end, struct extent_state **cached_state)
{
return set_extent_bit(tree, start, end,
EXTENT_DELALLOC | EXTENT_UPTODATE | EXTENT_DEFRAG,
NULL, cached_state, GFP_NOFS);
}
static inline int set_extent_new(struct extent_io_tree *tree, u64 start,
u64 end)
{
return set_extent_bit(tree, start, end, EXTENT_NEW, NULL, NULL,
GFP_NOFS);
}
static inline int set_extent_uptodate(struct extent_io_tree *tree, u64 start,
u64 end, struct extent_state **cached_state, gfp_t mask)
{
return set_extent_bit(tree, start, end, EXTENT_UPTODATE, NULL,
cached_state, mask);
}
int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
u64 *start_ret, u64 *end_ret, unsigned bits,
struct extent_state **cached_state);
void find_first_clear_extent_bit(struct extent_io_tree *tree, u64 start,
u64 *start_ret, u64 *end_ret, unsigned bits);
int extent_invalidatepage(struct extent_io_tree *tree,
struct page *page, unsigned long offset);
bool btrfs_find_delalloc_range(struct extent_io_tree *tree, u64 *start,
u64 *end, u64 max_bytes,
struct extent_state **cached_state);
/* This should be reworked in the future and put elsewhere. */
int get_state_failrec(struct extent_io_tree *tree, u64 start,
struct io_failure_record **failrec);
int set_state_failrec(struct extent_io_tree *tree, u64 start,
struct io_failure_record *failrec);
void btrfs_free_io_failure_record(struct btrfs_inode *inode, u64 start,
u64 end);
int btrfs_get_io_failure_record(struct inode *inode, u64 start, u64 end,
struct io_failure_record **failrec_ret);
int free_io_failure(struct extent_io_tree *failure_tree,
struct extent_io_tree *io_tree,
struct io_failure_record *rec);
int clean_io_failure(struct btrfs_fs_info *fs_info,
struct extent_io_tree *failure_tree,
struct extent_io_tree *io_tree, u64 start,
struct page *page, u64 ino, unsigned int pg_offset);
#endif /* BTRFS_EXTENT_IO_TREE_H */
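The EXTENT_* values in this new header are single-bit flags combined into the `unsigned state` word of an extent_state, which is why compound masks like EXTENT_DO_ACCOUNTING are just ORs of the individual bits, and why the chunk allocation tree can alias CHUNK_ALLOCATED/CHUNK_TRIMMED onto existing bits it never uses with their original meaning. A tiny standalone sketch of the same flag-word convention (userspace C, all names invented for illustration):

#include <assert.h>
#include <stdio.h>

#define ST_DIRTY	(1U << 0)
#define ST_UPTODATE	(1U << 1)
#define ST_LOCKED	(1U << 2)
#define ST_DEFRAG	(1U << 5)

/* Compound mask, analogous to EXTENT_DO_ACCOUNTING */
#define ST_ACCOUNTING	(ST_DIRTY | ST_DEFRAG)

/* Alias reuse, analogous to CHUNK_ALLOCATED/CHUNK_TRIMMED */
#define MY_ALLOCATED	ST_DIRTY
#define MY_TRIMMED	ST_DEFRAG

int main(void)
{
	unsigned state = 0;

	state |= ST_DIRTY | ST_UPTODATE;	/* set bits */
	assert(state & MY_ALLOCATED);		/* alias sees the same bit */
	state &= ~ST_ACCOUNTING;		/* clear a compound mask */
	printf("state=%#x dirty=%u\n", state, !!(state & ST_DIRTY));
	return 0;
}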
...@@ -214,9 +214,13 @@ static int mergable_maps(struct extent_map *prev, struct extent_map *next)
 	ASSERT(next->block_start != EXTENT_MAP_DELALLOC &&
 	       prev->block_start != EXTENT_MAP_DELALLOC);
+	if (prev->map_lookup || next->map_lookup)
+		ASSERT(test_bit(EXTENT_FLAG_FS_MAPPING, &prev->flags) &&
+		       test_bit(EXTENT_FLAG_FS_MAPPING, &next->flags));
 	if (extent_map_end(prev) == next->start &&
 	    prev->flags == next->flags &&
-	    prev->bdev == next->bdev &&
+	    prev->map_lookup == next->map_lookup &&
 	    ((next->block_start == EXTENT_MAP_HOLE &&
 	      prev->block_start == EXTENT_MAP_HOLE) ||
 	     (next->block_start == EXTENT_MAP_INLINE &&
...
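The merge test above boils down to: two mappings are mergeable only when they are byte-adjacent (end of prev equals start of next), carry identical flags, and refer to the same backing, which after this change is map_lookup rather than bdev. A minimal model of that adjacency check (illustrative types, not the btrfs structs):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct mapping {
	uint64_t start;
	uint64_t len;
	unsigned long flags;
	const void *backing;	/* stands in for map_lookup */
};

static uint64_t mapping_end(const struct mapping *m)
{
	return m->start + m->len;
}

/* Same shape as mergable_maps(): adjacency + equal flags + same backing */
static bool mergeable(const struct mapping *prev, const struct mapping *next)
{
	return mapping_end(prev) == next->start &&
	       prev->flags == next->flags &&
	       prev->backing == next->backing;
}

int main(void)
{
	struct mapping a = { 0, 4096, 0, NULL };
	struct mapping b = { 4096, 4096, 0, NULL };

	printf("mergeable: %d\n", mergeable(&a, &b));	/* prints 1 */
	return 0;
}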
...@@ -42,15 +42,8 @@ struct extent_map {
 	u64 block_len;
 	u64 generation;
 	unsigned long flags;
-	union {
-		struct block_device *bdev;
-		/*
-		 * used for chunk mappings
-		 * flags & EXTENT_FLAG_FS_MAPPING must be set
-		 */
-		struct map_lookup *map_lookup;
-	};
+	/* Used for chunk mappings, flag EXTENT_FLAG_FS_MAPPING must be set */
+	struct map_lookup *map_lookup;
 	refcount_t refs;
 	unsigned int compress_type;
 	struct list_head list;
...
...@@ -945,7 +945,6 @@ void btrfs_extent_item_to_extent_map(struct btrfs_inode *inode,
 	u8 type = btrfs_file_extent_type(leaf, fi);
 	int compress_type = btrfs_file_extent_compression(leaf, fi);
-	em->bdev = fs_info->fs_devices->latest_bdev;
 	btrfs_item_key_to_cpu(leaf, &key, slot);
 	extent_start = key.offset;
...
...@@ -296,7 +296,7 @@ static int __btrfs_run_defrag_inode(struct btrfs_fs_info *fs_info,
 	key.objectid = defrag->ino;
 	key.type = BTRFS_INODE_ITEM_KEY;
 	key.offset = 0;
-	inode = btrfs_iget(fs_info->sb, &key, inode_root, NULL);
+	inode = btrfs_iget(fs_info->sb, &key, inode_root);
 	if (IS_ERR(inode)) {
 		ret = PTR_ERR(inode);
 		goto cleanup;
...@@ -667,7 +667,6 @@ void btrfs_drop_extent_cache(struct btrfs_inode *inode, u64 start, u64 end,
 		}
 		split->generation = gen;
-		split->bdev = em->bdev;
 		split->flags = flags;
 		split->compress_type = em->compress_type;
 		replace_extent_mapping(em_tree, em, split, modified);
...@@ -680,7 +679,6 @@ void btrfs_drop_extent_cache(struct btrfs_inode *inode, u64 start, u64 end,
 			split->start = start + len;
 			split->len = em->start + em->len - (start + len);
-			split->bdev = em->bdev;
 			split->flags = flags;
 			split->compress_type = em->compress_type;
 			split->generation = gen;
...@@ -1636,6 +1634,7 @@ static noinline ssize_t btrfs_buffered_write(struct kiocb *iocb,
 			break;
 		}
+		only_release_metadata = false;
 		sector_offset = pos & (fs_info->sectorsize - 1);
 		reserve_bytes = round_up(write_bytes + sector_offset,
 					 fs_info->sectorsize);
...@@ -1791,7 +1790,6 @@ static noinline ssize_t btrfs_buffered_write(struct kiocb *iocb,
 			set_extent_bit(&BTRFS_I(inode)->io_tree, lockstart,
 				       lockend, EXTENT_NORESERVE, NULL,
 				       NULL, GFP_NOFS);
-			only_release_metadata = false;
 		}
 		btrfs_drop_pages(pages, num_pages);
...@@ -1903,9 +1901,10 @@ static ssize_t btrfs_file_write_iter(struct kiocb *iocb,
 	    (iocb->ki_flags & IOCB_NOWAIT))
 		return -EOPNOTSUPP;
-	if (!inode_trylock(inode)) {
-		if (iocb->ki_flags & IOCB_NOWAIT)
+	if (iocb->ki_flags & IOCB_NOWAIT) {
+		if (!inode_trylock(inode))
 			return -EAGAIN;
+	} else {
 		inode_lock(inode);
 	}
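IOCB_NOWAIT is the in-kernel reflection of userspace RWF_NOWAIT: the write must not block, so the reordered code above only trylocks in that case and returns -EAGAIN on contention, while the ordinary path takes the inode lock unconditionally. A small userspace demonstration of the corresponding flag via pwritev2(2) (Linux-specific; error handling abbreviated, file name invented):

#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("testfile", O_WRONLY | O_CREAT, 0644);
	char buf[] = "hello";
	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) - 1 };

	if (fd < 0)
		return 1;
	/* RWF_NOWAIT: fail with EAGAIN instead of blocking on locks or IO */
	if (pwritev2(fd, &iov, 1, 0, RWF_NOWAIT) < 0)
		fprintf(stderr, "non-blocking write failed: %s\n",
			strerror(errno));
	close(fd);
	return 0;
}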
...@@ -2359,7 +2358,6 @@ static int fill_holes(struct btrfs_trans_handle *trans,
 		hole_em->block_start = EXTENT_MAP_HOLE;
 		hole_em->block_len = 0;
 		hole_em->orig_block_len = 0;
-		hole_em->bdev = fs_info->fs_devices->latest_bdev;
 		hole_em->compress_type = BTRFS_COMPRESS_NONE;
 		hole_em->generation = trans->transid;
...@@ -3350,29 +3348,30 @@ static long btrfs_fallocate(struct file *file, int mode,
 	return ret;
 }
-static int find_desired_extent(struct inode *inode, loff_t *offset, int whence)
+static loff_t find_desired_extent(struct inode *inode, loff_t offset,
+				  int whence)
 {
 	struct btrfs_fs_info *fs_info = btrfs_sb(inode->i_sb);
 	struct extent_map *em = NULL;
 	struct extent_state *cached_state = NULL;
+	loff_t i_size = inode->i_size;
 	u64 lockstart;
 	u64 lockend;
 	u64 start;
 	u64 len;
 	int ret = 0;
-	if (inode->i_size == 0)
+	if (i_size == 0 || offset >= i_size)
 		return -ENXIO;
 	/*
-	 * *offset can be negative, in this case we start finding DATA/HOLE from
+	 * offset can be negative, in this case we start finding DATA/HOLE from
 	 * the very start of the file.
 	 */
-	start = max_t(loff_t, 0, *offset);
+	start = max_t(loff_t, 0, offset);
 	lockstart = round_down(start, fs_info->sectorsize);
-	lockend = round_up(i_size_read(inode),
-			   fs_info->sectorsize);
+	lockend = round_up(i_size, fs_info->sectorsize);
 	if (lockend <= lockstart)
 		lockend = lockstart + fs_info->sectorsize;
 	lockend--;
...@@ -3381,7 +3380,7 @@ static int find_desired_extent(struct inode *inode, loff_t *offset, int whence)
 	lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
 			 &cached_state);
-	while (start < inode->i_size) {
+	while (start < i_size) {
 		em = btrfs_get_extent_fiemap(BTRFS_I(inode), start, len);
 		if (IS_ERR(em)) {
 			ret = PTR_ERR(em);
...@@ -3404,46 +3403,39 @@ static int find_desired_extent(struct inode *inode, loff_t *offset, int whence)
 		cond_resched();
 	}
 	free_extent_map(em);
-	if (!ret) {
-		if (whence == SEEK_DATA && start >= inode->i_size)
-			ret = -ENXIO;
-		else
-			*offset = min_t(loff_t, start, inode->i_size);
-	}
 	unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
 			     &cached_state);
-	return ret;
+	if (ret) {
+		offset = ret;
+	} else {
+		if (whence == SEEK_DATA && start >= i_size)
+			offset = -ENXIO;
+		else
+			offset = min_t(loff_t, start, i_size);
+	}
+	return offset;
 }
 static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int whence)
 {
 	struct inode *inode = file->f_mapping->host;
-	int ret;
-	inode_lock(inode);
 	switch (whence) {
-	case SEEK_END:
-	case SEEK_CUR:
-		offset = generic_file_llseek(file, offset, whence);
-		goto out;
+	default:
+		return generic_file_llseek(file, offset, whence);
 	case SEEK_DATA:
 	case SEEK_HOLE:
-		if (offset >= i_size_read(inode)) {
-			inode_unlock(inode);
-			return -ENXIO;
-		}
-		ret = find_desired_extent(inode, &offset, whence);
-		if (ret) {
-			inode_unlock(inode);
-			return ret;
-		}
+		inode_lock_shared(inode);
+		offset = find_desired_extent(inode, offset, whence);
+		inode_unlock_shared(inode);
+		break;
 	}
-	offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
-out:
-	inode_unlock(inode);
-	return offset;
+	if (offset < 0)
+		return offset;
+	return vfs_setpos(file, offset, inode->i_sb->s_maxbytes);
 }
 static int btrfs_file_open(struct inode *inode, struct file *filp)
...
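From userspace this path is exercised by lseek(2) with SEEK_DATA/SEEK_HOLE, which after the rework above is served under only the shared inode lock, with the old ENXIO/offset-pointer handling folded into the loff_t return value. A quick sketch of walking a file's data extents with these whence values (assumes a filesystem that reports holes; file name invented):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	int fd = open(argc > 1 ? argv[1] : "testfile", O_RDONLY);
	off_t data = 0, hole;

	if (fd < 0)
		return 1;
	/* Alternate SEEK_DATA/SEEK_HOLE; lseek fails with ENXIO past EOF */
	while ((data = lseek(fd, data, SEEK_DATA)) >= 0) {
		hole = lseek(fd, data, SEEK_HOLE);
		if (hole < 0)
			break;
		printf("data: [%lld, %lld)\n", (long long)data,
		       (long long)hole);
		data = hole;
	}
	close(fd);
	return 0;
}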
...@@ -50,24 +50,23 @@ struct btrfs_io_ctl {
 	unsigned check_crcs:1;
 };
-struct inode *lookup_free_space_inode(
-		struct btrfs_block_group_cache *block_group,
+struct inode *lookup_free_space_inode(struct btrfs_block_group *block_group,
 		struct btrfs_path *path);
 int create_free_space_inode(struct btrfs_trans_handle *trans,
-			    struct btrfs_block_group_cache *block_group,
+			    struct btrfs_block_group *block_group,
 			    struct btrfs_path *path);
 int btrfs_check_trunc_cache_free_space(struct btrfs_fs_info *fs_info,
				       struct btrfs_block_rsv *rsv);
 int btrfs_truncate_free_space_cache(struct btrfs_trans_handle *trans,
-				    struct btrfs_block_group_cache *block_group,
+				    struct btrfs_block_group *block_group,
				    struct inode *inode);
-int load_free_space_cache(struct btrfs_block_group_cache *block_group);
+int load_free_space_cache(struct btrfs_block_group *block_group);
 int btrfs_wait_cache_io(struct btrfs_trans_handle *trans,
-			struct btrfs_block_group_cache *block_group,
+			struct btrfs_block_group *block_group,
			struct btrfs_path *path);
 int btrfs_write_out_cache(struct btrfs_trans_handle *trans,
-			  struct btrfs_block_group_cache *block_group,
+			  struct btrfs_block_group *block_group,
			  struct btrfs_path *path);
 struct inode *lookup_free_ino_inode(struct btrfs_root *root,
				    struct btrfs_path *path);
...@@ -81,42 +80,40 @@ int btrfs_write_out_ino_cache(struct btrfs_root *root,
			      struct btrfs_path *path,
			      struct inode *inode);
-void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group);
+void btrfs_init_free_space_ctl(struct btrfs_block_group *block_group);
 int __btrfs_add_free_space(struct btrfs_fs_info *fs_info,
			   struct btrfs_free_space_ctl *ctl,
			   u64 bytenr, u64 size);
-int btrfs_add_free_space(struct btrfs_block_group_cache *block_group,
+int btrfs_add_free_space(struct btrfs_block_group *block_group,
			 u64 bytenr, u64 size);
-int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
+int btrfs_remove_free_space(struct btrfs_block_group *block_group,
			    u64 bytenr, u64 size);
 void __btrfs_remove_free_space_cache(struct btrfs_free_space_ctl *ctl);
-void btrfs_remove_free_space_cache(struct btrfs_block_group_cache
-				   *block_group);
-u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
+void btrfs_remove_free_space_cache(struct btrfs_block_group *block_group);
+u64 btrfs_find_space_for_alloc(struct btrfs_block_group *block_group,
			       u64 offset, u64 bytes, u64 empty_size,
			       u64 *max_extent_size);
 u64 btrfs_find_ino_for_alloc(struct btrfs_root *fs_root);
-void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
+void btrfs_dump_free_space(struct btrfs_block_group *block_group,
			   u64 bytes);
-int btrfs_find_space_cluster(struct btrfs_block_group_cache *block_group,
+int btrfs_find_space_cluster(struct btrfs_block_group *block_group,
			     struct btrfs_free_cluster *cluster,
			     u64 offset, u64 bytes, u64 empty_size);
 void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster);
-u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
+u64 btrfs_alloc_from_cluster(struct btrfs_block_group *block_group,
			     struct btrfs_free_cluster *cluster, u64 bytes,
			     u64 min_start, u64 *max_extent_size);
 int btrfs_return_cluster_to_free_space(
-			       struct btrfs_block_group_cache *block_group,
+			       struct btrfs_block_group *block_group,
			       struct btrfs_free_cluster *cluster);
-int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group,
+int btrfs_trim_block_group(struct btrfs_block_group *block_group,
			   u64 *trimmed, u64 start, u64 end, u64 minlen);
 /* Support functions for running our sanity tests */
 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
-int test_add_free_space_entry(struct btrfs_block_group_cache *cache,
+int test_add_free_space_entry(struct btrfs_block_group *cache,
			      u64 offset, u64 bytes, bool bitmap);
-int test_check_exists(struct btrfs_block_group_cache *cache,
-		      u64 offset, u64 bytes);
+int test_check_exists(struct btrfs_block_group *cache, u64 offset, u64 bytes);
 #endif
 #endif
...@@ -16,14 +16,14 @@ struct btrfs_caching_control;
 #define BTRFS_FREE_SPACE_BITMAP_SIZE 256
 #define BTRFS_FREE_SPACE_BITMAP_BITS (BTRFS_FREE_SPACE_BITMAP_SIZE * BITS_PER_BYTE)
-void set_free_space_tree_thresholds(struct btrfs_block_group_cache *block_group);
+void set_free_space_tree_thresholds(struct btrfs_block_group *block_group);
 int btrfs_create_free_space_tree(struct btrfs_fs_info *fs_info);
 int btrfs_clear_free_space_tree(struct btrfs_fs_info *fs_info);
 int load_free_space_tree(struct btrfs_caching_control *caching_ctl);
 int add_block_group_free_space(struct btrfs_trans_handle *trans,
-			       struct btrfs_block_group_cache *block_group);
+			       struct btrfs_block_group *block_group);
 int remove_block_group_free_space(struct btrfs_trans_handle *trans,
-				  struct btrfs_block_group_cache *block_group);
+				  struct btrfs_block_group *block_group);
 int add_to_free_space_tree(struct btrfs_trans_handle *trans,
			   u64 start, u64 size);
 int remove_from_free_space_tree(struct btrfs_trans_handle *trans,
...@@ -32,21 +32,21 @@ int remove_from_free_space_tree(struct btrfs_trans_handle *trans,
 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
 struct btrfs_free_space_info *
 search_free_space_info(struct btrfs_trans_handle *trans,
-		       struct btrfs_block_group_cache *block_group,
+		       struct btrfs_block_group *block_group,
		       struct btrfs_path *path, int cow);
 int __add_to_free_space_tree(struct btrfs_trans_handle *trans,
-			     struct btrfs_block_group_cache *block_group,
+			     struct btrfs_block_group *block_group,
			     struct btrfs_path *path, u64 start, u64 size);
 int __remove_from_free_space_tree(struct btrfs_trans_handle *trans,
-				  struct btrfs_block_group_cache *block_group,
+				  struct btrfs_block_group *block_group,
				  struct btrfs_path *path, u64 start, u64 size);
 int convert_free_space_to_bitmaps(struct btrfs_trans_handle *trans,
-				  struct btrfs_block_group_cache *block_group,
+				  struct btrfs_block_group *block_group,
				  struct btrfs_path *path);
 int convert_free_space_to_extents(struct btrfs_trans_handle *trans,
-				  struct btrfs_block_group_cache *block_group,
+				  struct btrfs_block_group *block_group,
				  struct btrfs_path *path);
-int free_space_test_bit(struct btrfs_block_group_cache *block_group,
+int free_space_test_bit(struct btrfs_block_group *block_group,
			struct btrfs_path *path, u64 offset);
 #endif
...
...@@ -6,6 +6,8 @@
 #ifndef BTRFS_LOCKING_H
 #define BTRFS_LOCKING_H
+#include "extent_io.h"
 #define BTRFS_WRITE_LOCK 1
 #define BTRFS_READ_LOCK 2
 #define BTRFS_WRITE_LOCK_BLOCKING 3
...@@ -19,11 +21,20 @@ void btrfs_tree_read_unlock(struct extent_buffer *eb);
 void btrfs_tree_read_unlock_blocking(struct extent_buffer *eb);
 void btrfs_set_lock_blocking_read(struct extent_buffer *eb);
 void btrfs_set_lock_blocking_write(struct extent_buffer *eb);
-void btrfs_assert_tree_locked(struct extent_buffer *eb);
 int btrfs_try_tree_read_lock(struct extent_buffer *eb);
 int btrfs_try_tree_write_lock(struct extent_buffer *eb);
 int btrfs_tree_read_lock_atomic(struct extent_buffer *eb);
+#ifdef CONFIG_BTRFS_DEBUG
+static inline void btrfs_assert_tree_locked(struct extent_buffer *eb) {
+	BUG_ON(!eb->write_locks);
+}
+#else
+static inline void btrfs_assert_tree_locked(struct extent_buffer *eb) { }
+#endif
+void btrfs_set_path_blocking(struct btrfs_path *p);
+void btrfs_unlock_up_safe(struct btrfs_path *path, int level);
 static inline void btrfs_tree_unlock_rw(struct extent_buffer *eb, int rw)
 {
...
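Turning btrfs_assert_tree_locked() into a pair of static inlines under #ifdef CONFIG_BTRFS_DEBUG keeps a single spelling at every call site while the check compiles to nothing in production builds. The same pattern in a generic, standalone form (macro and names invented for illustration):

#include <assert.h>
#include <stdio.h>

struct buffer {
	int write_locks;
};

#ifdef MY_DEBUG
/* Debug build: really verify the invariant */
static inline void assert_locked(const struct buffer *b)
{
	assert(b->write_locks);
}
#else
/* Production build: compiles away, argument still type-checked */
static inline void assert_locked(const struct buffer *b) { (void)b; }
#endif

int main(void)
{
	struct buffer b = { .write_locks = 1 };

	assert_locked(&b);	/* a no-op unless built with -DMY_DEBUG */
	puts("ok");
	return 0;
}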