Commit d458b054 authored by Qu Wenruo, committed by Josef Bacik

btrfs: Clean up the "_struct" suffix in btrfs_workqueue

Since the "_struct" suffix was mainly used to distinguish the new
btrfs_work from the original one, there is no need to keep it now
that all btrfs_workers have been converted to btrfs_workqueue.

This patch also fixes up some code whose style had been distorted by
the overly long "_struct" suffix.
Signed-off-by: Qu Wenruo <quwenruo@cn.fujitsu.com>
Tested-by: David Sterba <dsterba@suse.cz>
Signed-off-by: Josef Bacik <jbacik@fb.com>
parent a046e9c8
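
For context, below is a minimal usage sketch of the renamed API as it stands after this patch. The btrfs workqueue functions and their signatures are taken from the declarations visible in the diff; the `example_*` identifiers and the flag/threshold values are hypothetical, made up purely for illustration.

```c
/*
 * Hypothetical in-kernel caller of the renamed btrfs workqueue API
 * (a sketch, not part of this commit). Types and functions match the
 * post-patch declarations; only the "_struct" suffix is gone.
 */
#include "async-thread.h"

static void example_work_fn(struct btrfs_work *work)
{
	/* runs in workqueue context; 'work' is embedded in caller state */
}

static int example_setup(void)
{
	struct btrfs_workqueue *wq;
	static struct btrfs_work work;	/* must outlive execution */

	/* name, alloc_workqueue() flags, max_active, queue-depth threshold */
	wq = btrfs_alloc_workqueue("example", 0, 8, 32);
	if (!wq)
		return -ENOMEM;

	/* ordered_func/ordered_free are optional; NULL skips ordered mode */
	btrfs_init_work(&work, example_work_fn, NULL, NULL);
	btrfs_queue_work(wq, &work);

	/* destroy_workqueue() underneath drains queued work before freeing */
	btrfs_destroy_workqueue(wq);
	return 0;
}
```

Since this commit is a mechanical rename, existing callers need only the s/_struct// substitution; behavior is unchanged.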
@@ -32,7 +32,7 @@
 #define NO_THRESHOLD (-1)
 #define DFT_THRESHOLD (32)
 
-struct __btrfs_workqueue_struct {
+struct __btrfs_workqueue {
 	struct workqueue_struct *normal_wq;
 	/* List head pointing to ordered work list */
 	struct list_head ordered_list;
@@ -49,15 +49,15 @@ struct __btrfs_workqueue_struct {
 	spinlock_t thres_lock;
 };
 
-struct btrfs_workqueue_struct {
-	struct __btrfs_workqueue_struct *normal;
-	struct __btrfs_workqueue_struct *high;
+struct btrfs_workqueue {
+	struct __btrfs_workqueue *normal;
+	struct __btrfs_workqueue *high;
 };
 
-static inline struct __btrfs_workqueue_struct
+static inline struct __btrfs_workqueue
 *__btrfs_alloc_workqueue(char *name, int flags, int max_active, int thresh)
 {
-	struct __btrfs_workqueue_struct *ret = kzalloc(sizeof(*ret), GFP_NOFS);
+	struct __btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_NOFS);
 
 	if (unlikely(!ret))
 		return NULL;
@@ -95,14 +95,14 @@ static inline struct __btrfs_workqueue_struct
 }
 
 static inline void
-__btrfs_destroy_workqueue(struct __btrfs_workqueue_struct *wq);
+__btrfs_destroy_workqueue(struct __btrfs_workqueue *wq);
 
-struct btrfs_workqueue_struct *btrfs_alloc_workqueue(char *name,
-						     int flags,
-						     int max_active,
-						     int thresh)
+struct btrfs_workqueue *btrfs_alloc_workqueue(char *name,
+					      int flags,
+					      int max_active,
+					      int thresh)
 {
-	struct btrfs_workqueue_struct *ret = kzalloc(sizeof(*ret), GFP_NOFS);
+	struct btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_NOFS);
 
 	if (unlikely(!ret))
 		return NULL;
@@ -131,7 +131,7 @@ struct btrfs_workqueue_struct *btrfs_alloc_workqueue(char *name,
  * This hook WILL be called in IRQ handler context,
  * so workqueue_set_max_active MUST NOT be called in this hook
  */
-static inline void thresh_queue_hook(struct __btrfs_workqueue_struct *wq)
+static inline void thresh_queue_hook(struct __btrfs_workqueue *wq)
 {
 	if (wq->thresh == NO_THRESHOLD)
 		return;
@@ -143,7 +143,7 @@ static inline void thresh_queue_hook(struct __btrfs_workqueue_struct *wq)
  * This hook is called in kthread content.
  * So workqueue_set_max_active is called here.
  */
-static inline void thresh_exec_hook(struct __btrfs_workqueue_struct *wq)
+static inline void thresh_exec_hook(struct __btrfs_workqueue *wq)
 {
 	int new_max_active;
 	long pending;
@@ -186,10 +186,10 @@ static inline void thresh_exec_hook(struct __btrfs_workqueue_struct *wq)
 	}
 }
 
-static void run_ordered_work(struct __btrfs_workqueue_struct *wq)
+static void run_ordered_work(struct __btrfs_workqueue *wq)
 {
 	struct list_head *list = &wq->ordered_list;
-	struct btrfs_work_struct *work;
+	struct btrfs_work *work;
 	spinlock_t *lock = &wq->list_lock;
 	unsigned long flags;
@@ -197,7 +197,7 @@ static void run_ordered_work(struct __btrfs_workqueue_struct *wq)
 		spin_lock_irqsave(lock, flags);
 		if (list_empty(list))
 			break;
-		work = list_entry(list->next, struct btrfs_work_struct,
+		work = list_entry(list->next, struct btrfs_work,
 				  ordered_list);
 		if (!test_bit(WORK_DONE_BIT, &work->flags))
 			break;
@@ -229,11 +229,11 @@ static void run_ordered_work(struct __btrfs_workqueue_struct *wq)
 
 static void normal_work_helper(struct work_struct *arg)
 {
-	struct btrfs_work_struct *work;
-	struct __btrfs_workqueue_struct *wq;
+	struct btrfs_work *work;
+	struct __btrfs_workqueue *wq;
 	int need_order = 0;
 
-	work = container_of(arg, struct btrfs_work_struct, normal_work);
+	work = container_of(arg, struct btrfs_work, normal_work);
 	/*
 	 * We should not touch things inside work in the following cases:
 	 * 1) after work->func() if it has no ordered_free
@@ -254,10 +254,10 @@ static void normal_work_helper(struct work_struct *arg)
 	}
 }
 
-void btrfs_init_work(struct btrfs_work_struct *work,
-		     void (*func)(struct btrfs_work_struct *),
-		     void (*ordered_func)(struct btrfs_work_struct *),
-		     void (*ordered_free)(struct btrfs_work_struct *))
+void btrfs_init_work(struct btrfs_work *work,
+		     void (*func)(struct btrfs_work *),
+		     void (*ordered_func)(struct btrfs_work *),
+		     void (*ordered_free)(struct btrfs_work *))
 {
 	work->func = func;
 	work->ordered_func = ordered_func;
@@ -267,8 +267,8 @@ void btrfs_init_work(struct btrfs_work_struct *work,
 	work->flags = 0;
 }
 
-static inline void __btrfs_queue_work(struct __btrfs_workqueue_struct *wq,
-				      struct btrfs_work_struct *work)
+static inline void __btrfs_queue_work(struct __btrfs_workqueue *wq,
+				      struct btrfs_work *work)
 {
 	unsigned long flags;
@@ -282,10 +282,10 @@ static inline void __btrfs_queue_work(struct __btrfs_workqueue_struct *wq,
 	queue_work(wq->normal_wq, &work->normal_work);
 }
 
-void btrfs_queue_work(struct btrfs_workqueue_struct *wq,
-		      struct btrfs_work_struct *work)
+void btrfs_queue_work(struct btrfs_workqueue *wq,
+		      struct btrfs_work *work)
 {
-	struct __btrfs_workqueue_struct *dest_wq;
+	struct __btrfs_workqueue *dest_wq;
 
 	if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags) && wq->high)
 		dest_wq = wq->high;
@@ -295,13 +295,13 @@ void btrfs_queue_work(struct btrfs_workqueue_struct *wq,
 }
 
 static inline void
-__btrfs_destroy_workqueue(struct __btrfs_workqueue_struct *wq)
+__btrfs_destroy_workqueue(struct __btrfs_workqueue *wq)
 {
 	destroy_workqueue(wq->normal_wq);
 	kfree(wq);
 }
 
-void btrfs_destroy_workqueue(struct btrfs_workqueue_struct *wq)
+void btrfs_destroy_workqueue(struct btrfs_workqueue *wq)
 {
 	if (!wq)
 		return;
@@ -310,14 +310,14 @@ void btrfs_destroy_workqueue(struct btrfs_workqueue_struct *wq)
 	__btrfs_destroy_workqueue(wq->normal);
 }
 
-void btrfs_workqueue_set_max(struct btrfs_workqueue_struct *wq, int max)
+void btrfs_workqueue_set_max(struct btrfs_workqueue *wq, int max)
 {
 	wq->normal->max_active = max;
 	if (wq->high)
 		wq->high->max_active = max;
 }
 
-void btrfs_set_work_high_priority(struct btrfs_work_struct *work)
+void btrfs_set_work_high_priority(struct btrfs_work *work)
 {
 	set_bit(WORK_HIGH_PRIO_BIT, &work->flags);
 }
@@ -20,33 +20,33 @@
 #ifndef __BTRFS_ASYNC_THREAD_
 #define __BTRFS_ASYNC_THREAD_
 
-struct btrfs_workqueue_struct;
+struct btrfs_workqueue;
 /* Internal use only */
-struct __btrfs_workqueue_struct;
+struct __btrfs_workqueue;
 
-struct btrfs_work_struct {
-	void (*func)(struct btrfs_work_struct *arg);
-	void (*ordered_func)(struct btrfs_work_struct *arg);
-	void (*ordered_free)(struct btrfs_work_struct *arg);
+struct btrfs_work {
+	void (*func)(struct btrfs_work *arg);
+	void (*ordered_func)(struct btrfs_work *arg);
+	void (*ordered_free)(struct btrfs_work *arg);
 
 	/* Don't touch things below */
 	struct work_struct normal_work;
 	struct list_head ordered_list;
-	struct __btrfs_workqueue_struct *wq;
+	struct __btrfs_workqueue *wq;
 	unsigned long flags;
 };
 
-struct btrfs_workqueue_struct *btrfs_alloc_workqueue(char *name,
-						     int flags,
-						     int max_active,
-						     int thresh);
-void btrfs_init_work(struct btrfs_work_struct *work,
-		     void (*func)(struct btrfs_work_struct *),
-		     void (*ordered_func)(struct btrfs_work_struct *),
-		     void (*ordered_free)(struct btrfs_work_struct *));
-void btrfs_queue_work(struct btrfs_workqueue_struct *wq,
-		      struct btrfs_work_struct *work);
-void btrfs_destroy_workqueue(struct btrfs_workqueue_struct *wq);
-void btrfs_workqueue_set_max(struct btrfs_workqueue_struct *wq, int max);
-void btrfs_set_work_high_priority(struct btrfs_work_struct *work);
+struct btrfs_workqueue *btrfs_alloc_workqueue(char *name,
+					      int flags,
+					      int max_active,
+					      int thresh);
+void btrfs_init_work(struct btrfs_work *work,
+		     void (*func)(struct btrfs_work *),
+		     void (*ordered_func)(struct btrfs_work *),
+		     void (*ordered_free)(struct btrfs_work *));
+void btrfs_queue_work(struct btrfs_workqueue *wq,
+		      struct btrfs_work *work);
+void btrfs_destroy_workqueue(struct btrfs_workqueue *wq);
+void btrfs_workqueue_set_max(struct btrfs_workqueue *wq, int max);
+void btrfs_set_work_high_priority(struct btrfs_work *work);
 #endif
@@ -1221,7 +1221,7 @@ struct btrfs_caching_control {
 	struct list_head list;
 	struct mutex mutex;
 	wait_queue_head_t wait;
-	struct btrfs_work_struct work;
+	struct btrfs_work work;
 	struct btrfs_block_group_cache *block_group;
 	u64 progress;
 	atomic_t count;
@@ -1504,27 +1504,27 @@ struct btrfs_fs_info {
 	 * A third pool does submit_bio to avoid deadlocking with the other
 	 * two
 	 */
-	struct btrfs_workqueue_struct *workers;
-	struct btrfs_workqueue_struct *delalloc_workers;
-	struct btrfs_workqueue_struct *flush_workers;
-	struct btrfs_workqueue_struct *endio_workers;
-	struct btrfs_workqueue_struct *endio_meta_workers;
-	struct btrfs_workqueue_struct *endio_raid56_workers;
-	struct btrfs_workqueue_struct *rmw_workers;
-	struct btrfs_workqueue_struct *endio_meta_write_workers;
-	struct btrfs_workqueue_struct *endio_write_workers;
-	struct btrfs_workqueue_struct *endio_freespace_worker;
-	struct btrfs_workqueue_struct *submit_workers;
-	struct btrfs_workqueue_struct *caching_workers;
-	struct btrfs_workqueue_struct *readahead_workers;
+	struct btrfs_workqueue *workers;
+	struct btrfs_workqueue *delalloc_workers;
+	struct btrfs_workqueue *flush_workers;
+	struct btrfs_workqueue *endio_workers;
+	struct btrfs_workqueue *endio_meta_workers;
+	struct btrfs_workqueue *endio_raid56_workers;
+	struct btrfs_workqueue *rmw_workers;
+	struct btrfs_workqueue *endio_meta_write_workers;
+	struct btrfs_workqueue *endio_write_workers;
+	struct btrfs_workqueue *endio_freespace_worker;
+	struct btrfs_workqueue *submit_workers;
+	struct btrfs_workqueue *caching_workers;
+	struct btrfs_workqueue *readahead_workers;
 
 	/*
 	 * fixup workers take dirty pages that didn't properly go through
 	 * the cow mechanism and make them safe to write. It happens
	 * for the sys_munmap function call path
 	 */
-	struct btrfs_workqueue_struct *fixup_workers;
-	struct btrfs_workqueue_struct *delayed_workers;
+	struct btrfs_workqueue *fixup_workers;
+	struct btrfs_workqueue *delayed_workers;
 	struct task_struct *transaction_kthread;
 	struct task_struct *cleaner_kthread;
 	int thread_pool_size;
@@ -1604,9 +1604,9 @@ struct btrfs_fs_info {
 	atomic_t scrub_cancel_req;
 	wait_queue_head_t scrub_pause_wait;
 	int scrub_workers_refcnt;
-	struct btrfs_workqueue_struct *scrub_workers;
-	struct btrfs_workqueue_struct *scrub_wr_completion_workers;
-	struct btrfs_workqueue_struct *scrub_nocow_workers;
+	struct btrfs_workqueue *scrub_workers;
+	struct btrfs_workqueue *scrub_wr_completion_workers;
+	struct btrfs_workqueue *scrub_nocow_workers;
 #ifdef CONFIG_BTRFS_FS_CHECK_INTEGRITY
 	u32 check_integrity_print_mask;
@@ -1647,9 +1647,9 @@ struct btrfs_fs_info {
 	/* qgroup rescan items */
 	struct mutex qgroup_rescan_lock; /* protects the progress item */
 	struct btrfs_key qgroup_rescan_progress;
-	struct btrfs_workqueue_struct *qgroup_rescan_workers;
+	struct btrfs_workqueue *qgroup_rescan_workers;
 	struct completion qgroup_rescan_completion;
-	struct btrfs_work_struct qgroup_rescan_work;
+	struct btrfs_work qgroup_rescan_work;
 
 	/* filesystem state */
 	unsigned long fs_state;
@@ -3680,7 +3680,7 @@ struct btrfs_delalloc_work {
 	int delay_iput;
 	struct completion completion;
 	struct list_head list;
-	struct btrfs_work_struct work;
+	struct btrfs_work work;
 };
 
 struct btrfs_delalloc_work *btrfs_alloc_delalloc_work(struct inode *inode,
...
@@ -1318,10 +1318,10 @@ void btrfs_remove_delayed_node(struct inode *inode)
 struct btrfs_async_delayed_work {
 	struct btrfs_delayed_root *delayed_root;
 	int nr;
-	struct btrfs_work_struct work;
+	struct btrfs_work work;
 };
 
-static void btrfs_async_run_delayed_root(struct btrfs_work_struct *work)
+static void btrfs_async_run_delayed_root(struct btrfs_work *work)
 {
 	struct btrfs_async_delayed_work *async_work;
 	struct btrfs_delayed_root *delayed_root;
...
@@ -55,7 +55,7 @@
 #endif
 
 static struct extent_io_ops btree_extent_io_ops;
-static void end_workqueue_fn(struct btrfs_work_struct *work);
+static void end_workqueue_fn(struct btrfs_work *work);
 static void free_fs_root(struct btrfs_root *root);
 static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
 				   int read_only);
@@ -86,7 +86,7 @@ struct end_io_wq {
 	int error;
 	int metadata;
 	struct list_head list;
-	struct btrfs_work_struct work;
+	struct btrfs_work work;
 };
 
 /*
@@ -108,7 +108,7 @@ struct async_submit_bio {
 	 * can't tell us where in the file the bio should go
 	 */
 	u64 bio_offset;
-	struct btrfs_work_struct work;
+	struct btrfs_work work;
 	int error;
 };
 
@@ -742,7 +742,7 @@ unsigned long btrfs_async_submit_limit(struct btrfs_fs_info *info)
 	return 256 * limit;
 }
 
-static void run_one_async_start(struct btrfs_work_struct *work)
+static void run_one_async_start(struct btrfs_work *work)
 {
 	struct async_submit_bio *async;
 	int ret;
@@ -755,7 +755,7 @@ static void run_one_async_start(struct btrfs_work_struct *work)
 		async->error = ret;
 }
 
-static void run_one_async_done(struct btrfs_work_struct *work)
+static void run_one_async_done(struct btrfs_work *work)
 {
 	struct btrfs_fs_info *fs_info;
 	struct async_submit_bio *async;
@@ -782,7 +782,7 @@ static void run_one_async_done(struct btrfs_work_struct *work)
 		       async->bio_offset);
 }
 
-static void run_one_async_free(struct btrfs_work_struct *work)
+static void run_one_async_free(struct btrfs_work *work)
 {
 	struct async_submit_bio *async;
@@ -1668,7 +1668,7 @@ static int setup_bdi(struct btrfs_fs_info *info, struct backing_dev_info *bdi)
  * called by the kthread helper functions to finally call the bio end_io
  * functions. This is where read checksum verification actually happens
 */
-static void end_workqueue_fn(struct btrfs_work_struct *work)
+static void end_workqueue_fn(struct btrfs_work *work)
 {
 	struct bio *bio;
 	struct end_io_wq *end_io_wq;
...
@@ -378,7 +378,7 @@ static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
 	return total_added;
 }
 
-static noinline void caching_thread(struct btrfs_work_struct *work)
+static noinline void caching_thread(struct btrfs_work *work)
 {
 	struct btrfs_block_group_cache *block_group;
 	struct btrfs_fs_info *fs_info;
...
@@ -324,7 +324,7 @@ struct async_cow {
 	u64 start;
 	u64 end;
 	struct list_head extents;
-	struct btrfs_work_struct work;
+	struct btrfs_work work;
 };
 
 static noinline int add_async_extent(struct async_cow *cow,
@@ -1000,7 +1000,7 @@ static noinline int cow_file_range(struct inode *inode,
 /*
  * work queue call back to started compression on a file and pages
  */
-static noinline void async_cow_start(struct btrfs_work_struct *work)
+static noinline void async_cow_start(struct btrfs_work *work)
 {
 	struct async_cow *async_cow;
 	int num_added = 0;
@@ -1018,7 +1018,7 @@ static noinline void async_cow_start(struct btrfs_work_struct *work)
 /*
  * work queue call back to submit previously compressed pages
  */
-static noinline void async_cow_submit(struct btrfs_work_struct *work)
+static noinline void async_cow_submit(struct btrfs_work *work)
 {
 	struct async_cow *async_cow;
 	struct btrfs_root *root;
@@ -1039,7 +1039,7 @@ static noinline void async_cow_submit(struct btrfs_work_struct *work)
 		submit_compressed_extents(async_cow->inode, async_cow);
 }
 
-static noinline void async_cow_free(struct btrfs_work_struct *work)
+static noinline void async_cow_free(struct btrfs_work *work)
 {
 	struct async_cow *async_cow;
 
 	async_cow = container_of(work, struct async_cow, work);
@@ -1748,10 +1748,10 @@ int btrfs_set_extent_delalloc(struct inode *inode, u64 start, u64 end,
 /* see btrfs_writepage_start_hook for details on why this is required */
 struct btrfs_writepage_fixup {
 	struct page *page;
-	struct btrfs_work_struct work;
+	struct btrfs_work work;
 };
 
-static void btrfs_writepage_fixup_worker(struct btrfs_work_struct *work)
+static void btrfs_writepage_fixup_worker(struct btrfs_work *work)
 {
 	struct btrfs_writepage_fixup *fixup;
 	struct btrfs_ordered_extent *ordered;
@@ -2750,7 +2750,7 @@ static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
 	return ret;
 }
 
-static void finish_ordered_fn(struct btrfs_work_struct *work)
+static void finish_ordered_fn(struct btrfs_work *work)
 {
 	struct btrfs_ordered_extent *ordered_extent;
 
 	ordered_extent = container_of(work, struct btrfs_ordered_extent, work);
@@ -2763,7 +2763,7 @@ static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end,
 	struct inode *inode = page->mapping->host;
 	struct btrfs_root *root = BTRFS_I(inode)->root;
 	struct btrfs_ordered_extent *ordered_extent = NULL;
-	struct btrfs_workqueue_struct *workers;
+	struct btrfs_workqueue *workers;
 
 	trace_btrfs_writepage_end_io_hook(page, start, end, uptodate);
@@ -8384,7 +8384,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
 	return ret;
 }
 
-static void btrfs_run_delalloc_work(struct btrfs_work_struct *work)
+static void btrfs_run_delalloc_work(struct btrfs_work *work)
 {
 	struct btrfs_delalloc_work *delalloc_work;
 	struct inode *inode;
...
@@ -576,7 +576,7 @@ void btrfs_remove_ordered_extent(struct inode *inode,
 	wake_up(&entry->wait);
 }
 
-static void btrfs_run_ordered_extent_work(struct btrfs_work_struct *work)
+static void btrfs_run_ordered_extent_work(struct btrfs_work *work)
 {
 	struct btrfs_ordered_extent *ordered;
...
@@ -130,10 +130,10 @@ struct btrfs_ordered_extent {
 	/* a per root list of all the pending ordered extents */
 	struct list_head root_extent_list;
 
-	struct btrfs_work_struct work;
+	struct btrfs_work work;
 
 	struct completion completion;
-	struct btrfs_work_struct flush_work;
+	struct btrfs_work flush_work;
 	struct list_head work_list;
 };
...
@@ -1984,7 +1984,7 @@ qgroup_rescan_leaf(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
 	return ret;
 }
 
-static void btrfs_qgroup_rescan_worker(struct btrfs_work_struct *work)
+static void btrfs_qgroup_rescan_worker(struct btrfs_work *work)
 {
 	struct btrfs_fs_info *fs_info = container_of(work, struct btrfs_fs_info,
 						     qgroup_rescan_work);
...
@@ -87,7 +87,7 @@ struct btrfs_raid_bio {
 	/*
 	 * for scheduling work in the helper threads
 	 */
-	struct btrfs_work_struct work;
+	struct btrfs_work work;
 
 	/*
 	 * bio list and bio_list_lock are used
@@ -166,8 +166,8 @@ struct btrfs_raid_bio {
 
 static int __raid56_parity_recover(struct btrfs_raid_bio *rbio);
 static noinline void finish_rmw(struct btrfs_raid_bio *rbio);
-static void rmw_work(struct btrfs_work_struct *work);
-static void read_rebuild_work(struct btrfs_work_struct *work);
+static void rmw_work(struct btrfs_work *work);
+static void read_rebuild_work(struct btrfs_work *work);
 static void async_rmw_stripe(struct btrfs_raid_bio *rbio);
 static void async_read_rebuild(struct btrfs_raid_bio *rbio);
 static int fail_bio_stripe(struct btrfs_raid_bio *rbio, struct bio *bio);
@@ -1588,7 +1588,7 @@ struct btrfs_plug_cb {
 	struct blk_plug_cb cb;
 	struct btrfs_fs_info *info;
 	struct list_head rbio_list;
-	struct btrfs_work_struct work;
+	struct btrfs_work work;
 };
 
 /*
@@ -1652,7 +1652,7 @@ static void run_plug(struct btrfs_plug_cb *plug)
  * if the unplug comes from schedule, we have to push the
  * work off to a helper thread
 */
-static void unplug_work(struct btrfs_work_struct *work)
+static void unplug_work(struct btrfs_work *work)
 {
 	struct btrfs_plug_cb *plug;
 
 	plug = container_of(work, struct btrfs_plug_cb, work);
@@ -2079,7 +2079,7 @@ int raid56_parity_recover(struct btrfs_root *root, struct bio *bio,
 }
 
-static void rmw_work(struct btrfs_work_struct *work)
+static void rmw_work(struct btrfs_work *work)
 {
 	struct btrfs_raid_bio *rbio;
@@ -2087,7 +2087,7 @@ static void rmw_work(struct btrfs_work_struct *work)
 	raid56_rmw_stripe(rbio);
 }
 
-static void read_rebuild_work(struct btrfs_work_struct *work)
+static void read_rebuild_work(struct btrfs_work *work)
 {
 	struct btrfs_raid_bio *rbio;
...
...@@ -91,8 +91,7 @@ struct reada_zone { ...@@ -91,8 +91,7 @@ struct reada_zone {
}; };
struct reada_machine_work { struct reada_machine_work {
struct btrfs_work_struct struct btrfs_work work;
work;
struct btrfs_fs_info *fs_info; struct btrfs_fs_info *fs_info;
}; };
...@@ -734,7 +733,7 @@ static int reada_start_machine_dev(struct btrfs_fs_info *fs_info, ...@@ -734,7 +733,7 @@ static int reada_start_machine_dev(struct btrfs_fs_info *fs_info,
} }
static void reada_start_machine_worker(struct btrfs_work_struct *work) static void reada_start_machine_worker(struct btrfs_work *work)
{ {
struct reada_machine_work *rmw; struct reada_machine_work *rmw;
struct btrfs_fs_info *fs_info; struct btrfs_fs_info *fs_info;
......
...@@ -96,8 +96,7 @@ struct scrub_bio { ...@@ -96,8 +96,7 @@ struct scrub_bio {
#endif #endif
int page_count; int page_count;
int next_free; int next_free;
struct btrfs_work_struct struct btrfs_work work;
work;
}; };
struct scrub_block { struct scrub_block {
...@@ -155,8 +154,7 @@ struct scrub_fixup_nodatasum { ...@@ -155,8 +154,7 @@ struct scrub_fixup_nodatasum {
struct btrfs_device *dev; struct btrfs_device *dev;
u64 logical; u64 logical;
struct btrfs_root *root; struct btrfs_root *root;
struct btrfs_work_struct struct btrfs_work work;
work;
int mirror_num; int mirror_num;
}; };
...@@ -174,8 +172,7 @@ struct scrub_copy_nocow_ctx { ...@@ -174,8 +172,7 @@ struct scrub_copy_nocow_ctx {
int mirror_num; int mirror_num;
u64 physical_for_dev_replace; u64 physical_for_dev_replace;
struct list_head inodes; struct list_head inodes;
struct btrfs_work_struct struct btrfs_work work;
work;
}; };
struct scrub_warning { struct scrub_warning {
...@@ -234,7 +231,7 @@ static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len, ...@@ -234,7 +231,7 @@ static int scrub_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
u64 gen, int mirror_num, u8 *csum, int force, u64 gen, int mirror_num, u8 *csum, int force,
u64 physical_for_dev_replace); u64 physical_for_dev_replace);
static void scrub_bio_end_io(struct bio *bio, int err); static void scrub_bio_end_io(struct bio *bio, int err);
static void scrub_bio_end_io_worker(struct btrfs_work_struct *work); static void scrub_bio_end_io_worker(struct btrfs_work *work);
static void scrub_block_complete(struct scrub_block *sblock); static void scrub_block_complete(struct scrub_block *sblock);
static void scrub_remap_extent(struct btrfs_fs_info *fs_info, static void scrub_remap_extent(struct btrfs_fs_info *fs_info,
u64 extent_logical, u64 extent_len, u64 extent_logical, u64 extent_len,
...@@ -251,14 +248,14 @@ static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx, ...@@ -251,14 +248,14 @@ static int scrub_add_page_to_wr_bio(struct scrub_ctx *sctx,
struct scrub_page *spage); struct scrub_page *spage);
static void scrub_wr_submit(struct scrub_ctx *sctx); static void scrub_wr_submit(struct scrub_ctx *sctx);
static void scrub_wr_bio_end_io(struct bio *bio, int err); static void scrub_wr_bio_end_io(struct bio *bio, int err);
static void scrub_wr_bio_end_io_worker(struct btrfs_work_struct *work); static void scrub_wr_bio_end_io_worker(struct btrfs_work *work);
static int write_page_nocow(struct scrub_ctx *sctx, static int write_page_nocow(struct scrub_ctx *sctx,
u64 physical_for_dev_replace, struct page *page); u64 physical_for_dev_replace, struct page *page);
static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root, static int copy_nocow_pages_for_inode(u64 inum, u64 offset, u64 root,
struct scrub_copy_nocow_ctx *ctx); struct scrub_copy_nocow_ctx *ctx);
static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len, static int copy_nocow_pages(struct scrub_ctx *sctx, u64 logical, u64 len,
int mirror_num, u64 physical_for_dev_replace); int mirror_num, u64 physical_for_dev_replace);
static void copy_nocow_pages_worker(struct btrfs_work_struct *work); static void copy_nocow_pages_worker(struct btrfs_work *work);
static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info); static void __scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info); static void scrub_blocked_if_needed(struct btrfs_fs_info *fs_info);
...@@ -737,7 +734,7 @@ static int scrub_fixup_readpage(u64 inum, u64 offset, u64 root, void *fixup_ctx) ...@@ -737,7 +734,7 @@ static int scrub_fixup_readpage(u64 inum, u64 offset, u64 root, void *fixup_ctx)
return -EIO; return -EIO;
} }
static void scrub_fixup_nodatasum(struct btrfs_work_struct *work) static void scrub_fixup_nodatasum(struct btrfs_work *work)
{ {
int ret; int ret;
struct scrub_fixup_nodatasum *fixup; struct scrub_fixup_nodatasum *fixup;
...@@ -1622,7 +1619,7 @@ static void scrub_wr_bio_end_io(struct bio *bio, int err) ...@@ -1622,7 +1619,7 @@ static void scrub_wr_bio_end_io(struct bio *bio, int err)
btrfs_queue_work(fs_info->scrub_wr_completion_workers, &sbio->work); btrfs_queue_work(fs_info->scrub_wr_completion_workers, &sbio->work);
} }
static void scrub_wr_bio_end_io_worker(struct btrfs_work_struct *work) static void scrub_wr_bio_end_io_worker(struct btrfs_work *work)
{ {
struct scrub_bio *sbio = container_of(work, struct scrub_bio, work); struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
struct scrub_ctx *sctx = sbio->sctx; struct scrub_ctx *sctx = sbio->sctx;
...@@ -2090,7 +2087,7 @@ static void scrub_bio_end_io(struct bio *bio, int err) ...@@ -2090,7 +2087,7 @@ static void scrub_bio_end_io(struct bio *bio, int err)
btrfs_queue_work(fs_info->scrub_workers, &sbio->work); btrfs_queue_work(fs_info->scrub_workers, &sbio->work);
} }
static void scrub_bio_end_io_worker(struct btrfs_work_struct *work) static void scrub_bio_end_io_worker(struct btrfs_work *work)
{ {
struct scrub_bio *sbio = container_of(work, struct scrub_bio, work); struct scrub_bio *sbio = container_of(work, struct scrub_bio, work);
struct scrub_ctx *sctx = sbio->sctx; struct scrub_ctx *sctx = sbio->sctx;
...@@ -3161,7 +3158,7 @@ static int record_inode_for_nocow(u64 inum, u64 offset, u64 root, void *ctx) ...@@ -3161,7 +3158,7 @@ static int record_inode_for_nocow(u64 inum, u64 offset, u64 root, void *ctx)
#define COPY_COMPLETE 1 #define COPY_COMPLETE 1
static void copy_nocow_pages_worker(struct btrfs_work_struct *work) static void copy_nocow_pages_worker(struct btrfs_work *work)
{ {
struct scrub_copy_nocow_ctx *nocow_ctx = struct scrub_copy_nocow_ctx *nocow_ctx =
container_of(work, struct scrub_copy_nocow_ctx, work); container_of(work, struct scrub_copy_nocow_ctx, work);
......
@@ -440,7 +440,7 @@ static noinline void run_scheduled_bios(struct btrfs_device *device)
 	blk_finish_plug(&plug);
 }
 
-static void pending_bios_fn(struct btrfs_work_struct *work)
+static void pending_bios_fn(struct btrfs_work *work)
 {
 	struct btrfs_device *device;
...
@@ -95,7 +95,7 @@ struct btrfs_device {
 	/* per-device scrub information */
 	struct scrub_ctx *scrub_device;
 
-	struct btrfs_work_struct work;
+	struct btrfs_work work;
 	struct rcu_head rcu;
 	struct work_struct rcu_work;
...