Commit a31b4a43 authored by Christoph Hellwig, committed by David Sterba

btrfs: simplify WQ_HIGHPRI handling in struct btrfs_workqueue

Just let the one caller that wants optional WQ_HIGHPRI handling allocate
a separate btrfs_workqueue for that.  This allows us to rename struct
__btrfs_workqueue to btrfs_workqueue, remove a pointer indirection and a
separate allocation for all btrfs_workqueue users, and generally simplify
the code.
Reviewed-by: Qu Wenruo <wqu@suse.com>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent a7b8e39c
...@@ -15,13 +15,12 @@ ...@@ -15,13 +15,12 @@
enum { enum {
WORK_DONE_BIT, WORK_DONE_BIT,
WORK_ORDER_DONE_BIT, WORK_ORDER_DONE_BIT,
WORK_HIGH_PRIO_BIT,
}; };
#define NO_THRESHOLD (-1) #define NO_THRESHOLD (-1)
#define DFT_THRESHOLD (32) #define DFT_THRESHOLD (32)
struct __btrfs_workqueue { struct btrfs_workqueue {
struct workqueue_struct *normal_wq; struct workqueue_struct *normal_wq;
/* File system this workqueue services */ /* File system this workqueue services */
...@@ -48,12 +47,7 @@ struct __btrfs_workqueue { ...@@ -48,12 +47,7 @@ struct __btrfs_workqueue {
spinlock_t thres_lock; spinlock_t thres_lock;
}; };
struct btrfs_workqueue { struct btrfs_fs_info * __pure btrfs_workqueue_owner(const struct btrfs_workqueue *wq)
struct __btrfs_workqueue *normal;
struct __btrfs_workqueue *high;
};
struct btrfs_fs_info * __pure btrfs_workqueue_owner(const struct __btrfs_workqueue *wq)
{ {
return wq->fs_info; return wq->fs_info;
} }
...@@ -66,22 +60,22 @@ struct btrfs_fs_info * __pure btrfs_work_owner(const struct btrfs_work *work) ...@@ -66,22 +60,22 @@ struct btrfs_fs_info * __pure btrfs_work_owner(const struct btrfs_work *work)
bool btrfs_workqueue_normal_congested(const struct btrfs_workqueue *wq) bool btrfs_workqueue_normal_congested(const struct btrfs_workqueue *wq)
{ {
/* /*
* We could compare wq->normal->pending with num_online_cpus() * We could compare wq->pending with num_online_cpus()
* to support "thresh == NO_THRESHOLD" case, but it requires * to support "thresh == NO_THRESHOLD" case, but it requires
* moving up atomic_inc/dec in thresh_queue/exec_hook. Let's * moving up atomic_inc/dec in thresh_queue/exec_hook. Let's
* postpone it until someone needs the support of that case. * postpone it until someone needs the support of that case.
*/ */
if (wq->normal->thresh == NO_THRESHOLD) if (wq->thresh == NO_THRESHOLD)
return false; return false;
return atomic_read(&wq->normal->pending) > wq->normal->thresh * 2; return atomic_read(&wq->pending) > wq->thresh * 2;
} }
static struct __btrfs_workqueue * struct btrfs_workqueue *btrfs_alloc_workqueue(struct btrfs_fs_info *fs_info,
__btrfs_alloc_workqueue(struct btrfs_fs_info *fs_info, const char *name, const char *name, unsigned int flags,
unsigned int flags, int limit_active, int thresh) int limit_active, int thresh)
{ {
struct __btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_KERNEL); struct btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_KERNEL);
if (!ret) if (!ret)
return NULL; return NULL;
...@@ -105,12 +99,8 @@ __btrfs_alloc_workqueue(struct btrfs_fs_info *fs_info, const char *name, ...@@ -105,12 +99,8 @@ __btrfs_alloc_workqueue(struct btrfs_fs_info *fs_info, const char *name,
ret->thresh = thresh; ret->thresh = thresh;
} }
if (flags & WQ_HIGHPRI) ret->normal_wq = alloc_workqueue("btrfs-%s", flags, ret->current_active,
ret->normal_wq = alloc_workqueue("btrfs-%s-high", flags, name);
ret->current_active, name);
else
ret->normal_wq = alloc_workqueue("btrfs-%s", flags,
ret->current_active, name);
if (!ret->normal_wq) { if (!ret->normal_wq) {
kfree(ret); kfree(ret);
return NULL; return NULL;
...@@ -119,41 +109,7 @@ __btrfs_alloc_workqueue(struct btrfs_fs_info *fs_info, const char *name, ...@@ -119,41 +109,7 @@ __btrfs_alloc_workqueue(struct btrfs_fs_info *fs_info, const char *name,
INIT_LIST_HEAD(&ret->ordered_list); INIT_LIST_HEAD(&ret->ordered_list);
spin_lock_init(&ret->list_lock); spin_lock_init(&ret->list_lock);
spin_lock_init(&ret->thres_lock); spin_lock_init(&ret->thres_lock);
trace_btrfs_workqueue_alloc(ret, name, flags & WQ_HIGHPRI); trace_btrfs_workqueue_alloc(ret, name);
return ret;
}
static inline void
__btrfs_destroy_workqueue(struct __btrfs_workqueue *wq);
struct btrfs_workqueue *btrfs_alloc_workqueue(struct btrfs_fs_info *fs_info,
const char *name,
unsigned int flags,
int limit_active,
int thresh)
{
struct btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_KERNEL);
if (!ret)
return NULL;
ret->normal = __btrfs_alloc_workqueue(fs_info, name,
flags & ~WQ_HIGHPRI,
limit_active, thresh);
if (!ret->normal) {
kfree(ret);
return NULL;
}
if (flags & WQ_HIGHPRI) {
ret->high = __btrfs_alloc_workqueue(fs_info, name, flags,
limit_active, thresh);
if (!ret->high) {
__btrfs_destroy_workqueue(ret->normal);
kfree(ret);
return NULL;
}
}
return ret; return ret;
} }
...@@ -162,7 +118,7 @@ struct btrfs_workqueue *btrfs_alloc_workqueue(struct btrfs_fs_info *fs_info, ...@@ -162,7 +118,7 @@ struct btrfs_workqueue *btrfs_alloc_workqueue(struct btrfs_fs_info *fs_info,
* This hook WILL be called in IRQ handler context, * This hook WILL be called in IRQ handler context,
* so workqueue_set_max_active MUST NOT be called in this hook * so workqueue_set_max_active MUST NOT be called in this hook
*/ */
static inline void thresh_queue_hook(struct __btrfs_workqueue *wq) static inline void thresh_queue_hook(struct btrfs_workqueue *wq)
{ {
if (wq->thresh == NO_THRESHOLD) if (wq->thresh == NO_THRESHOLD)
return; return;
...@@ -174,7 +130,7 @@ static inline void thresh_queue_hook(struct __btrfs_workqueue *wq) ...@@ -174,7 +130,7 @@ static inline void thresh_queue_hook(struct __btrfs_workqueue *wq)
* This hook is called in kthread content. * This hook is called in kthread content.
* So workqueue_set_max_active is called here. * So workqueue_set_max_active is called here.
*/ */
static inline void thresh_exec_hook(struct __btrfs_workqueue *wq) static inline void thresh_exec_hook(struct btrfs_workqueue *wq)
{ {
int new_current_active; int new_current_active;
long pending; long pending;
...@@ -217,7 +173,7 @@ static inline void thresh_exec_hook(struct __btrfs_workqueue *wq) ...@@ -217,7 +173,7 @@ static inline void thresh_exec_hook(struct __btrfs_workqueue *wq)
} }
} }
static void run_ordered_work(struct __btrfs_workqueue *wq, static void run_ordered_work(struct btrfs_workqueue *wq,
struct btrfs_work *self) struct btrfs_work *self)
{ {
struct list_head *list = &wq->ordered_list; struct list_head *list = &wq->ordered_list;
...@@ -305,7 +261,7 @@ static void btrfs_work_helper(struct work_struct *normal_work) ...@@ -305,7 +261,7 @@ static void btrfs_work_helper(struct work_struct *normal_work)
{ {
struct btrfs_work *work = container_of(normal_work, struct btrfs_work, struct btrfs_work *work = container_of(normal_work, struct btrfs_work,
normal_work); normal_work);
struct __btrfs_workqueue *wq; struct btrfs_workqueue *wq = work->wq;
int need_order = 0; int need_order = 0;
/* /*
...@@ -318,7 +274,6 @@ static void btrfs_work_helper(struct work_struct *normal_work) ...@@ -318,7 +274,6 @@ static void btrfs_work_helper(struct work_struct *normal_work)
*/ */
if (work->ordered_func) if (work->ordered_func)
need_order = 1; need_order = 1;
wq = work->wq;
trace_btrfs_work_sched(work); trace_btrfs_work_sched(work);
thresh_exec_hook(wq); thresh_exec_hook(wq);
...@@ -350,8 +305,7 @@ void btrfs_init_work(struct btrfs_work *work, btrfs_func_t func, ...@@ -350,8 +305,7 @@ void btrfs_init_work(struct btrfs_work *work, btrfs_func_t func,
work->flags = 0; work->flags = 0;
} }
static inline void __btrfs_queue_work(struct __btrfs_workqueue *wq, void btrfs_queue_work(struct btrfs_workqueue *wq, struct btrfs_work *work)
struct btrfs_work *work)
{ {
unsigned long flags; unsigned long flags;
...@@ -366,54 +320,22 @@ static inline void __btrfs_queue_work(struct __btrfs_workqueue *wq, ...@@ -366,54 +320,22 @@ static inline void __btrfs_queue_work(struct __btrfs_workqueue *wq,
queue_work(wq->normal_wq, &work->normal_work); queue_work(wq->normal_wq, &work->normal_work);
} }
void btrfs_queue_work(struct btrfs_workqueue *wq,
struct btrfs_work *work)
{
struct __btrfs_workqueue *dest_wq;
if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags) && wq->high)
dest_wq = wq->high;
else
dest_wq = wq->normal;
__btrfs_queue_work(dest_wq, work);
}
static inline void
__btrfs_destroy_workqueue(struct __btrfs_workqueue *wq)
{
destroy_workqueue(wq->normal_wq);
trace_btrfs_workqueue_destroy(wq);
kfree(wq);
}
void btrfs_destroy_workqueue(struct btrfs_workqueue *wq) void btrfs_destroy_workqueue(struct btrfs_workqueue *wq)
{ {
if (!wq) if (!wq)
return; return;
if (wq->high) destroy_workqueue(wq->normal_wq);
__btrfs_destroy_workqueue(wq->high); trace_btrfs_workqueue_destroy(wq);
__btrfs_destroy_workqueue(wq->normal);
kfree(wq); kfree(wq);
} }
void btrfs_workqueue_set_max(struct btrfs_workqueue *wq, int limit_active) void btrfs_workqueue_set_max(struct btrfs_workqueue *wq, int limit_active)
{ {
if (!wq) if (wq)
return; wq->limit_active = limit_active;
wq->normal->limit_active = limit_active;
if (wq->high)
wq->high->limit_active = limit_active;
}
void btrfs_set_work_high_priority(struct btrfs_work *work)
{
set_bit(WORK_HIGH_PRIO_BIT, &work->flags);
} }
void btrfs_flush_workqueue(struct btrfs_workqueue *wq) void btrfs_flush_workqueue(struct btrfs_workqueue *wq)
{ {
if (wq->high) flush_workqueue(wq->normal_wq);
flush_workqueue(wq->high->normal_wq);
flush_workqueue(wq->normal->normal_wq);
} }
...@@ -11,8 +11,6 @@ ...@@ -11,8 +11,6 @@
struct btrfs_fs_info; struct btrfs_fs_info;
struct btrfs_workqueue; struct btrfs_workqueue;
/* Internal use only */
struct __btrfs_workqueue;
struct btrfs_work; struct btrfs_work;
typedef void (*btrfs_func_t)(struct btrfs_work *arg); typedef void (*btrfs_func_t)(struct btrfs_work *arg);
typedef void (*btrfs_work_func_t)(struct work_struct *arg); typedef void (*btrfs_work_func_t)(struct work_struct *arg);
...@@ -25,7 +23,7 @@ struct btrfs_work { ...@@ -25,7 +23,7 @@ struct btrfs_work {
/* Don't touch things below */ /* Don't touch things below */
struct work_struct normal_work; struct work_struct normal_work;
struct list_head ordered_list; struct list_head ordered_list;
struct __btrfs_workqueue *wq; struct btrfs_workqueue *wq;
unsigned long flags; unsigned long flags;
}; };
...@@ -40,9 +38,8 @@ void btrfs_queue_work(struct btrfs_workqueue *wq, ...@@ -40,9 +38,8 @@ void btrfs_queue_work(struct btrfs_workqueue *wq,
struct btrfs_work *work); struct btrfs_work *work);
void btrfs_destroy_workqueue(struct btrfs_workqueue *wq); void btrfs_destroy_workqueue(struct btrfs_workqueue *wq);
void btrfs_workqueue_set_max(struct btrfs_workqueue *wq, int max); void btrfs_workqueue_set_max(struct btrfs_workqueue *wq, int max);
void btrfs_set_work_high_priority(struct btrfs_work *work);
struct btrfs_fs_info * __pure btrfs_work_owner(const struct btrfs_work *work); struct btrfs_fs_info * __pure btrfs_work_owner(const struct btrfs_work *work);
struct btrfs_fs_info * __pure btrfs_workqueue_owner(const struct __btrfs_workqueue *wq); struct btrfs_fs_info * __pure btrfs_workqueue_owner(const struct btrfs_workqueue *wq);
bool btrfs_workqueue_normal_congested(const struct btrfs_workqueue *wq); bool btrfs_workqueue_normal_congested(const struct btrfs_workqueue *wq);
void btrfs_flush_workqueue(struct btrfs_workqueue *wq); void btrfs_flush_workqueue(struct btrfs_workqueue *wq);
......
...@@ -847,6 +847,7 @@ struct btrfs_fs_info { ...@@ -847,6 +847,7 @@ struct btrfs_fs_info {
* two * two
*/ */
struct btrfs_workqueue *workers; struct btrfs_workqueue *workers;
struct btrfs_workqueue *hipri_workers;
struct btrfs_workqueue *delalloc_workers; struct btrfs_workqueue *delalloc_workers;
struct btrfs_workqueue *flush_workers; struct btrfs_workqueue *flush_workers;
struct btrfs_workqueue *endio_workers; struct btrfs_workqueue *endio_workers;
......
...@@ -874,8 +874,8 @@ blk_status_t btrfs_wq_submit_bio(struct inode *inode, struct bio *bio, ...@@ -874,8 +874,8 @@ blk_status_t btrfs_wq_submit_bio(struct inode *inode, struct bio *bio,
async->status = 0; async->status = 0;
if (op_is_sync(bio->bi_opf)) if (op_is_sync(bio->bi_opf))
btrfs_set_work_high_priority(&async->work); btrfs_queue_work(fs_info->hipri_workers, &async->work);
else
btrfs_queue_work(fs_info->workers, &async->work); btrfs_queue_work(fs_info->workers, &async->work);
return 0; return 0;
} }
...@@ -2279,6 +2279,7 @@ static void btrfs_stop_all_workers(struct btrfs_fs_info *fs_info) ...@@ -2279,6 +2279,7 @@ static void btrfs_stop_all_workers(struct btrfs_fs_info *fs_info)
{ {
btrfs_destroy_workqueue(fs_info->fixup_workers); btrfs_destroy_workqueue(fs_info->fixup_workers);
btrfs_destroy_workqueue(fs_info->delalloc_workers); btrfs_destroy_workqueue(fs_info->delalloc_workers);
btrfs_destroy_workqueue(fs_info->hipri_workers);
btrfs_destroy_workqueue(fs_info->workers); btrfs_destroy_workqueue(fs_info->workers);
btrfs_destroy_workqueue(fs_info->endio_workers); btrfs_destroy_workqueue(fs_info->endio_workers);
btrfs_destroy_workqueue(fs_info->endio_raid56_workers); btrfs_destroy_workqueue(fs_info->endio_raid56_workers);
...@@ -2457,7 +2458,9 @@ static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info) ...@@ -2457,7 +2458,9 @@ static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info)
unsigned int flags = WQ_MEM_RECLAIM | WQ_FREEZABLE | WQ_UNBOUND; unsigned int flags = WQ_MEM_RECLAIM | WQ_FREEZABLE | WQ_UNBOUND;
fs_info->workers = fs_info->workers =
btrfs_alloc_workqueue(fs_info, "worker", btrfs_alloc_workqueue(fs_info, "worker", flags, max_active, 16);
fs_info->hipri_workers =
btrfs_alloc_workqueue(fs_info, "worker-high",
flags | WQ_HIGHPRI, max_active, 16); flags | WQ_HIGHPRI, max_active, 16);
fs_info->delalloc_workers = fs_info->delalloc_workers =
...@@ -2505,8 +2508,8 @@ static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info) ...@@ -2505,8 +2508,8 @@ static int btrfs_init_workqueues(struct btrfs_fs_info *fs_info)
fs_info->discard_ctl.discard_workers = fs_info->discard_ctl.discard_workers =
alloc_workqueue("btrfs_discard", WQ_UNBOUND | WQ_FREEZABLE, 1); alloc_workqueue("btrfs_discard", WQ_UNBOUND | WQ_FREEZABLE, 1);
if (!(fs_info->workers && fs_info->delalloc_workers && if (!(fs_info->workers && fs_info->hipri_workers &&
fs_info->flush_workers && fs_info->delalloc_workers && fs_info->flush_workers &&
fs_info->endio_workers && fs_info->endio_meta_workers && fs_info->endio_workers && fs_info->endio_meta_workers &&
fs_info->endio_meta_write_workers && fs_info->endio_meta_write_workers &&
fs_info->endio_write_workers && fs_info->endio_raid56_workers && fs_info->endio_write_workers && fs_info->endio_raid56_workers &&
......
...@@ -1903,6 +1903,7 @@ static void btrfs_resize_thread_pool(struct btrfs_fs_info *fs_info, ...@@ -1903,6 +1903,7 @@ static void btrfs_resize_thread_pool(struct btrfs_fs_info *fs_info,
old_pool_size, new_pool_size); old_pool_size, new_pool_size);
btrfs_workqueue_set_max(fs_info->workers, new_pool_size); btrfs_workqueue_set_max(fs_info->workers, new_pool_size);
btrfs_workqueue_set_max(fs_info->hipri_workers, new_pool_size);
btrfs_workqueue_set_max(fs_info->delalloc_workers, new_pool_size); btrfs_workqueue_set_max(fs_info->delalloc_workers, new_pool_size);
btrfs_workqueue_set_max(fs_info->caching_workers, new_pool_size); btrfs_workqueue_set_max(fs_info->caching_workers, new_pool_size);
btrfs_workqueue_set_max(fs_info->endio_workers, new_pool_size); btrfs_workqueue_set_max(fs_info->endio_workers, new_pool_size);
......
...@@ -24,7 +24,7 @@ struct btrfs_free_cluster; ...@@ -24,7 +24,7 @@ struct btrfs_free_cluster;
struct map_lookup; struct map_lookup;
struct extent_buffer; struct extent_buffer;
struct btrfs_work; struct btrfs_work;
struct __btrfs_workqueue; struct btrfs_workqueue;
struct btrfs_qgroup_extent_record; struct btrfs_qgroup_extent_record;
struct btrfs_qgroup; struct btrfs_qgroup;
struct extent_io_tree; struct extent_io_tree;
...@@ -1457,42 +1457,36 @@ DEFINE_EVENT(btrfs__work, btrfs_ordered_sched, ...@@ -1457,42 +1457,36 @@ DEFINE_EVENT(btrfs__work, btrfs_ordered_sched,
TP_ARGS(work) TP_ARGS(work)
); );
DECLARE_EVENT_CLASS(btrfs__workqueue, DECLARE_EVENT_CLASS(btrfs_workqueue,
TP_PROTO(const struct __btrfs_workqueue *wq, TP_PROTO(const struct btrfs_workqueue *wq, const char *name),
const char *name, int high),
TP_ARGS(wq, name, high), TP_ARGS(wq, name),
TP_STRUCT__entry_btrfs( TP_STRUCT__entry_btrfs(
__field( const void *, wq ) __field( const void *, wq )
__string( name, name ) __string( name, name )
__field( int , high )
), ),
TP_fast_assign_btrfs(btrfs_workqueue_owner(wq), TP_fast_assign_btrfs(btrfs_workqueue_owner(wq),
__entry->wq = wq; __entry->wq = wq;
__assign_str(name, name); __assign_str(name, name);
__entry->high = high;
), ),
TP_printk_btrfs("name=%s%s wq=%p", __get_str(name), TP_printk_btrfs("name=%s wq=%p", __get_str(name),
__print_flags(__entry->high, "",
{(WQ_HIGHPRI), "-high"}),
__entry->wq) __entry->wq)
); );
DEFINE_EVENT(btrfs__workqueue, btrfs_workqueue_alloc, DEFINE_EVENT(btrfs_workqueue, btrfs_workqueue_alloc,
TP_PROTO(const struct __btrfs_workqueue *wq, TP_PROTO(const struct btrfs_workqueue *wq, const char *name),
const char *name, int high),
TP_ARGS(wq, name, high) TP_ARGS(wq, name)
); );
DECLARE_EVENT_CLASS(btrfs__workqueue_done, DECLARE_EVENT_CLASS(btrfs_workqueue_done,
TP_PROTO(const struct __btrfs_workqueue *wq), TP_PROTO(const struct btrfs_workqueue *wq),
TP_ARGS(wq), TP_ARGS(wq),
...@@ -1507,9 +1501,9 @@ DECLARE_EVENT_CLASS(btrfs__workqueue_done, ...@@ -1507,9 +1501,9 @@ DECLARE_EVENT_CLASS(btrfs__workqueue_done,
TP_printk_btrfs("wq=%p", __entry->wq) TP_printk_btrfs("wq=%p", __entry->wq)
); );
DEFINE_EVENT(btrfs__workqueue_done, btrfs_workqueue_destroy, DEFINE_EVENT(btrfs_workqueue_done, btrfs_workqueue_destroy,
TP_PROTO(const struct __btrfs_workqueue *wq), TP_PROTO(const struct btrfs_workqueue *wq),
TP_ARGS(wq) TP_ARGS(wq)
); );
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment