Commit 1ca08976 authored by Qu Wenruo, committed by Josef Bacik

btrfs: Add high priority workqueue support for btrfs_workqueue_struct

Add high priority support to btrfs_workqueue.

This is implemented by embedding one internal workqueue
(__btrfs_workqueue_struct) per priority into btrfs_workqueue_struct and
using helper functions to distinguish the normal priority wq from the
high priority wq. The high priority wq is therefore completely
independent from the normal workqueue.
Signed-off-by: Qu Wenruo <quwenruo@cn.fujitsu.com>
Tested-by: David Sterba <dsterba@suse.cz>
Signed-off-by: Josef Bacik <jbacik@fb.com>
parent 08a9ff32
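
For orientation, the sketch below shows how a caller might use the resulting API: allocate the workqueue with WQ_HIGHPRI so the high priority queue is also created, mark a work item high priority, and queue it. This is an illustrative sketch only, not part of the patch; the example_* names, the max_active value, and the ordered_func/ordered_free arguments to btrfs_init_work are assumptions taken from the surrounding series.

/* Illustrative sketch (not from this patch); names prefixed example_ are hypothetical. */
static void example_func(struct btrfs_work_struct *work)
{
        /* the actual work, run in workqueue context */
}

static int example_usage(void)
{
        struct btrfs_workqueue_struct *wq;
        struct btrfs_work_struct work;

        /* Passing WQ_HIGHPRI makes btrfs_alloc_workqueue also create wq->high */
        wq = btrfs_alloc_workqueue("example", WQ_HIGHPRI, 16);
        if (!wq)
                return -ENOMEM;

        /* ordered_func/ordered_free are assumed optional (NULL) here */
        btrfs_init_work(&work, example_func, NULL, NULL);

        /* With the bit set, btrfs_queue_work() routes the item to wq->high */
        btrfs_set_work_high_priority(&work);
        btrfs_queue_work(wq, &work);

        /* btrfs_destroy_workqueue() tears down both queues; the underlying
         * destroy_workqueue() flushes pending work first. */
        btrfs_destroy_workqueue(wq);
        return 0;
}

Work items that never get the high priority bit keep going to the normal queue, so existing callers are unaffected.
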
@@ -730,7 +730,7 @@ void btrfs_queue_worker(struct btrfs_workers *workers, struct btrfs_work *work)
 	spin_unlock_irqrestore(&worker->lock, flags);
 }
 
-struct btrfs_workqueue_struct {
+struct __btrfs_workqueue_struct {
 	struct workqueue_struct *normal_wq;
 	/* List head pointing to ordered work list */
 	struct list_head ordered_list;
@@ -739,6 +739,38 @@ struct btrfs_workqueue_struct {
 	spinlock_t list_lock;
 };
 
+struct btrfs_workqueue_struct {
+	struct __btrfs_workqueue_struct *normal;
+	struct __btrfs_workqueue_struct *high;
+};
+
+static inline struct __btrfs_workqueue_struct
+*__btrfs_alloc_workqueue(char *name, int flags, int max_active)
+{
+	struct __btrfs_workqueue_struct *ret = kzalloc(sizeof(*ret), GFP_NOFS);
+
+	if (unlikely(!ret))
+		return NULL;
+
+	if (flags & WQ_HIGHPRI)
+		ret->normal_wq = alloc_workqueue("%s-%s-high", flags,
+						 max_active, "btrfs", name);
+	else
+		ret->normal_wq = alloc_workqueue("%s-%s", flags,
+						 max_active, "btrfs", name);
+	if (unlikely(!ret->normal_wq)) {
+		kfree(ret);
+		return NULL;
+	}
+
+	INIT_LIST_HEAD(&ret->ordered_list);
+	spin_lock_init(&ret->list_lock);
+	return ret;
+}
+
+static inline void
+__btrfs_destroy_workqueue(struct __btrfs_workqueue_struct *wq);
+
 struct btrfs_workqueue_struct *btrfs_alloc_workqueue(char *name,
 						      int flags,
 						      int max_active)
@@ -748,19 +780,25 @@ struct btrfs_workqueue_struct *btrfs_alloc_workqueue(char *name,
 	if (unlikely(!ret))
 		return NULL;
 
-	ret->normal_wq = alloc_workqueue("%s-%s", flags, max_active,
-					 "btrfs", name);
-	if (unlikely(!ret->normal_wq)) {
+	ret->normal = __btrfs_alloc_workqueue(name, flags & ~WQ_HIGHPRI,
+					      max_active);
+	if (unlikely(!ret->normal)) {
 		kfree(ret);
 		return NULL;
 	}
 
-	INIT_LIST_HEAD(&ret->ordered_list);
-	spin_lock_init(&ret->list_lock);
+	if (flags & WQ_HIGHPRI) {
+		ret->high = __btrfs_alloc_workqueue(name, flags, max_active);
+		if (unlikely(!ret->high)) {
+			__btrfs_destroy_workqueue(ret->normal);
+			kfree(ret);
+			return NULL;
+		}
+	}
 	return ret;
 }
 
-static void run_ordered_work(struct btrfs_workqueue_struct *wq)
+static void run_ordered_work(struct __btrfs_workqueue_struct *wq)
 {
 	struct list_head *list = &wq->ordered_list;
 	struct btrfs_work_struct *work;
@@ -804,7 +842,7 @@ static void run_ordered_work(struct btrfs_workqueue_struct *wq)
 static void normal_work_helper(struct work_struct *arg)
 {
 	struct btrfs_work_struct *work;
-	struct btrfs_workqueue_struct *wq;
+	struct __btrfs_workqueue_struct *wq;
 	int need_order = 0;
 
 	work = container_of(arg, struct btrfs_work_struct, normal_work);
@@ -840,8 +878,8 @@ void btrfs_init_work(struct btrfs_work_struct *work,
 	work->flags = 0;
 }
 
-void btrfs_queue_work(struct btrfs_workqueue_struct *wq,
-		      struct btrfs_work_struct *work)
+static inline void __btrfs_queue_work(struct __btrfs_workqueue_struct *wq,
+				      struct btrfs_work_struct *work)
 {
 	unsigned long flags;
@@ -854,13 +892,42 @@ void btrfs_queue_work(struct btrfs_workqueue_struct *wq,
 	queue_work(wq->normal_wq, &work->normal_work);
 }
 
-void btrfs_destroy_workqueue(struct btrfs_workqueue_struct *wq)
+void btrfs_queue_work(struct btrfs_workqueue_struct *wq,
+		      struct btrfs_work_struct *work)
+{
+	struct __btrfs_workqueue_struct *dest_wq;
+
+	if (test_bit(WORK_HIGH_PRIO_BIT, &work->flags) && wq->high)
+		dest_wq = wq->high;
+	else
+		dest_wq = wq->normal;
+	__btrfs_queue_work(dest_wq, work);
+}
+
+static inline void
+__btrfs_destroy_workqueue(struct __btrfs_workqueue_struct *wq)
 {
 	destroy_workqueue(wq->normal_wq);
 	kfree(wq);
 }
 
+void btrfs_destroy_workqueue(struct btrfs_workqueue_struct *wq)
+{
+	if (!wq)
+		return;
+	if (wq->high)
+		__btrfs_destroy_workqueue(wq->high);
+	__btrfs_destroy_workqueue(wq->normal);
+}
+
 void btrfs_workqueue_set_max(struct btrfs_workqueue_struct *wq, int max)
 {
-	workqueue_set_max_active(wq->normal_wq, max);
+	workqueue_set_max_active(wq->normal->normal_wq, max);
+	if (wq->high)
+		workqueue_set_max_active(wq->high->normal_wq, max);
 }
+
+void btrfs_set_work_high_priority(struct btrfs_work_struct *work)
+{
+	set_bit(WORK_HIGH_PRIO_BIT, &work->flags);
+}
@@ -121,6 +121,8 @@ void btrfs_requeue_work(struct btrfs_work *work);
 void btrfs_set_work_high_prio(struct btrfs_work *work);
 
 struct btrfs_workqueue_struct;
+/* Internal use only */
+struct __btrfs_workqueue_struct;
 
 struct btrfs_work_struct {
 	void (*func)(struct btrfs_work_struct *arg);
@@ -130,7 +132,7 @@ struct btrfs_work_struct {
 	/* Don't touch things below */
 	struct work_struct normal_work;
 	struct list_head ordered_list;
-	struct btrfs_workqueue_struct *wq;
+	struct __btrfs_workqueue_struct *wq;
 	unsigned long flags;
 };
@@ -145,4 +147,5 @@ void btrfs_queue_work(struct btrfs_workqueue_struct *wq,
 		      struct btrfs_work_struct *work);
 void btrfs_destroy_workqueue(struct btrfs_workqueue_struct *wq);
 void btrfs_workqueue_set_max(struct btrfs_workqueue_struct *wq, int max);
+void btrfs_set_work_high_priority(struct btrfs_work_struct *work);
 #endif