Commit ca4ac360 authored by Dennis Zhou, committed by David Sterba

btrfs: manage heuristic workspace as index 0

While the heuristic workspaces aren't really compression workspaces,
they use the same interface for managing them. So rather than branching,
let's just handle them once again as the index 0 compression type.
Reviewed-by: Nikolay Borisov <nborisov@suse.com>
Reviewed-by: Josef Bacik <josef@toxicpanda.com>
Signed-off-by: Dennis Zhou <dennis@kernel.org>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent acce85de
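
The idea, in short: reserve slot 0 of the per-type arrays for the heuristic so that every compression type indexes btrfs_compress_op[] and wsm[] directly with its own type value, and the old "type - 1" translation disappears. Below is a minimal user-space sketch of that indexing scheme; it is an illustration only (simplified stand-in names, not the kernel code), assuming the type values btrfs used at the time (zlib = 1, lzo = 2, zstd = 3, with the heuristic taking index 0).

/*
 * Illustrative sketch only -- simplified stand-ins, not the kernel code.
 * Slot 0 of the ops table is the heuristic, so the real compression
 * types index the table directly with their type value instead of
 * "type - 1".
 */
#include <stdio.h>

enum compress_type { TYPE_HEURISTIC = 0, TYPE_ZLIB, TYPE_LZO, TYPE_ZSTD };

/* heuristic + the three real compression types */
#define NR_WORKSPACE_MANAGERS 4

struct compress_op {
	const char *name;
};

static const struct compress_op heuristic_op = { "heuristic" };
static const struct compress_op zlib_op      = { "zlib" };
static const struct compress_op lzo_op       = { "lzo" };
static const struct compress_op zstd_op      = { "zstd" };

/* Mirrors btrfs_compress_op[] after the patch: the heuristic sits at index 0. */
static const struct compress_op *ops[NR_WORKSPACE_MANAGERS] = {
	&heuristic_op, &zlib_op, &lzo_op, &zstd_op,
};

int main(void)
{
	/* Every caller now uses the type value itself as the index, including 0. */
	for (int type = TYPE_HEURISTIC; type < NR_WORKSPACE_MANAGERS; type++)
		printf("type %d -> %s workspace manager\n", type, ops[type]->name);
	return 0;
}
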
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -769,6 +769,11 @@ static struct list_head *alloc_heuristic_ws(void)
 	return ERR_PTR(-ENOMEM);
 }
 
+const struct btrfs_compress_op btrfs_heuristic_compress = {
+	.alloc_workspace = alloc_heuristic_ws,
+	.free_workspace = free_heuristic_ws,
+};
+
 struct workspace_manager {
 	struct list_head idle_ws;
 	spinlock_t ws_lock;
@@ -780,11 +785,11 @@ struct workspace_manager {
 	wait_queue_head_t ws_wait;
 };
 
-static struct workspace_manager wsm[BTRFS_COMPRESS_TYPES];
-
-static struct workspace_manager btrfs_heuristic_ws;
+static struct workspace_manager wsm[BTRFS_NR_WORKSPACE_MANAGERS];
 
 static const struct btrfs_compress_op * const btrfs_compress_op[] = {
+	/* The heuristic is represented as compression type 0 */
+	&btrfs_heuristic_compress,
 	&btrfs_zlib_compress,
 	&btrfs_lzo_compress,
 	&btrfs_zstd_compress,
@@ -795,22 +800,7 @@ void __init btrfs_init_compress(void)
 	struct list_head *workspace;
 	int i;
 
-	INIT_LIST_HEAD(&btrfs_heuristic_ws.idle_ws);
-	spin_lock_init(&btrfs_heuristic_ws.ws_lock);
-	atomic_set(&btrfs_heuristic_ws.total_ws, 0);
-	init_waitqueue_head(&btrfs_heuristic_ws.ws_wait);
-
-	workspace = alloc_heuristic_ws();
-	if (IS_ERR(workspace)) {
-		pr_warn(
-	"BTRFS: cannot preallocate heuristic workspace, will try later\n");
-	} else {
-		atomic_set(&btrfs_heuristic_ws.total_ws, 1);
-		btrfs_heuristic_ws.free_ws = 1;
-		list_add(workspace, &btrfs_heuristic_ws.idle_ws);
-	}
-
-	for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) {
+	for (i = 0; i < BTRFS_NR_WORKSPACE_MANAGERS; i++) {
 		INIT_LIST_HEAD(&wsm[i].idle_ws);
 		spin_lock_init(&wsm[i].ws_lock);
 		atomic_set(&wsm[i].total_ws, 0);
@@ -837,11 +827,10 @@ void __init btrfs_init_compress(void)
  * Preallocation makes a forward progress guarantees and we do not return
  * errors.
  */
-static struct list_head *__find_workspace(int type, bool heuristic)
+static struct list_head *find_workspace(int type)
 {
 	struct list_head *workspace;
 	int cpus = num_online_cpus();
-	int idx = type - 1;
 	unsigned nofs_flag;
 	struct list_head *idle_ws;
 	spinlock_t *ws_lock;
@@ -849,19 +838,11 @@ static struct list_head *__find_workspace(int type, bool heuristic)
 	wait_queue_head_t *ws_wait;
 	int *free_ws;
 
-	if (heuristic) {
-		idle_ws  = &btrfs_heuristic_ws.idle_ws;
-		ws_lock  = &btrfs_heuristic_ws.ws_lock;
-		total_ws = &btrfs_heuristic_ws.total_ws;
-		ws_wait  = &btrfs_heuristic_ws.ws_wait;
-		free_ws  = &btrfs_heuristic_ws.free_ws;
-	} else {
-		idle_ws  = &wsm[idx].idle_ws;
-		ws_lock  = &wsm[idx].ws_lock;
-		total_ws = &wsm[idx].total_ws;
-		ws_wait  = &wsm[idx].ws_wait;
-		free_ws  = &wsm[idx].free_ws;
-	}
+	idle_ws  = &wsm[type].idle_ws;
+	ws_lock  = &wsm[type].ws_lock;
+	total_ws = &wsm[type].total_ws;
+	ws_wait  = &wsm[type].ws_wait;
+	free_ws  = &wsm[type].free_ws;
 
 again:
 	spin_lock(ws_lock);
@@ -892,10 +873,7 @@ static struct list_head *__find_workspace(int type, bool heuristic)
 	 * context of btrfs_compress_bio/btrfs_compress_pages
 	 */
 	nofs_flag = memalloc_nofs_save();
-	if (heuristic)
-		workspace = alloc_heuristic_ws();
-	else
-		workspace = btrfs_compress_op[idx]->alloc_workspace();
+	workspace = btrfs_compress_op[type]->alloc_workspace();
 	memalloc_nofs_restore(nofs_flag);
 
 	if (IS_ERR(workspace)) {
@@ -926,38 +904,23 @@ static struct list_head *__find_workspace(int type, bool heuristic)
 	return workspace;
 }
 
-static struct list_head *find_workspace(int type)
-{
-	return __find_workspace(type, false);
-}
-
 /*
  * put a workspace struct back on the list or free it if we have enough
  * idle ones sitting around
  */
-static void __free_workspace(int type, struct list_head *workspace,
-			     bool heuristic)
+static void free_workspace(int type, struct list_head *workspace)
 {
-	int idx = type - 1;
 	struct list_head *idle_ws;
 	spinlock_t *ws_lock;
 	atomic_t *total_ws;
 	wait_queue_head_t *ws_wait;
 	int *free_ws;
 
-	if (heuristic) {
-		idle_ws  = &btrfs_heuristic_ws.idle_ws;
-		ws_lock  = &btrfs_heuristic_ws.ws_lock;
-		total_ws = &btrfs_heuristic_ws.total_ws;
-		ws_wait  = &btrfs_heuristic_ws.ws_wait;
-		free_ws  = &btrfs_heuristic_ws.free_ws;
-	} else {
-		idle_ws  = &wsm[idx].idle_ws;
-		ws_lock  = &wsm[idx].ws_lock;
-		total_ws = &wsm[idx].total_ws;
-		ws_wait  = &wsm[idx].ws_wait;
-		free_ws  = &wsm[idx].free_ws;
-	}
+	idle_ws  = &wsm[type].idle_ws;
+	ws_lock  = &wsm[type].ws_lock;
+	total_ws = &wsm[type].total_ws;
+	ws_wait  = &wsm[type].ws_wait;
+	free_ws  = &wsm[type].free_ws;
 
 	spin_lock(ws_lock);
 	if (*free_ws <= num_online_cpus()) {
@@ -968,20 +931,12 @@ static void __free_workspace(int type, struct list_head *workspace,
 	}
 	spin_unlock(ws_lock);
 
-	if (heuristic)
-		free_heuristic_ws(workspace);
-	else
-		btrfs_compress_op[idx]->free_workspace(workspace);
+	btrfs_compress_op[type]->free_workspace(workspace);
 	atomic_dec(total_ws);
 wake:
 	cond_wake_up(ws_wait);
 }
 
-static void free_workspace(int type, struct list_head *ws)
-{
-	return __free_workspace(type, ws, false);
-}
-
 /*
  * cleanup function for module exit
  */
@@ -990,14 +945,7 @@ static void free_workspaces(void)
 	struct list_head *workspace;
 	int i;
 
-	while (!list_empty(&btrfs_heuristic_ws.idle_ws)) {
-		workspace = btrfs_heuristic_ws.idle_ws.next;
-		list_del(workspace);
-		free_heuristic_ws(workspace);
-		atomic_dec(&btrfs_heuristic_ws.total_ws);
-	}
-
-	for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) {
+	for (i = 0; i < BTRFS_NR_WORKSPACE_MANAGERS; i++) {
 		while (!list_empty(&wsm[i].idle_ws)) {
 			workspace = wsm[i].idle_ws.next;
 			list_del(workspace);
@@ -1042,8 +990,8 @@ int btrfs_compress_pages(unsigned int type_level, struct address_space *mapping,
 
 	workspace = find_workspace(type);
 
-	btrfs_compress_op[type - 1]->set_level(workspace, type_level);
-	ret = btrfs_compress_op[type-1]->compress_pages(workspace, mapping,
+	btrfs_compress_op[type]->set_level(workspace, type_level);
+	ret = btrfs_compress_op[type]->compress_pages(workspace, mapping,
 						      start, pages,
 						      out_pages,
 						      total_in, total_out);
@@ -1072,7 +1020,7 @@ static int btrfs_decompress_bio(struct compressed_bio *cb)
 	int type = cb->compress_type;
 
 	workspace = find_workspace(type);
-	ret = btrfs_compress_op[type - 1]->decompress_bio(workspace, cb);
+	ret = btrfs_compress_op[type]->decompress_bio(workspace, cb);
 	free_workspace(type, workspace);
 
 	return ret;
@@ -1091,7 +1039,7 @@ int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page,
 
 	workspace = find_workspace(type);
 
-	ret = btrfs_compress_op[type-1]->decompress(workspace, data_in,
+	ret = btrfs_compress_op[type]->decompress(workspace, data_in,
 						  dest_page, start_byte,
 						  srclen, destlen);
 
@@ -1512,7 +1460,7 @@ static void heuristic_collect_sample(struct inode *inode, u64 start, u64 end,
  */
 int btrfs_compress_heuristic(struct inode *inode, u64 start, u64 end)
 {
-	struct list_head *ws_list = __find_workspace(0, true);
+	struct list_head *ws_list = find_workspace(0);
 	struct heuristic_ws *ws;
 	u32 i;
 	u8 byte;
@@ -1581,7 +1529,7 @@ int btrfs_compress_heuristic(struct inode *inode, u64 start, u64 end)
 	}
 
 out:
-	__free_workspace(0, ws_list, true);
+	free_workspace(0, ws_list);
 	return ret;
 }
 
--- a/fs/btrfs/compression.h
+++ b/fs/btrfs/compression.h
@@ -132,6 +132,10 @@ struct btrfs_compress_op {
 	void (*set_level)(struct list_head *ws, unsigned int type);
 };
 
+/* The heuristic workspaces are managed via the 0th workspace manager */
+#define BTRFS_NR_WORKSPACE_MANAGERS	(BTRFS_COMPRESS_TYPES + 1)
+
+extern const struct btrfs_compress_op btrfs_heuristic_compress;
 extern const struct btrfs_compress_op btrfs_zlib_compress;
 extern const struct btrfs_compress_op btrfs_lzo_compress;
 extern const struct btrfs_compress_op btrfs_zstd_compress;