Commit 10b94a51 authored by Dennis Zhou, committed by David Sterba

btrfs: unify compression ops with workspace_manager

Make the workspace_manager own the interface operations rather than
managing index-paired arrays for the workspace_manager and compression
operations.
Reviewed-by: Nikolay Borisov <nborisov@suse.com>
Reviewed-by: Josef Bacik <josef@toxicpanda.com>
Signed-off-by: Dennis Zhou <dennis@kernel.org>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
parent ca4ac360
@@ -775,6 +775,7 @@ const struct btrfs_compress_op btrfs_heuristic_compress = {
 };
 
 struct workspace_manager {
+	const struct btrfs_compress_op *ops;
 	struct list_head idle_ws;
 	spinlock_t ws_lock;
 	/* Number of free workspaces */
@@ -801,6 +802,8 @@ void __init btrfs_init_compress(void)
 	int i;
 
 	for (i = 0; i < BTRFS_NR_WORKSPACE_MANAGERS; i++) {
+		wsm[i].ops = btrfs_compress_op[i];
+
 		INIT_LIST_HEAD(&wsm[i].idle_ws);
 		spin_lock_init(&wsm[i].ws_lock);
 		atomic_set(&wsm[i].total_ws, 0);
@@ -810,7 +813,7 @@ void __init btrfs_init_compress(void)
 		 * Preallocate one workspace for each compression type so
 		 * we can guarantee forward progress in the worst case
 		 */
-		workspace = btrfs_compress_op[i]->alloc_workspace();
+		workspace = wsm[i].ops->alloc_workspace();
 		if (IS_ERR(workspace)) {
 			pr_warn("BTRFS: cannot preallocate compression workspace, will try later\n");
 		} else {
@@ -873,7 +876,7 @@ static struct list_head *find_workspace(int type)
 	 * context of btrfs_compress_bio/btrfs_compress_pages
 	 */
	nofs_flag = memalloc_nofs_save();
-	workspace = btrfs_compress_op[type]->alloc_workspace();
+	workspace = wsm[type].ops->alloc_workspace();
	memalloc_nofs_restore(nofs_flag);
 	if (IS_ERR(workspace)) {
@@ -931,7 +934,7 @@ static void free_workspace(int type, struct list_head *workspace)
 	}
 	spin_unlock(ws_lock);
 
-	btrfs_compress_op[type]->free_workspace(workspace);
+	wsm[type].ops->free_workspace(workspace);
 	atomic_dec(total_ws);
 wake:
 	cond_wake_up(ws_wait);
@@ -949,7 +952,7 @@ static void free_workspaces(void)
 		while (!list_empty(&wsm[i].idle_ws)) {
 			workspace = wsm[i].idle_ws.next;
 			list_del(workspace);
-			btrfs_compress_op[i]->free_workspace(workspace);
+			wsm[i].ops->free_workspace(workspace);
 			atomic_dec(&wsm[i].total_ws);
 		}
 	}
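Illustrative sketch (not part of the patch): the change boils down to workspace_manager owning a pointer to its btrfs_compress_op, so call sites dereference wsm[type].ops instead of keeping wsm[] and btrfs_compress_op[] index-paired at every use. The minimal userspace model below shows the same pattern; all names in it (demo_ops, demo_workspace_manager, dwsm, and so on) are hypothetical stand-ins, not kernel code.

#include <stdio.h>

struct demo_ops {
	const char *name;
	void (*alloc_workspace)(void);
};

static void alloc_zlib_ws(void) { puts("alloc zlib workspace"); }
static void alloc_zstd_ws(void) { puts("alloc zstd workspace"); }

static const struct demo_ops zlib_ops = { "zlib", alloc_zlib_ws };
static const struct demo_ops zstd_ops = { "zstd", alloc_zstd_ws };

/* index-paired ops table, analogous to btrfs_compress_op[] */
static const struct demo_ops *const demo_compress_op[] = { &zlib_ops, &zstd_ops };

#define DEMO_NR_MANAGERS 2

/* the manager owns its ops pointer, like workspace_manager::ops after this patch */
struct demo_workspace_manager {
	const struct demo_ops *ops;
	int total_ws;
};

static struct demo_workspace_manager dwsm[DEMO_NR_MANAGERS];

int main(void)
{
	int i;

	/* wire each manager to its ops once, at init time */
	for (i = 0; i < DEMO_NR_MANAGERS; i++)
		dwsm[i].ops = demo_compress_op[i];

	/* call sites go through the manager, not the global ops array */
	for (i = 0; i < DEMO_NR_MANAGERS; i++) {
		printf("%s: ", dwsm[i].ops->name);
		dwsm[i].ops->alloc_workspace();
		dwsm[i].total_ws++;
	}
	return 0;
}

Keeping the ops pointer inside the manager removes the requirement that the two arrays stay index-synchronized at each call site, which is what the commit message means by "index-paired arrays".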