Commit e721e49d authored by David Sterba's avatar David Sterba

btrfs: make find_workspace always succeed

With just one preallocated workspace we can guarantee forward progress
even if there's no memory available for new workspaces. The cost is more
waiting but we also get rid of several error paths.

On average, there will be several idle workspaces, so the waiting
penalty won't be so bad.

In the worst case, all cpus will compete for one workspace until there's
some memory. Attempts to allocate a new one are done each time the
waiters are woken up.
Signed-off-by: David Sterba <dsterba@suse.com>
parent f77dd0d6
...@@ -785,8 +785,10 @@ void __init btrfs_init_compress(void) ...@@ -785,8 +785,10 @@ void __init btrfs_init_compress(void)
} }
/* /*
* this finds an available workspace or allocates a new one * This finds an available workspace or allocates a new one.
* ERR_PTR is returned if things go bad. * If it's not possible to allocate a new one, waits until there's one.
 * Preallocation makes a forward progress guarantee and we do not return
* errors.
*/ */
static struct list_head *find_workspace(int type) static struct list_head *find_workspace(int type)
{ {
...@@ -826,6 +828,14 @@ static struct list_head *find_workspace(int type) ...@@ -826,6 +828,14 @@ static struct list_head *find_workspace(int type)
if (IS_ERR(workspace)) { if (IS_ERR(workspace)) {
atomic_dec(total_ws); atomic_dec(total_ws);
wake_up(ws_wait); wake_up(ws_wait);
/*
* Do not return the error but go back to waiting. There's a
* workspace preallocated for each type and the compression
* time is bounded so we get to a workspace eventually. This
* makes our caller's life easier.
*/
goto again;
} }
return workspace; return workspace;
} }
...@@ -913,8 +923,6 @@ int btrfs_compress_pages(int type, struct address_space *mapping, ...@@ -913,8 +923,6 @@ int btrfs_compress_pages(int type, struct address_space *mapping,
int ret; int ret;
workspace = find_workspace(type); workspace = find_workspace(type);
if (IS_ERR(workspace))
return PTR_ERR(workspace);
ret = btrfs_compress_op[type-1]->compress_pages(workspace, mapping, ret = btrfs_compress_op[type-1]->compress_pages(workspace, mapping,
start, len, pages, start, len, pages,
...@@ -949,8 +957,6 @@ static int btrfs_decompress_biovec(int type, struct page **pages_in, ...@@ -949,8 +957,6 @@ static int btrfs_decompress_biovec(int type, struct page **pages_in,
int ret; int ret;
workspace = find_workspace(type); workspace = find_workspace(type);
if (IS_ERR(workspace))
return PTR_ERR(workspace);
ret = btrfs_compress_op[type-1]->decompress_biovec(workspace, pages_in, ret = btrfs_compress_op[type-1]->decompress_biovec(workspace, pages_in,
disk_start, disk_start,
...@@ -971,8 +977,6 @@ int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page, ...@@ -971,8 +977,6 @@ int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page,
int ret; int ret;
workspace = find_workspace(type); workspace = find_workspace(type);
if (IS_ERR(workspace))
return PTR_ERR(workspace);
ret = btrfs_compress_op[type-1]->decompress(workspace, data_in, ret = btrfs_compress_op[type-1]->decompress(workspace, data_in,
dest_page, start_byte, dest_page, start_byte,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment