Commit 8f3b41ab authored by Kent Overstreet, committed by Kent Overstreet

bcachefs: Don't restrict copygc writes to the same device

This no longer makes any sense, since copygc is now one thread per
filesystem, not per device, with a single write point.
Signed-off-by: Kent Overstreet <kent.overstreet@gmail.com>
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent 63b214e7
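
For readers skimming the diff: the mechanical bulk of the patch makes the allocation paths hand around enum bucket_alloc_ret instead of raw errnos, with the conversion to an errno done only at the top level in bch2_alloc_sectors_start(). The snippet below is a minimal userspace sketch of that mapping, not bcachefs code; the enum values are taken from the diff, while alloc_ret_to_errno() and the blocking_cl flag are illustrative stand-ins (the real code returns ERR_PTR()-wrapped values and BUG()s on an unknown value).

#include <errno.h>
#include <stdbool.h>

enum bucket_alloc_ret {
        ALLOC_SUCCESS,
        OPEN_BUCKETS_EMPTY,
        FREELIST_EMPTY,         /* Allocator thread not keeping up */
        INSUFFICIENT_DEVICES,
};

static int alloc_ret_to_errno(enum bucket_alloc_ret ret, bool blocking_cl)
{
        switch (ret) {
        case ALLOC_SUCCESS:
                return 0;
        case OPEN_BUCKETS_EMPTY:
        case FREELIST_EMPTY:
                /* Transient: wait on the closure if we have one, else out of space */
                return blocking_cl ? -EAGAIN : -ENOSPC;
        case INSUFFICIENT_DEVICES:
                /* Not enough usable devices to satisfy the replica count */
                return -EROFS;
        }
        return -EINVAL;         /* unreachable; the real switch BUG()s here */
}
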
@@ -70,12 +70,6 @@
 #include <linux/rculist.h>
 #include <linux/rcupdate.h>
 
-enum bucket_alloc_ret {
-        ALLOC_SUCCESS,
-        OPEN_BUCKETS_EMPTY,
-        FREELIST_EMPTY,         /* Allocator thread not keeping up */
-};
-
 /*
  * Open buckets represent a bucket that's currently being allocated from. They
  * serve two purposes:
@@ -395,7 +389,8 @@ static void add_new_bucket(struct bch_fs *c,
         ob_push(c, ptrs, ob);
 }
 
-int bch2_bucket_alloc_set(struct bch_fs *c,
+enum bucket_alloc_ret
+bch2_bucket_alloc_set(struct bch_fs *c,
                       struct open_buckets *ptrs,
                       struct dev_stripe_state *stripe,
                       struct bch_devs_mask *devs_may_alloc,
@@ -409,7 +404,7 @@ int bch2_bucket_alloc_set(struct bch_fs *c,
         struct dev_alloc_list devs_sorted =
                 bch2_dev_alloc_list(c, stripe, devs_may_alloc);
         struct bch_dev *ca;
-        bool alloc_failure = false;
+        enum bucket_alloc_ret ret = INSUFFICIENT_DEVICES;
         unsigned i;
 
         BUG_ON(*nr_effective >= nr_replicas);
@@ -427,16 +422,10 @@ int bch2_bucket_alloc_set(struct bch_fs *c,
                 ob = bch2_bucket_alloc(c, ca, reserve,
                                        flags & BUCKET_MAY_ALLOC_PARTIAL, cl);
                 if (IS_ERR(ob)) {
-                        enum bucket_alloc_ret ret = -PTR_ERR(ob);
-
-                        WARN_ON(reserve == RESERVE_MOVINGGC &&
-                                ret != OPEN_BUCKETS_EMPTY);
+                        ret = -PTR_ERR(ob);
 
                         if (cl)
-                                return -EAGAIN;
-                        if (ret == OPEN_BUCKETS_EMPTY)
-                                return -ENOSPC;
-                        alloc_failure = true;
+                                return ret;
                         continue;
                 }
@@ -446,10 +435,10 @@ int bch2_bucket_alloc_set(struct bch_fs *c,
                 bch2_dev_stripe_increment(c, ca, stripe);
 
                 if (*nr_effective >= nr_replicas)
-                        return 0;
+                        return ALLOC_SUCCESS;
         }
 
-        return alloc_failure ? -ENOSPC : -EROFS;
+        return ret;
 }
 
 /* Allocate from stripes: */
@@ -546,7 +535,8 @@ static void get_buckets_from_writepoint(struct bch_fs *c,
         wp->ptrs = ptrs_skip;
 }
 
-static int open_bucket_add_buckets(struct bch_fs *c,
+static enum bucket_alloc_ret
+open_bucket_add_buckets(struct bch_fs *c,
                         struct open_buckets *ptrs,
                         struct write_point *wp,
                         struct bch_devs_list *devs_have,
@@ -562,8 +552,8 @@ static int open_bucket_add_buckets(struct bch_fs *c,
         struct bch_devs_mask devs;
         struct open_bucket *ob;
         struct closure *cl = NULL;
+        enum bucket_alloc_ret ret;
         unsigned i;
-        int ret;
 
         rcu_read_lock();
         devs = target_rw_devs(c, wp->type, target);
@@ -608,7 +598,7 @@ static int open_bucket_add_buckets(struct bch_fs *c,
         ret = bch2_bucket_alloc_set(c, ptrs, &wp->stripe, &devs,
                                 nr_replicas, nr_effective, have_cache,
                                 reserve, flags, cl);
-        if (ret && ret != -EROFS && !cl && _cl) {
+        if (ret && ret != INSUFFICIENT_DEVICES && !cl && _cl) {
                 cl = _cl;
                 goto retry_blocking;
         }
@@ -799,7 +789,8 @@ struct write_point *bch2_alloc_sectors_start(struct bch_fs *c,
         unsigned nr_effective, write_points_nr;
         unsigned ob_flags = 0;
         bool have_cache;
-        int ret, i;
+        enum bucket_alloc_ret ret;
+        int i;
 
         if (!(flags & BCH_WRITE_ONLY_SPECIFIED_DEVS))
                 ob_flags |= BUCKET_ALLOC_USE_DURABILITY;
@@ -844,10 +835,13 @@ struct write_point *bch2_alloc_sectors_start(struct bch_fs *c,
 alloc_done:
         BUG_ON(!ret && nr_effective < nr_replicas);
 
+        WARN_ON(reserve == RESERVE_MOVINGGC &&
+                ret == FREELIST_EMPTY);
+
         if (erasure_code && !ec_open_bucket(c, &ptrs))
                 pr_debug("failed to get ec bucket: ret %u", ret);
 
-        if (ret == -EROFS &&
+        if (ret == INSUFFICIENT_DEVICES &&
             nr_effective >= nr_replicas_required)
                 ret = 0;
@@ -881,11 +875,19 @@ struct write_point *bch2_alloc_sectors_start(struct bch_fs *c,
 
         mutex_unlock(&wp->lock);
 
-        if (ret == -ENOSPC &&
+        if (ret == FREELIST_EMPTY &&
             try_decrease_writepoints(c, write_points_nr))
                 goto retry;
 
-        return ERR_PTR(ret);
+        switch (ret) {
+        case OPEN_BUCKETS_EMPTY:
+        case FREELIST_EMPTY:
+                return cl ? ERR_PTR(-EAGAIN) : ERR_PTR(-ENOSPC);
+        case INSUFFICIENT_DEVICES:
+                return ERR_PTR(-EROFS);
+        default:
+                BUG();
+        }
 }
 
 /*
@@ -12,6 +12,13 @@ struct bch_dev;
 struct bch_fs;
 struct bch_devs_List;
 
+enum bucket_alloc_ret {
+        ALLOC_SUCCESS,
+        OPEN_BUCKETS_EMPTY,
+        FREELIST_EMPTY,         /* Allocator thread not keeping up */
+        INSUFFICIENT_DEVICES,
+};
+
 struct dev_alloc_list {
         unsigned nr;
         u8 devs[BCH_SB_MEMBERS_MAX];
@@ -92,7 +99,8 @@ static inline void bch2_open_bucket_get(struct bch_fs *c,
         }
 }
 
-int bch2_bucket_alloc_set(struct bch_fs *, struct open_buckets *,
+enum bucket_alloc_ret
+bch2_bucket_alloc_set(struct bch_fs *, struct open_buckets *,
                       struct dev_stripe_state *, struct bch_devs_mask *,
                       unsigned, unsigned *, bool *, enum alloc_reserve,
                       unsigned, struct closure *);
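
With the enum exposed in the header, callers can tell transient failures from hard ones. As a rough, self-contained illustration of the decision open_bucket_add_buckets() makes above, here is the "ret && ret != INSUFFICIENT_DEVICES && !cl && _cl" test pulled out into a hypothetical helper (not part of the tree); the enum is repeated so the snippet compiles on its own.

#include <stdbool.h>

/* Same values the patch adds to the allocation header. */
enum bucket_alloc_ret {
        ALLOC_SUCCESS,
        OPEN_BUCKETS_EMPTY,
        FREELIST_EMPTY,
        INSUFFICIENT_DEVICES,
};

/*
 * Hypothetical helper: open buckets or the freelist running dry can clear up
 * once in-flight writes finish or the allocator thread catches up, so those
 * failures are worth retrying in blocking mode if the caller supplied a
 * closure; having too few usable devices is not going to fix itself.
 */
static bool bucket_alloc_worth_blocking(enum bucket_alloc_ret ret,
                                        bool have_blocking_cl)
{
        return ret != ALLOC_SUCCESS &&
               ret != INSUFFICIENT_DEVICES &&
               have_blocking_cl;
}
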
@@ -246,11 +246,14 @@ int bch2_migrate_write_init(struct bch_fs *c, struct migrate_write *m,
         m->op.target            = data_opts.target,
         m->op.write_point       = wp;
 
-        if (m->data_opts.btree_insert_flags & BTREE_INSERT_USE_RESERVE)
+        if (m->data_opts.btree_insert_flags & BTREE_INSERT_USE_RESERVE) {
                 m->op.alloc_reserve = RESERVE_MOVINGGC;
+        } else {
+                /* XXX: this should probably be passed in */
+                m->op.flags |= BCH_WRITE_ONLY_SPECIFIED_DEVS;
+        }
 
-        m->op.flags |= BCH_WRITE_ONLY_SPECIFIED_DEVS|
-                BCH_WRITE_PAGES_STABLE|
+        m->op.flags |= BCH_WRITE_PAGES_STABLE|
                 BCH_WRITE_PAGES_OWNED|
                 BCH_WRITE_DATA_ENCODED|
                 BCH_WRITE_FROM_INTERNAL;
@@ -105,7 +105,7 @@ static enum data_cmd copygc_pred(struct bch_fs *c, void *arg,
                         return DATA_SKIP;
 
                 /* XXX: use io_opts for this inode */
-                data_opts->target               = dev_to_target(dev_idx);
+                data_opts->target               = io_opts->background_target;
                 data_opts->btree_insert_flags   = BTREE_INSERT_USE_RESERVE;
                 data_opts->rewrite_dev          = dev_idx;
                 return DATA_REWRITE;
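
The behavioural change the commit title refers to is this last hunk: copygc rewrites used to be pinned back onto the device they came from via dev_to_target(dev_idx), which fit the old one-thread-per-device model. With a single copygc thread and write point per filesystem, the rewrite now just follows the inode's background_target, while rewrite_dev still records which existing copy is being replaced. Below is a rough standalone sketch of that selection; dev_to_target_stub() is a placeholder, not the real encoding.

#include <stdbool.h>

/* Placeholder encoding; the real dev_to_target() lives in bcachefs. */
static unsigned dev_to_target_stub(unsigned dev_idx)
{
        return dev_idx + 1;
}

/*
 * Old vs. new target selection for a copygc rewrite.  A background_target of
 * 0 means no target is set, so the per-filesystem copygc write point may
 * allocate the replacement copy on any rw device.
 */
static unsigned copygc_rewrite_target(unsigned background_target,
                                      unsigned dev_idx,
                                      bool pre_patch)
{
        if (pre_patch)
                return dev_to_target_stub(dev_idx); /* pin to source device */

        return background_target;                   /* may be 0: anywhere */
}
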