Commit 8b335bae authored by Kent Overstreet, committed by Kent Overstreet

bcachefs: Assorted fixes for running on very small devices

It's now possible to create and use a filesystem on a 512k device with
4k buckets (though at that size we still waste almost half to internal
reserves)
Signed-off-by: Kent Overstreet <kent.overstreet@linux.dev>
parent b092dadd
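To put the "almost half to internal reserves" figure in context, here is a rough back-of-the-envelope sketch (my own annotation, not part of the commit) that plugs nbuckets = 128, i.e. a 512k device with 4k buckets, into the new reserve formulas from the hunks below. BTREE_MAX_DEPTH = 4 is an assumption taken from bcachefs_format.h; the other constants come straight from this diff.

/* Hypothetical check, not code from the commit. */
#include <stdio.h>

#define MAX(a, b)               ((a) > (b) ? (a) : (b))

#define BTREE_MAX_DEPTH         4U      /* assumed, from bcachefs_format.h */
#define BTREE_RESERVE_MAX       (BTREE_MAX_DEPTH + (BTREE_MAX_DEPTH - 1))
#define BTREE_NODE_RESERVE      BTREE_RESERVE_MAX       /* new value; was BTREE_RESERVE_MAX * 4 */
#define BCH_JOURNAL_BUCKETS_MIN 8                       /* new value; was 20 */

int main(void)
{
        size_t nbuckets = 128;  /* 512k device / 4k buckets */

        /* New formulas from bch2_dev_buckets_resize() and ALLOC_SCAN_BATCH(): */
        size_t reserve_none     = MAX((size_t) 1, nbuckets >> 9);      /* floor was 4 */
        size_t copygc_reserve   = MAX((size_t) 2, nbuckets >> 7);      /* floor was 16 */
        size_t alloc_scan_batch = MAX((size_t) 1, nbuckets >> 9);      /* was 0 on this device */

        printf("reserve_none     %zu buckets\n", reserve_none);
        printf("copygc_reserve   %zu buckets\n", copygc_reserve);
        printf("alloc_scan_batch %zu buckets\n", alloc_scan_batch);
        printf("btree reserve    %u nodes (one bucket each if btree nodes fill a bucket)\n",
               BTREE_NODE_RESERVE);
        printf("journal          %u buckets minimum\n", BCH_JOURNAL_BUCKETS_MIN);
        return 0;
}

With the old floors (reserve_none 4, copygc 16, free_inc 16, a 28-node btree reserve assuming BTREE_MAX_DEPTH = 4, and 20 journal buckets), the fixed reserves alone added up to well over half of the 128 buckets such a device has. Even the smaller new floors, plus the journal and btree reserves and superblock, still account for a large chunk of the device, which is consistent with the commit message still counting almost half as internal reserves.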
@@ -374,6 +374,11 @@ static void bch2_rescale_bucket_io_times(struct bch_fs *c, int rw)
 	}
 }
 
+static inline u64 bucket_clock_freq(u64 capacity)
+{
+	return max(capacity >> 10, 2028ULL);
+}
+
 static void bch2_inc_clock_hand(struct io_timer *timer)
 {
 	struct bucket_clock *clock = container_of(timer,
@@ -412,7 +417,7 @@ static void bch2_inc_clock_hand(struct io_timer *timer)
 	 * RW mode (that will be 0 when we're RO, yet we can still service
 	 * reads)
 	 */
-	timer->expire += capacity >> 10;
+	timer->expire += bucket_clock_freq(capacity);
 
 	bch2_io_timer_add(&c->io_clock[clock->rw], timer);
 }
@@ -424,7 +429,7 @@ static void bch2_bucket_clock_init(struct bch_fs *c, int rw)
 	clock->hand		= 1;
 	clock->rw		= rw;
 	clock->rescale.fn	= bch2_inc_clock_hand;
-	clock->rescale.expire	= c->capacity >> 10;
+	clock->rescale.expire	= bucket_clock_freq(c->capacity);
 	mutex_init(&clock->lock);
 }
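A note on the helper introduced above (my reading, not stated in the commit): c->capacity appears to be measured in 512-byte sectors, so on a 512k device the old expression capacity >> 10 evaluates to 1 and the IO clock hand would be rearmed after almost every sector of IO. The 2028-sector floor keeps the rearm period at roughly 1MB of IO regardless of device size. A minimal standalone sketch of the same expression, with the kernel's max() swapped for a ternary:

/* Sketch only; assumes capacity is in 512-byte sectors. */
#include <stdio.h>

typedef unsigned long long u64;

static inline u64 bucket_clock_freq(u64 capacity)
{
        /* Same expression as the helper above: don't let the IO clock
         * rearm interval collapse when capacity >> 10 is tiny. */
        return capacity >> 10 > 2028ULL ? capacity >> 10 : 2028ULL;
}

int main(void)
{
        u64 tiny = 1024;                /* 512k device = 1024 sectors */
        u64 big  = 2048ULL << 20;       /* 1T device   = 2^31 sectors */

        printf("512k device: old period %llu sectors, new %llu\n",
               tiny >> 10, bucket_clock_freq(tiny));
        printf("1T device:   old period %llu sectors, new %llu\n",
               big >> 10, bucket_clock_freq(big));
        return 0;
}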
@@ -1011,8 +1016,6 @@ void bch2_recalc_capacity(struct bch_fs *c)
 		for (j = 0; j < RESERVE_NONE; j++)
 			dev_reserve += ca->free[j].size;
 
-		dev_reserve += ca->free_inc.size;
-
 		dev_reserve += 1;	/* btree write point */
 		dev_reserve += 1;	/* copygc write point */
 		dev_reserve += 1;	/* rebalance write point */
@@ -6,7 +6,7 @@
 #include "alloc_types.h"
 #include "debug.h"
 
-#define ALLOC_SCAN_BATCH(ca)	((ca)->mi.nbuckets >> 9)
+#define ALLOC_SCAN_BATCH(ca)	max_t(size_t, 1, (ca)->mi.nbuckets >> 9)
 
 const char *bch2_alloc_invalid(const struct bch_fs *, struct bkey_s_c);
 int bch2_alloc_to_text(struct bch_fs *, char *, size_t, struct bkey_s_c);
@@ -325,7 +325,7 @@ enum bch_time_stats {
 #define BTREE_RESERVE_MAX	(BTREE_MAX_DEPTH + (BTREE_MAX_DEPTH - 1))
 
 /* Size of the freelist we allocate btree nodes from: */
-#define BTREE_NODE_RESERVE	(BTREE_RESERVE_MAX * 4)
+#define BTREE_NODE_RESERVE	BTREE_RESERVE_MAX
 
 struct btree;
@@ -909,6 +909,8 @@ struct bch_sb_field_journal {
 /* BCH_SB_FIELD_members: */
 
+#define BCH_MIN_NR_NBUCKETS	(1 << 6)
+
 struct bch_member {
 	__uuid_t	uuid;
 	__le64		nbuckets;	/* device size */
@@ -1391,7 +1393,7 @@ struct jset {
 LE32_BITMASK(JSET_CSUM_TYPE,	struct jset, flags, 0, 4);
 LE32_BITMASK(JSET_BIG_ENDIAN,	struct jset, flags, 4, 5);
 
-#define BCH_JOURNAL_BUCKETS_MIN		20
+#define BCH_JOURNAL_BUCKETS_MIN		8
 
 /* Btree: */
@@ -887,9 +887,9 @@ int bch2_dev_buckets_resize(struct bch_fs *c, struct bch_dev *ca, u64 nbuckets)
 	size_t btree_reserve	= DIV_ROUND_UP(BTREE_NODE_RESERVE,
 				ca->mi.bucket_size / c->opts.btree_node_size);
 	/* XXX: these should be tunable */
-	size_t reserve_none	= max_t(size_t, 4, nbuckets >> 9);
-	size_t copygc_reserve	= max_t(size_t, 16, nbuckets >> 7);
-	size_t free_inc_nr	= max(max_t(size_t, 16, nbuckets >> 12),
+	size_t reserve_none	= max_t(size_t, 1, nbuckets >> 9);
+	size_t copygc_reserve	= max_t(size_t, 2, nbuckets >> 7);
+	size_t free_inc_nr	= max(max_t(size_t, 1, nbuckets >> 12),
 				      btree_reserve);
 	bool resize = ca->buckets != NULL,
 	     start_copygc = ca->copygc_thread != NULL;
@@ -278,7 +278,7 @@ int bch2_fs_recovery(struct bch_fs *c)
 	return ret;
 err:
 fsck_err:
-	BUG_ON(!ret);
+	pr_err("Error in recovery: %s (%i)", err, ret);
 	goto out;
 }
@@ -381,6 +381,6 @@ int bch2_fs_initialize(struct bch_fs *c)
 	return 0;
 err:
-	BUG_ON(!ret);
+	pr_err("Error initializing new filesystem: %s (%i)", err, ret);
 	return ret;
 }
@@ -811,7 +811,7 @@ static const char *bch2_sb_validate_members(struct bch_sb *sb,
 			return "Too many buckets";
 
 		if (le64_to_cpu(m->nbuckets) -
-		    le16_to_cpu(m->first_bucket) < 1 << 10)
+		    le16_to_cpu(m->first_bucket) < BCH_MIN_NR_NBUCKETS)
 			return "Not enough buckets";
 
 		if (le16_to_cpu(m->bucket_size) <
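Tying the new minimum back to the commit message (my own arithmetic, not from the page): a 512k device with 4k buckets has 128 buckets, which fails the old 1 << 10 (1024 bucket) minimum but clears the new BCH_MIN_NR_NBUCKETS of 1 << 6 (64), as long as first_bucket stays small. A quick standalone check:

/* Hypothetical check of the relaxed minimum; constants from the hunks above. */
#include <stdio.h>

#define OLD_MIN_NR_NBUCKETS     (1 << 10)
#define BCH_MIN_NR_NBUCKETS     (1 << 6)

int main(void)
{
        unsigned long long nbuckets = (512ULL << 10) / (4 << 10);      /* 128 */
        unsigned first_bucket = 1;      /* assumed; depends on superblock layout */

        printf("usable buckets: %llu\n", nbuckets - first_bucket);
        printf("old check: %s\n", nbuckets - first_bucket < OLD_MIN_NR_NBUCKETS ?
               "Not enough buckets" : "ok");
        printf("new check: %s\n", nbuckets - first_bucket < BCH_MIN_NR_NBUCKETS ?
               "Not enough buckets" : "ok");
        return 0;
}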