Commit a20ffa7d authored by Darrick J. Wong

xfs: add debug knobs to control btree bulk load slack factors

Add some debug knobs so that we can control the leaf and node block
slack when rebuilding btrees.

For developers, it might be useful to construct btrees of various
heights by crafting a filesystem with a certain number of records and
then using repair+knobs to rebuild the index with a certain shape.
Practically speaking, you'd only ever do that for extreme stress
testing of the runtime code or the btree generator.
Signed-off-by: Darrick J. Wong <djwong@kernel.org>
Reviewed-by: Christoph Hellwig <hch@lst.de>
parent 26de6462
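For illustration only (not part of this patch): assuming the two new attributes show up under /sys/fs/xfs/debug/ alongside the other entries in xfs_dbg_attrs, a developer could pin both slack values from a small userspace helper before triggering an online rebuild. The sysfs path and the helper below are assumptions for this sketch; a larger slack leaves more empty slots per block, which spreads the records over more blocks and, eventually, more btree levels.

/*
 * Hypothetical helper: write a slack value into one of the debug knobs.
 * Assumes the attributes are exposed at /sys/fs/xfs/debug/; writing -1
 * restores the "let the bulk loader decide" default.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int set_knob(const char *path, const char *val)
{
        int fd = open(path, O_WRONLY);

        if (fd < 0) {
                perror(path);
                return -1;
        }
        if (write(fd, val, strlen(val)) != (ssize_t)strlen(val)) {
                perror("write");
                close(fd);
                return -1;
        }
        return close(fd);
}

int main(void)
{
        /* Leave 64 record slots empty in each new leaf block. */
        if (set_knob("/sys/fs/xfs/debug/bload_leaf_slack", "64"))
                return 1;
        /* Leave 64 key/ptr slots empty in each new node block. */
        if (set_knob("/sys/fs/xfs/debug/bload_node_slack", "64"))
                return 1;
        /* Now trigger the online repair (e.g. via xfs_scrub) to rebuild the btree. */
        return 0;
}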
@@ -32,6 +32,7 @@
  * btree bulk loading code calculates for us. However, there are some
  * exceptions to this rule:
  *
+ * (0) If someone turned one of the debug knobs.
  * (1) If this is a per-AG btree and the AG has less than 10% space free.
  * (2) If this is an inode btree and the FS has less than 10% space free.
@@ -47,9 +48,13 @@ xrep_newbt_estimate_slack(
         uint64_t        free;
         uint64_t        sz;

-        /* Let the btree code compute the default slack values. */
-        bload->leaf_slack = -1;
-        bload->node_slack = -1;
+        /*
+         * The xfs_globals values are set to -1 (i.e. take the bload defaults)
+         * unless someone has set them otherwise, so we just pull the values
+         * here.
+         */
+        bload->leaf_slack = xfs_globals.bload_leaf_slack;
+        bload->node_slack = xfs_globals.bload_node_slack;

         if (sc->ops->type == ST_PERAG) {
                 free = sc->sa.pag->pagf_freeblks;
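To make the exception rules above concrete, here is a stand-alone sketch of the slack policy, with local stand-ins for the kernel types. This is not the body of xrep_newbt_estimate_slack from the patch: the 10% threshold comes from the comment above, but the low-space fallback of two empty slots is an assumption chosen for illustration.

/*
 * Stand-alone sketch of the slack policy.  Not the code from this
 * commit; the low-space fallback of two empty slots is an assumption.
 */
#include <stdint.h>
#include <stdio.h>

struct bload_sketch {
        int leaf_slack;         /* empty record slots per leaf block */
        int node_slack;         /* empty key/ptr slots per node block */
};

/* Mirrors xfs_globals.bload_{leaf,node}_slack: -1 means "use defaults". */
static int knob_leaf_slack = -1;
static int knob_node_slack = -1;

static void estimate_slack_sketch(uint64_t free, uint64_t sz,
                                  struct bload_sketch *bload)
{
        /* Start from the debug knobs instead of hard-coding -1. */
        bload->leaf_slack = knob_leaf_slack;
        bload->node_slack = knob_node_slack;

        /* More than 10% free: keep whatever the knobs (or defaults) say. */
        if (free >= sz / 10)
                return;

        /* Low on space: pack blocks tightly unless a knob already chose. */
        if (bload->leaf_slack < 0)
                bload->leaf_slack = 2;
        if (bload->node_slack < 0)
                bload->node_slack = 2;
}

int main(void)
{
        struct bload_sketch bload;

        estimate_slack_sketch(5, 100, &bload); /* 5% free: packs tightly */
        printf("leaf=%d node=%d\n", bload.leaf_slack, bload.node_slack);
        return 0;
}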
@@ -44,4 +44,16 @@ struct xfs_globals xfs_globals = {
         .pwork_threads = -1,    /* automatic thread detection */
         .larp = false,          /* log attribute replay */
 #endif
+
+        /*
+         * Leave this many record slots empty when bulk loading btrees. By
+         * default we load new btree leaf blocks 75% full.
+         */
+        .bload_leaf_slack = -1,
+
+        /*
+         * Leave this many key/ptr slots empty when bulk loading btrees. By
+         * default we load new btree node blocks 75% full.
+         */
+        .bload_node_slack = -1,
 };
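As a rough illustration of what the "75% full" default above means in terms of slack: the sketch below assumes the bulk loader treats a negative slack as "keep a quarter of the slots free" and uses a non-negative knob value verbatim. The exact rule lives in the btree bulk loading code, not in this patch, and maxrecs is a made-up example value.

#include <stdio.h>

/*
 * Illustration only: assume a negative knob makes the bulk loader keep
 * a quarter of the slots free (75% full); a non-negative knob is used
 * as-is.  maxrecs is a made-up example value.
 */
static int resolve_slack(int knob, int maxrecs)
{
        if (knob < 0)
                return maxrecs / 4;     /* default: leave 25% of the slots empty */
        return knob;                    /* explicit knob value wins */
}

int main(void)
{
        int maxrecs = 16;       /* hypothetical records per block */

        printf("knob=-1 -> slack %d\n", resolve_slack(-1, maxrecs));    /* 4 */
        printf("knob=10 -> slack %d\n", resolve_slack(10, maxrecs));    /* 10 */
        return 0;
}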
@@ -85,6 +85,8 @@ struct xfs_globals {
         int pwork_threads;      /* parallel workqueue threads */
         bool larp;              /* log attribute replay */
 #endif
+        int bload_leaf_slack;   /* btree bulk load leaf slack */
+        int bload_node_slack;   /* btree bulk load node slack */
         int log_recovery_delay; /* log recovery delay (secs) */
         int mount_delay;        /* mount setup delay (secs) */
         bool bug_on_assert;     /* BUG() the kernel on assert failure */
@@ -262,6 +262,58 @@ larp_show(
 XFS_SYSFS_ATTR_RW(larp);
 #endif /* DEBUG */

+STATIC ssize_t
+bload_leaf_slack_store(
+        struct kobject  *kobject,
+        const char      *buf,
+        size_t          count)
+{
+        int             ret;
+        int             val;
+
+        ret = kstrtoint(buf, 0, &val);
+        if (ret)
+                return ret;
+
+        xfs_globals.bload_leaf_slack = val;
+        return count;
+}
+
+STATIC ssize_t
+bload_leaf_slack_show(
+        struct kobject  *kobject,
+        char            *buf)
+{
+        return snprintf(buf, PAGE_SIZE, "%d\n", xfs_globals.bload_leaf_slack);
+}
+XFS_SYSFS_ATTR_RW(bload_leaf_slack);
+
+STATIC ssize_t
+bload_node_slack_store(
+        struct kobject  *kobject,
+        const char      *buf,
+        size_t          count)
+{
+        int             ret;
+        int             val;
+
+        ret = kstrtoint(buf, 0, &val);
+        if (ret)
+                return ret;
+
+        xfs_globals.bload_node_slack = val;
+        return count;
+}
+
+STATIC ssize_t
+bload_node_slack_show(
+        struct kobject  *kobject,
+        char            *buf)
+{
+        return snprintf(buf, PAGE_SIZE, "%d\n", xfs_globals.bload_node_slack);
+}
+XFS_SYSFS_ATTR_RW(bload_node_slack);
+
 static struct attribute *xfs_dbg_attrs[] = {
         ATTR_LIST(bug_on_assert),
         ATTR_LIST(log_recovery_delay),
@@ -271,6 +323,8 @@ static struct attribute *xfs_dbg_attrs[] = {
         ATTR_LIST(pwork_threads),
         ATTR_LIST(larp),
 #endif
+        ATTR_LIST(bload_leaf_slack),
+        ATTR_LIST(bload_node_slack),
         NULL,
 };
 ATTRIBUTE_GROUPS(xfs_dbg);