Commit 4a200a09 authored by Darrick J. Wong

xfs: implement masked btree key comparisons for _has_records scans

For keyspace fullness scans, we want to be able to mask off the parts of
the key that we don't care about.  For most btree types we /do/ want the
full keyspace, but for checking that a given space usage also has a full
complement of rmapbt records (even if different/multiple owners) we need
this masking so that we only track sparseness of rm_startblock, not the
whole keyspace (which is extremely sparse).

Augment the ->diff_two_keys and ->keys_contiguous helpers to take a
third union xfs_btree_key argument, and wire up xfs_rmap_has_records to
pass this through.  This third "mask" argument should contain a nonzero
value in each structure field that should be used in the key comparisons
done during the scan.
Signed-off-by: Darrick J. Wong <djwong@kernel.org>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
parent 6abc7aef
...@@ -3764,7 +3764,7 @@ xfs_alloc_has_records( ...@@ -3764,7 +3764,7 @@ xfs_alloc_has_records(
memset(&high, 0xFF, sizeof(high)); memset(&high, 0xFF, sizeof(high));
high.a.ar_startblock = bno + len - 1; high.a.ar_startblock = bno + len - 1;
return xfs_btree_has_records(cur, &low, &high, outcome); return xfs_btree_has_records(cur, &low, &high, NULL, outcome);
} }
/* /*
......
...@@ -260,20 +260,27 @@ STATIC int64_t ...@@ -260,20 +260,27 @@ STATIC int64_t
xfs_bnobt_diff_two_keys( xfs_bnobt_diff_two_keys(
struct xfs_btree_cur *cur, struct xfs_btree_cur *cur,
const union xfs_btree_key *k1, const union xfs_btree_key *k1,
const union xfs_btree_key *k2) const union xfs_btree_key *k2,
const union xfs_btree_key *mask)
{ {
ASSERT(!mask || mask->alloc.ar_startblock);
return (int64_t)be32_to_cpu(k1->alloc.ar_startblock) - return (int64_t)be32_to_cpu(k1->alloc.ar_startblock) -
be32_to_cpu(k2->alloc.ar_startblock); be32_to_cpu(k2->alloc.ar_startblock);
} }
STATIC int64_t STATIC int64_t
xfs_cntbt_diff_two_keys( xfs_cntbt_diff_two_keys(
struct xfs_btree_cur *cur, struct xfs_btree_cur *cur,
const union xfs_btree_key *k1, const union xfs_btree_key *k1,
const union xfs_btree_key *k2) const union xfs_btree_key *k2,
const union xfs_btree_key *mask)
{ {
int64_t diff; int64_t diff;
ASSERT(!mask || (mask->alloc.ar_blockcount &&
mask->alloc.ar_startblock));
diff = be32_to_cpu(k1->alloc.ar_blockcount) - diff = be32_to_cpu(k1->alloc.ar_blockcount) -
be32_to_cpu(k2->alloc.ar_blockcount); be32_to_cpu(k2->alloc.ar_blockcount);
if (diff) if (diff)
...@@ -427,8 +434,11 @@ STATIC enum xbtree_key_contig ...@@ -427,8 +434,11 @@ STATIC enum xbtree_key_contig
xfs_allocbt_keys_contiguous( xfs_allocbt_keys_contiguous(
struct xfs_btree_cur *cur, struct xfs_btree_cur *cur,
const union xfs_btree_key *key1, const union xfs_btree_key *key1,
const union xfs_btree_key *key2) const union xfs_btree_key *key2,
const union xfs_btree_key *mask)
{ {
ASSERT(!mask || mask->alloc.ar_startblock);
return xbtree_key_contig(be32_to_cpu(key1->alloc.ar_startblock), return xbtree_key_contig(be32_to_cpu(key1->alloc.ar_startblock),
be32_to_cpu(key2->alloc.ar_startblock)); be32_to_cpu(key2->alloc.ar_startblock));
} }
......
...@@ -382,11 +382,14 @@ STATIC int64_t ...@@ -382,11 +382,14 @@ STATIC int64_t
xfs_bmbt_diff_two_keys( xfs_bmbt_diff_two_keys(
struct xfs_btree_cur *cur, struct xfs_btree_cur *cur,
const union xfs_btree_key *k1, const union xfs_btree_key *k1,
const union xfs_btree_key *k2) const union xfs_btree_key *k2,
const union xfs_btree_key *mask)
{ {
uint64_t a = be64_to_cpu(k1->bmbt.br_startoff); uint64_t a = be64_to_cpu(k1->bmbt.br_startoff);
uint64_t b = be64_to_cpu(k2->bmbt.br_startoff); uint64_t b = be64_to_cpu(k2->bmbt.br_startoff);
ASSERT(!mask || mask->bmbt.br_startoff);
/* /*
* Note: This routine previously casted a and b to int64 and subtracted * Note: This routine previously casted a and b to int64 and subtracted
* them to generate a result. This lead to problems if b was the * them to generate a result. This lead to problems if b was the
...@@ -504,8 +507,11 @@ STATIC enum xbtree_key_contig ...@@ -504,8 +507,11 @@ STATIC enum xbtree_key_contig
xfs_bmbt_keys_contiguous( xfs_bmbt_keys_contiguous(
struct xfs_btree_cur *cur, struct xfs_btree_cur *cur,
const union xfs_btree_key *key1, const union xfs_btree_key *key1,
const union xfs_btree_key *key2) const union xfs_btree_key *key2,
const union xfs_btree_key *mask)
{ {
ASSERT(!mask || mask->bmbt.br_startoff);
return xbtree_key_contig(be64_to_cpu(key1->bmbt.br_startoff), return xbtree_key_contig(be64_to_cpu(key1->bmbt.br_startoff),
be64_to_cpu(key2->bmbt.br_startoff)); be64_to_cpu(key2->bmbt.br_startoff));
} }
......
...@@ -5030,6 +5030,9 @@ struct xfs_btree_has_records { ...@@ -5030,6 +5030,9 @@ struct xfs_btree_has_records {
union xfs_btree_key start_key; union xfs_btree_key start_key;
union xfs_btree_key end_key; union xfs_btree_key end_key;
/* Mask for key comparisons, if desired. */
const union xfs_btree_key *key_mask;
/* Highest record key we've seen so far. */ /* Highest record key we've seen so far. */
union xfs_btree_key high_key; union xfs_btree_key high_key;
...@@ -5057,7 +5060,8 @@ xfs_btree_has_records_helper( ...@@ -5057,7 +5060,8 @@ xfs_btree_has_records_helper(
* then there is a hole at the start of the search range. * then there is a hole at the start of the search range.
* Classify this as sparse and stop immediately. * Classify this as sparse and stop immediately.
*/ */
if (xfs_btree_keycmp_lt(cur, &info->start_key, &rec_key)) if (xfs_btree_masked_keycmp_lt(cur, &info->start_key, &rec_key,
info->key_mask))
return -ECANCELED; return -ECANCELED;
} else { } else {
/* /*
...@@ -5068,7 +5072,7 @@ xfs_btree_has_records_helper( ...@@ -5068,7 +5072,7 @@ xfs_btree_has_records_helper(
* signal corruption. * signal corruption.
*/ */
key_contig = cur->bc_ops->keys_contiguous(cur, &info->high_key, key_contig = cur->bc_ops->keys_contiguous(cur, &info->high_key,
&rec_key); &rec_key, info->key_mask);
if (key_contig == XBTREE_KEY_OVERLAP && if (key_contig == XBTREE_KEY_OVERLAP &&
!(cur->bc_flags & XFS_BTREE_OVERLAPPING)) !(cur->bc_flags & XFS_BTREE_OVERLAPPING))
return -EFSCORRUPTED; return -EFSCORRUPTED;
...@@ -5081,7 +5085,8 @@ xfs_btree_has_records_helper( ...@@ -5081,7 +5085,8 @@ xfs_btree_has_records_helper(
* remember it for later. * remember it for later.
*/ */
cur->bc_ops->init_high_key_from_rec(&rec_high_key, rec); cur->bc_ops->init_high_key_from_rec(&rec_high_key, rec);
if (xfs_btree_keycmp_gt(cur, &rec_high_key, &info->high_key)) if (xfs_btree_masked_keycmp_gt(cur, &rec_high_key, &info->high_key,
info->key_mask))
info->high_key = rec_high_key; /* struct copy */ info->high_key = rec_high_key; /* struct copy */
return 0; return 0;
...@@ -5092,16 +5097,26 @@ xfs_btree_has_records_helper( ...@@ -5092,16 +5097,26 @@ xfs_btree_has_records_helper(
* map to any records; is fully mapped to records; or is partially mapped to * map to any records; is fully mapped to records; or is partially mapped to
* records. This is the btree record equivalent to determining if a file is * records. This is the btree record equivalent to determining if a file is
* sparse. * sparse.
*
* For most btree types, the record scan should use all available btree key
* fields to compare the keys encountered. These callers should pass NULL for
* @mask. However, some callers (e.g. scanning physical space in the rmapbt)
* want to ignore some part of the btree record keyspace when performing the
* comparison. These callers should pass in a union xfs_btree_key object with
* the fields that *should* be a part of the comparison set to any nonzero
* value, and the rest zeroed.
*/ */
int int
xfs_btree_has_records( xfs_btree_has_records(
struct xfs_btree_cur *cur, struct xfs_btree_cur *cur,
const union xfs_btree_irec *low, const union xfs_btree_irec *low,
const union xfs_btree_irec *high, const union xfs_btree_irec *high,
const union xfs_btree_key *mask,
enum xbtree_recpacking *outcome) enum xbtree_recpacking *outcome)
{ {
struct xfs_btree_has_records info = { struct xfs_btree_has_records info = {
.outcome = XBTREE_RECPACKING_EMPTY, .outcome = XBTREE_RECPACKING_EMPTY,
.key_mask = mask,
}; };
int error; int error;
...@@ -5129,7 +5144,8 @@ xfs_btree_has_records( ...@@ -5129,7 +5144,8 @@ xfs_btree_has_records(
* the end of the search range, classify this as full. Otherwise, * the end of the search range, classify this as full. Otherwise,
* there is a hole at the end of the search range. * there is a hole at the end of the search range.
*/ */
if (xfs_btree_keycmp_ge(cur, &info.high_key, &info.end_key)) if (xfs_btree_masked_keycmp_ge(cur, &info.high_key, &info.end_key,
mask))
info.outcome = XBTREE_RECPACKING_FULL; info.outcome = XBTREE_RECPACKING_FULL;
out: out:
......
...@@ -161,11 +161,14 @@ struct xfs_btree_ops { ...@@ -161,11 +161,14 @@ struct xfs_btree_ops {
/* /*
* Difference between key2 and key1 -- positive if key1 > key2, * Difference between key2 and key1 -- positive if key1 > key2,
* negative if key1 < key2, and zero if equal. * negative if key1 < key2, and zero if equal. If the @mask parameter
* is non NULL, each key field to be used in the comparison must
* contain a nonzero value.
*/ */
int64_t (*diff_two_keys)(struct xfs_btree_cur *cur, int64_t (*diff_two_keys)(struct xfs_btree_cur *cur,
const union xfs_btree_key *key1, const union xfs_btree_key *key1,
const union xfs_btree_key *key2); const union xfs_btree_key *key2,
const union xfs_btree_key *mask);
const struct xfs_buf_ops *buf_ops; const struct xfs_buf_ops *buf_ops;
...@@ -187,10 +190,13 @@ struct xfs_btree_ops { ...@@ -187,10 +190,13 @@ struct xfs_btree_ops {
* @key1 < K < @key2. To determine if two btree records are * @key1 < K < @key2. To determine if two btree records are
* immediately adjacent, @key1 should be the high key of the first * immediately adjacent, @key1 should be the high key of the first
* record and @key2 should be the low key of the second record. * record and @key2 should be the low key of the second record.
* If the @mask parameter is non NULL, each key field to be used in the
* comparison must contain a nonzero value.
*/ */
enum xbtree_key_contig (*keys_contiguous)(struct xfs_btree_cur *cur, enum xbtree_key_contig (*keys_contiguous)(struct xfs_btree_cur *cur,
const union xfs_btree_key *key1, const union xfs_btree_key *key1,
const union xfs_btree_key *key2); const union xfs_btree_key *key2,
const union xfs_btree_key *mask);
}; };
/* /*
...@@ -581,6 +587,7 @@ typedef bool (*xfs_btree_key_gap_fn)(struct xfs_btree_cur *cur, ...@@ -581,6 +587,7 @@ typedef bool (*xfs_btree_key_gap_fn)(struct xfs_btree_cur *cur,
int xfs_btree_has_records(struct xfs_btree_cur *cur, int xfs_btree_has_records(struct xfs_btree_cur *cur,
const union xfs_btree_irec *low, const union xfs_btree_irec *low,
const union xfs_btree_irec *high, const union xfs_btree_irec *high,
const union xfs_btree_key *mask,
enum xbtree_recpacking *outcome); enum xbtree_recpacking *outcome);
bool xfs_btree_has_more_records(struct xfs_btree_cur *cur); bool xfs_btree_has_more_records(struct xfs_btree_cur *cur);
...@@ -593,7 +600,7 @@ xfs_btree_keycmp_lt( ...@@ -593,7 +600,7 @@ xfs_btree_keycmp_lt(
const union xfs_btree_key *key1, const union xfs_btree_key *key1,
const union xfs_btree_key *key2) const union xfs_btree_key *key2)
{ {
return cur->bc_ops->diff_two_keys(cur, key1, key2) < 0; return cur->bc_ops->diff_two_keys(cur, key1, key2, NULL) < 0;
} }
static inline bool static inline bool
...@@ -602,7 +609,7 @@ xfs_btree_keycmp_gt( ...@@ -602,7 +609,7 @@ xfs_btree_keycmp_gt(
const union xfs_btree_key *key1, const union xfs_btree_key *key1,
const union xfs_btree_key *key2) const union xfs_btree_key *key2)
{ {
return cur->bc_ops->diff_two_keys(cur, key1, key2) > 0; return cur->bc_ops->diff_two_keys(cur, key1, key2, NULL) > 0;
} }
static inline bool static inline bool
...@@ -611,7 +618,7 @@ xfs_btree_keycmp_eq( ...@@ -611,7 +618,7 @@ xfs_btree_keycmp_eq(
const union xfs_btree_key *key1, const union xfs_btree_key *key1,
const union xfs_btree_key *key2) const union xfs_btree_key *key2)
{ {
return cur->bc_ops->diff_two_keys(cur, key1, key2) == 0; return cur->bc_ops->diff_two_keys(cur, key1, key2, NULL) == 0;
} }
static inline bool static inline bool
...@@ -641,6 +648,37 @@ xfs_btree_keycmp_ne( ...@@ -641,6 +648,37 @@ xfs_btree_keycmp_ne(
return !xfs_btree_keycmp_eq(cur, key1, key2); return !xfs_btree_keycmp_eq(cur, key1, key2);
} }
/* Masked key comparison helpers */
static inline bool
xfs_btree_masked_keycmp_lt(
struct xfs_btree_cur *cur,
const union xfs_btree_key *key1,
const union xfs_btree_key *key2,
const union xfs_btree_key *mask)
{
return cur->bc_ops->diff_two_keys(cur, key1, key2, mask) < 0;
}
static inline bool
xfs_btree_masked_keycmp_gt(
struct xfs_btree_cur *cur,
const union xfs_btree_key *key1,
const union xfs_btree_key *key2,
const union xfs_btree_key *mask)
{
return cur->bc_ops->diff_two_keys(cur, key1, key2, mask) > 0;
}
static inline bool
xfs_btree_masked_keycmp_ge(
struct xfs_btree_cur *cur,
const union xfs_btree_key *key1,
const union xfs_btree_key *key2,
const union xfs_btree_key *mask)
{
return !xfs_btree_masked_keycmp_lt(cur, key1, key2, mask);
}
/* Does this cursor point to the last block in the given level? */ /* Does this cursor point to the last block in the given level? */
static inline bool static inline bool
xfs_btree_islastblock( xfs_btree_islastblock(
......
...@@ -269,10 +269,13 @@ STATIC int64_t ...@@ -269,10 +269,13 @@ STATIC int64_t
xfs_inobt_diff_two_keys( xfs_inobt_diff_two_keys(
struct xfs_btree_cur *cur, struct xfs_btree_cur *cur,
const union xfs_btree_key *k1, const union xfs_btree_key *k1,
const union xfs_btree_key *k2) const union xfs_btree_key *k2,
const union xfs_btree_key *mask)
{ {
ASSERT(!mask || mask->inobt.ir_startino);
return (int64_t)be32_to_cpu(k1->inobt.ir_startino) - return (int64_t)be32_to_cpu(k1->inobt.ir_startino) -
be32_to_cpu(k2->inobt.ir_startino); be32_to_cpu(k2->inobt.ir_startino);
} }
static xfs_failaddr_t static xfs_failaddr_t
...@@ -387,8 +390,11 @@ STATIC enum xbtree_key_contig ...@@ -387,8 +390,11 @@ STATIC enum xbtree_key_contig
xfs_inobt_keys_contiguous( xfs_inobt_keys_contiguous(
struct xfs_btree_cur *cur, struct xfs_btree_cur *cur,
const union xfs_btree_key *key1, const union xfs_btree_key *key1,
const union xfs_btree_key *key2) const union xfs_btree_key *key2,
const union xfs_btree_key *mask)
{ {
ASSERT(!mask || mask->inobt.ir_startino);
return xbtree_key_contig(be32_to_cpu(key1->inobt.ir_startino), return xbtree_key_contig(be32_to_cpu(key1->inobt.ir_startino),
be32_to_cpu(key2->inobt.ir_startino)); be32_to_cpu(key2->inobt.ir_startino));
} }
......
...@@ -2019,7 +2019,7 @@ xfs_refcount_has_records( ...@@ -2019,7 +2019,7 @@ xfs_refcount_has_records(
high.rc.rc_startblock = bno + len - 1; high.rc.rc_startblock = bno + len - 1;
low.rc.rc_domain = high.rc.rc_domain = domain; low.rc.rc_domain = high.rc.rc_domain = domain;
return xfs_btree_has_records(cur, &low, &high, outcome); return xfs_btree_has_records(cur, &low, &high, NULL, outcome);
} }
int __init int __init
......
...@@ -202,10 +202,13 @@ STATIC int64_t ...@@ -202,10 +202,13 @@ STATIC int64_t
xfs_refcountbt_diff_two_keys( xfs_refcountbt_diff_two_keys(
struct xfs_btree_cur *cur, struct xfs_btree_cur *cur,
const union xfs_btree_key *k1, const union xfs_btree_key *k1,
const union xfs_btree_key *k2) const union xfs_btree_key *k2,
const union xfs_btree_key *mask)
{ {
ASSERT(!mask || mask->refc.rc_startblock);
return (int64_t)be32_to_cpu(k1->refc.rc_startblock) - return (int64_t)be32_to_cpu(k1->refc.rc_startblock) -
be32_to_cpu(k2->refc.rc_startblock); be32_to_cpu(k2->refc.rc_startblock);
} }
STATIC xfs_failaddr_t STATIC xfs_failaddr_t
...@@ -304,8 +307,11 @@ STATIC enum xbtree_key_contig ...@@ -304,8 +307,11 @@ STATIC enum xbtree_key_contig
xfs_refcountbt_keys_contiguous( xfs_refcountbt_keys_contiguous(
struct xfs_btree_cur *cur, struct xfs_btree_cur *cur,
const union xfs_btree_key *key1, const union xfs_btree_key *key1,
const union xfs_btree_key *key2) const union xfs_btree_key *key2,
const union xfs_btree_key *mask)
{ {
ASSERT(!mask || mask->refc.rc_startblock);
return xbtree_key_contig(be32_to_cpu(key1->refc.rc_startblock), return xbtree_key_contig(be32_to_cpu(key1->refc.rc_startblock),
be32_to_cpu(key2->refc.rc_startblock)); be32_to_cpu(key2->refc.rc_startblock));
} }
......
...@@ -2721,6 +2721,9 @@ xfs_rmap_has_records( ...@@ -2721,6 +2721,9 @@ xfs_rmap_has_records(
xfs_extlen_t len, xfs_extlen_t len,
enum xbtree_recpacking *outcome) enum xbtree_recpacking *outcome)
{ {
union xfs_btree_key mask = {
.rmap.rm_startblock = cpu_to_be32(-1U),
};
union xfs_btree_irec low; union xfs_btree_irec low;
union xfs_btree_irec high; union xfs_btree_irec high;
...@@ -2729,7 +2732,7 @@ xfs_rmap_has_records( ...@@ -2729,7 +2732,7 @@ xfs_rmap_has_records(
memset(&high, 0xFF, sizeof(high)); memset(&high, 0xFF, sizeof(high));
high.r.rm_startblock = bno + len - 1; high.r.rm_startblock = bno + len - 1;
return xfs_btree_has_records(cur, &low, &high, outcome); return xfs_btree_has_records(cur, &low, &high, &mask, outcome);
} }
/* /*
......
...@@ -273,31 +273,43 @@ STATIC int64_t ...@@ -273,31 +273,43 @@ STATIC int64_t
xfs_rmapbt_diff_two_keys( xfs_rmapbt_diff_two_keys(
struct xfs_btree_cur *cur, struct xfs_btree_cur *cur,
const union xfs_btree_key *k1, const union xfs_btree_key *k1,
const union xfs_btree_key *k2) const union xfs_btree_key *k2,
const union xfs_btree_key *mask)
{ {
const struct xfs_rmap_key *kp1 = &k1->rmap; const struct xfs_rmap_key *kp1 = &k1->rmap;
const struct xfs_rmap_key *kp2 = &k2->rmap; const struct xfs_rmap_key *kp2 = &k2->rmap;
int64_t d; int64_t d;
__u64 x, y; __u64 x, y;
/* Doesn't make sense to mask off the physical space part */
ASSERT(!mask || mask->rmap.rm_startblock);
d = (int64_t)be32_to_cpu(kp1->rm_startblock) - d = (int64_t)be32_to_cpu(kp1->rm_startblock) -
be32_to_cpu(kp2->rm_startblock); be32_to_cpu(kp2->rm_startblock);
if (d) if (d)
return d; return d;
x = be64_to_cpu(kp1->rm_owner); if (!mask || mask->rmap.rm_owner) {
y = be64_to_cpu(kp2->rm_owner); x = be64_to_cpu(kp1->rm_owner);
if (x > y) y = be64_to_cpu(kp2->rm_owner);
return 1; if (x > y)
else if (y > x) return 1;
return -1; else if (y > x)
return -1;
}
if (!mask || mask->rmap.rm_offset) {
/* Doesn't make sense to allow offset but not owner */
ASSERT(!mask || mask->rmap.rm_owner);
x = offset_keymask(be64_to_cpu(kp1->rm_offset));
y = offset_keymask(be64_to_cpu(kp2->rm_offset));
if (x > y)
return 1;
else if (y > x)
return -1;
}
x = offset_keymask(be64_to_cpu(kp1->rm_offset));
y = offset_keymask(be64_to_cpu(kp2->rm_offset));
if (x > y)
return 1;
else if (y > x)
return -1;
return 0; return 0;
} }
...@@ -448,13 +460,18 @@ STATIC enum xbtree_key_contig ...@@ -448,13 +460,18 @@ STATIC enum xbtree_key_contig
xfs_rmapbt_keys_contiguous( xfs_rmapbt_keys_contiguous(
struct xfs_btree_cur *cur, struct xfs_btree_cur *cur,
const union xfs_btree_key *key1, const union xfs_btree_key *key1,
const union xfs_btree_key *key2) const union xfs_btree_key *key2,
const union xfs_btree_key *mask)
{ {
ASSERT(!mask || mask->rmap.rm_startblock);
/* /*
* We only support checking contiguity of the physical space component. * We only support checking contiguity of the physical space component.
* If any callers ever need more specificity than that, they'll have to * If any callers ever need more specificity than that, they'll have to
* implement it here. * implement it here.
*/ */
ASSERT(!mask || (!mask->rmap.rm_owner && !mask->rmap.rm_offset));
return xbtree_key_contig(be32_to_cpu(key1->rmap.rm_startblock), return xbtree_key_contig(be32_to_cpu(key1->rmap.rm_startblock),
be32_to_cpu(key2->rmap.rm_startblock)); be32_to_cpu(key2->rmap.rm_startblock));
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment