Commit 232ea052 authored by Darrick J. Wong

xfs: enable sorting of xfile-backed arrays

The btree bulk loading code requires that records be provided in the
correct record sort order for the given btree type.  In general, repair
code cannot be required to collect records in order, and it is not
feasible to insert new records in the middle of an array to maintain
sort order.

Implement a sorting algorithm so that we can sort the records just prior
to bulk loading.  In principle, an xfarray could consume many gigabytes
of memory and its backing pages can be sent out to disk at any time.
This means that we cannot map the entire array into memory at once, so
we must find a way to divide the work into smaller portions (e.g. a
page) that /can/ be mapped into memory.
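
To make that constraint concrete, here is a minimal sketch (not part of
this patch) of what swapping two records looks like when every element
access has to go through the xfarray load/store helpers introduced by the
previous big-array patch; the function name and the caller-supplied
scratch buffers below are illustrative assumptions:

/* Illustrative sketch only: swap two records of an xfile-backed array. */
static int
xfarray_swap_sketch(
	struct xfarray	*array,
	xfarray_idx_t	a,
	xfarray_idx_t	b,
	void		*rec_a,	/* array->obj_size bytes */
	void		*rec_b)	/* array->obj_size bytes */
{
	int		error;

	/* Read both records into memory... */
	error = xfarray_load(array, a, rec_a);
	if (error)
		return error;
	error = xfarray_load(array, b, rec_b);
	if (error)
		return error;

	/* ...then write them back to each other's slots. */
	error = xfarray_store(array, a, rec_b);
	if (error)
		return error;
	return xfarray_store(array, b, rec_a);
}

Because each access is a function call that can fail and may touch the
backing xfile, pointer-based sorts over the whole array are off the
table; records have to be moved through small in-memory buffers.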

Quicksort seems like a reasonable fit for this purpose, since its divide
and conquer strategy keeps the average runtime at O(n log n) while the
recursion depth stays logarithmic.
The solution presented here is a port of the glibc implementation, which
itself is derived from the median-of-three and tail call recursion
strategies outlined by Sedgewick.
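
For readers unfamiliar with those techniques, here is a rough,
self-contained illustration in plain in-memory C of median-of-three
pivot selection plus an explicit partition stack that replaces
recursion.  It is only a sketch of the general idea, not the xfarray
code, which must perform every element access through xfile-backed
load and store calls:

/* Illustrative sketch only; not the xfarray implementation. */
static void swap_ints(int *x, int *y)
{
	int	tmp = *x;

	*x = *y;
	*y = tmp;
}

static void sketch_sort(int *a, int nr)
{
	struct { int lo, hi; } stack[64];	/* far more than 2 * log2(nr) */
	int	depth = 0;

	if (nr < 2)
		return;

	stack[0].lo = 0;
	stack[0].hi = nr - 1;

	while (depth >= 0) {
		int	lo = stack[depth].lo;
		int	hi = stack[depth].hi;
		int	mid = lo + (hi - lo) / 2;
		int	i, j, pivot;

		depth--;

		/* Sort small subsets with a simple insertion sort. */
		if (hi - lo < 8) {
			for (i = lo + 1; i <= hi; i++)
				for (j = i; j > lo && a[j - 1] > a[j]; j--)
					swap_ints(&a[j - 1], &a[j]);
			continue;
		}

		/* Median of the first, middle, and last elements. */
		if (a[mid] < a[lo])
			swap_ints(&a[mid], &a[lo]);
		if (a[hi] < a[mid])
			swap_ints(&a[hi], &a[mid]);
		if (a[mid] < a[lo])
			swap_ints(&a[mid], &a[lo]);
		pivot = a[mid];

		/* Partition the subset around the pivot value. */
		i = lo - 1;
		j = hi + 1;
		for (;;) {
			while (a[++i] < pivot)
				;
			while (a[--j] > pivot)
				;
			if (i >= j)
				break;
			swap_ints(&a[i], &a[j]);
		}

		/*
		 * Instead of recursing, push both halves onto the stack,
		 * larger half first, so that the smaller half is sorted
		 * next.  This is the tail call recursion trick described
		 * by Sedgewick.
		 */
		if (j - lo > hi - j - 1) {
			stack[++depth].lo = lo;
			stack[depth].hi = j;
			stack[++depth].lo = j + 1;
			stack[depth].hi = hi;
		} else {
			stack[++depth].lo = j + 1;
			stack[depth].hi = hi;
			stack[++depth].lo = lo;
			stack[depth].hi = j;
		}
	}
}

Sorting the smaller half first bounds the partition stack at roughly
log2(nr) entries, which is what lets an implementation preallocate a
fixed-size stack up front instead of recursing.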

Subsequent patches will optimize the implementation further by utilizing
the kernel's heapsort on directly-mapped memory whenever possible, and
improving the quicksort pivot selection algorithm to try to avoid O(n^2)
collapses.

Note: The sorting functionality gets its own patch because the basic big
array mechanisms were plenty for a single code patch.
Signed-off-by: Darrick J. Wong <djwong@kernel.org>
Reviewed-by: Kent Overstreet <kent.overstreet@linux.dev>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
parent 3934e8eb
@@ -18,6 +18,7 @@
struct xfile;
struct xfarray;
struct xfarray_sortinfo;
/*
 * ftrace's __print_symbolic requires that all enum values be wrapped in the
@@ -846,6 +847,119 @@ TRACE_EVENT(xfarray_create,
		  __entry->obj_size_log)
);
TRACE_EVENT(xfarray_isort,
	TP_PROTO(struct xfarray_sortinfo *si, uint64_t lo, uint64_t hi),
	TP_ARGS(si, lo, hi),
	TP_STRUCT__entry(
		__field(unsigned long, ino)
		__field(unsigned long long, lo)
		__field(unsigned long long, hi)
	),
	TP_fast_assign(
		__entry->ino = file_inode(si->array->xfile->file)->i_ino;
		__entry->lo = lo;
		__entry->hi = hi;
	),
	TP_printk("xfino 0x%lx lo %llu hi %llu elts %llu",
		  __entry->ino,
		  __entry->lo,
		  __entry->hi,
		  __entry->hi - __entry->lo)
);
TRACE_EVENT(xfarray_qsort,
	TP_PROTO(struct xfarray_sortinfo *si, uint64_t lo, uint64_t hi),
	TP_ARGS(si, lo, hi),
	TP_STRUCT__entry(
		__field(unsigned long, ino)
		__field(unsigned long long, lo)
		__field(unsigned long long, hi)
		__field(int, stack_depth)
		__field(int, max_stack_depth)
	),
	TP_fast_assign(
		__entry->ino = file_inode(si->array->xfile->file)->i_ino;
		__entry->lo = lo;
		__entry->hi = hi;
		__entry->stack_depth = si->stack_depth;
		__entry->max_stack_depth = si->max_stack_depth;
	),
	TP_printk("xfino 0x%lx lo %llu hi %llu elts %llu stack %d/%d",
		  __entry->ino,
		  __entry->lo,
		  __entry->hi,
		  __entry->hi - __entry->lo,
		  __entry->stack_depth,
		  __entry->max_stack_depth)
);
TRACE_EVENT(xfarray_sort,
	TP_PROTO(struct xfarray_sortinfo *si, size_t bytes),
	TP_ARGS(si, bytes),
	TP_STRUCT__entry(
		__field(unsigned long, ino)
		__field(unsigned long long, nr)
		__field(size_t, obj_size)
		__field(size_t, bytes)
		__field(unsigned int, max_stack_depth)
	),
	TP_fast_assign(
		__entry->nr = si->array->nr;
		__entry->obj_size = si->array->obj_size;
		__entry->ino = file_inode(si->array->xfile->file)->i_ino;
		__entry->bytes = bytes;
		__entry->max_stack_depth = si->max_stack_depth;
	),
	TP_printk("xfino 0x%lx nr %llu objsz %zu stack %u bytes %zu",
		  __entry->ino,
		  __entry->nr,
		  __entry->obj_size,
		  __entry->max_stack_depth,
		  __entry->bytes)
);
TRACE_EVENT(xfarray_sort_stats,
	TP_PROTO(struct xfarray_sortinfo *si, int error),
	TP_ARGS(si, error),
	TP_STRUCT__entry(
		__field(unsigned long, ino)
#ifdef DEBUG
		__field(unsigned long long, loads)
		__field(unsigned long long, stores)
		__field(unsigned long long, compares)
#endif
		__field(unsigned int, max_stack_depth)
		__field(unsigned int, max_stack_used)
		__field(int, error)
	),
	TP_fast_assign(
		__entry->ino = file_inode(si->array->xfile->file)->i_ino;
#ifdef DEBUG
		__entry->loads = si->loads;
		__entry->stores = si->stores;
		__entry->compares = si->compares;
#endif
		__entry->max_stack_depth = si->max_stack_depth;
		__entry->max_stack_used = si->max_stack_used;
		__entry->error = error;
	),
	TP_printk(
#ifdef DEBUG
		  "xfino 0x%lx loads %llu stores %llu compares %llu stack_depth %u/%u error %d",
#else
		  "xfino 0x%lx stack_depth %u/%u error %d",
#endif
		  __entry->ino,
#ifdef DEBUG
		  __entry->loads,
		  __entry->stores,
		  __entry->compares,
#endif
		  __entry->max_stack_used,
		  __entry->max_stack_depth,
		  __entry->error)
);
/* repair tracepoints */
#if IS_ENABLED(CONFIG_XFS_ONLINE_REPAIR)
...
This diff is collapsed.
@@ -54,4 +54,71 @@ static inline int xfarray_append(struct xfarray *array, const void *ptr)
uint64_t xfarray_length(struct xfarray *array);
int xfarray_load_next(struct xfarray *array, xfarray_idx_t *idx, void *rec);
/* Declarations for xfile array sort functionality. */

typedef cmp_func_t xfarray_cmp_fn;

struct xfarray_sortinfo {
	struct xfarray		*array;

	/* Comparison function for the sort. */
	xfarray_cmp_fn		cmp_fn;

	/* Maximum height of the partition stack. */
	uint8_t			max_stack_depth;

	/* Current height of the partition stack. */
	int8_t			stack_depth;

	/* Maximum stack depth ever used. */
	uint8_t			max_stack_used;

	/* XFARRAY_SORT_* flags; see below. */
	unsigned int		flags;

#ifdef DEBUG
	/* Performance statistics. */
	uint64_t		loads;
	uint64_t		stores;
	uint64_t		compares;
#endif

	/*
	 * Extra bytes are allocated beyond the end of the structure to store
	 * quicksort information.  C does not permit multiple VLAs per struct,
	 * so we document all of this in a comment.
	 *
	 * Pretend that we have a typedef for array records:
	 *
	 *	typedef char[array->obj_size]	xfarray_rec_t;
	 *
	 * First comes the quicksort partition stack:
	 *
	 *	xfarray_idx_t	lo[max_stack_depth];
	 *	xfarray_idx_t	hi[max_stack_depth];
	 *
	 * union {
	 *
	 * If for a given subset we decide to use an insertion sort, we use the
	 * scratchpad record after the xfarray and a second scratchpad record
	 * here to compare items:
	 *
	 *	xfarray_rec_t	scratch;
	 *
	 * Otherwise, we want to partition the records around a pivot.  We
	 * store the chosen pivot record here and use the xfarray scratchpad
	 * to rearrange the rest of the array around it:
	 *
	 *	xfarray_rec_t	pivot;
	 *
	 * }
	 */
};

/* Sort can be interrupted by a fatal signal. */
#define XFARRAY_SORT_KILLABLE	(1U << 0)

int xfarray_sort(struct xfarray *array, xfarray_cmp_fn cmp_fn,
		unsigned int flags);

#endif /* __XFS_SCRUB_XFARRAY_H__ */
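
As a rough guide to the struct comment above, the trailing allocation
might be sized and addressed as sketched below.  The helper names and
the exact arithmetic (which ignores any alignment padding the real code
may apply) are assumptions for illustration, not the actual functions
in xfarray.c:

/* Illustrative sketch of the layout documented in the comment above. */
static inline size_t
xfarray_sortinfo_size_sketch(size_t obj_size, uint8_t max_stack_depth)
{
	/* header + lo[] stack + hi[] stack + one scratch/pivot record */
	return sizeof(struct xfarray_sortinfo) +
	       2 * (size_t)max_stack_depth * sizeof(xfarray_idx_t) +
	       obj_size;
}

/* The lo[] stack starts immediately after the structure... */
static inline xfarray_idx_t *
xfarray_sortinfo_lo_sketch(struct xfarray_sortinfo *si)
{
	return (xfarray_idx_t *)(si + 1);
}

/* ...the hi[] stack follows the lo[] stack... */
static inline xfarray_idx_t *
xfarray_sortinfo_hi_sketch(struct xfarray_sortinfo *si)
{
	return xfarray_sortinfo_lo_sketch(si) + si->max_stack_depth;
}

/* ...and the scratch/pivot record comes last. */
static inline void *
xfarray_sortinfo_rec_sketch(struct xfarray_sortinfo *si)
{
	return xfarray_sortinfo_hi_sketch(si) + si->max_stack_depth;
}

A repair function would then collect its records into the xfarray and
invoke the sort roughly as follows; the comparison helper here is
hypothetical and assumes each record begins with a 64-bit key:

/* Hypothetical comparison callback: order records by a leading u64 key. */
static int
xrep_rec_cmp_sketch(const void *a, const void *b)
{
	const uint64_t	*ka = a;
	const uint64_t	*kb = b;

	if (*ka > *kb)
		return 1;
	if (*ka < *kb)
		return -1;
	return 0;
}

	/* Sort the collected records, allowing fatal signals to interrupt. */
	error = xfarray_sort(array, xrep_rec_cmp_sketch, XFARRAY_SORT_KILLABLE);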