Commit af1077fa authored by Darrick J. Wong


Merge tag 'scrub-cleanup-malloc-6.2_2022-11-16' of git://git.kernel.org/pub/scm/linux/kernel/git/djwong/xfs-linux into xfs-6.2-mergeA

xfs: clean up memory allocations in online fsck

This series standardizes the GFP_ flags that we use for memory
allocation in online scrub, and converts the callers away from the old
kmem_alloc code that was ported from Irix.
Signed-off-by: Darrick J. Wong <djwong@kernel.org>

* tag 'scrub-cleanup-malloc-6.2_2022-11-16' of git://git.kernel.org/pub/scm/linux/kernel/git/djwong/xfs-linux:
  xfs: pivot online scrub away from kmem.[ch]
  xfs: initialize the check_owner object fully
  xfs: standardize GFP flags usage in online scrub
parents 823ca26a 306195f3
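
For context, the conversion pattern is the same throughout the series: the
Irix-era kmem_* wrappers and their KM_* flags give way to the standard
kernel allocators with one shared GFP mask. A minimal before/after sketch
(illustrative only; "ptr" is a hypothetical object, and KM_MAYFAIL roughly
corresponded to __GFP_RETRY_MAYFAIL in the old wrapper):

	/* Before: Irix-style wrapper with XFS-private KM_* flags. */
	ptr = kmem_alloc(sizeof(*ptr), KM_NOFS | KM_MAYFAIL);

	/* After: plain kmalloc(); NOFS behavior now comes from the process
	 * allocation scope, so one flag set serves all of scrub. */
	ptr = kmalloc(sizeof(*ptr), XCHK_GFP_FLAGS);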
@@ -737,7 +737,7 @@ xchk_agfl(
 		goto out;
 	}
 	sai.entries = kvcalloc(sai.agflcount, sizeof(xfs_agblock_t),
-			GFP_KERNEL | __GFP_RETRY_MAYFAIL);
+			XCHK_GFP_FLAGS);
 	if (!sai.entries) {
 		error = -ENOMEM;
 		goto out;

@@ -685,7 +685,7 @@ xrep_agfl_init_header(
 		if (br->len)
 			break;
 		list_del(&br->list);
-		kmem_free(br);
+		kfree(br);
 	}
 
 	/* Write new AGFL to disk. */

@@ -49,7 +49,7 @@ xchk_setup_xattr_buf(
 	if (ab) {
 		if (sz <= ab->sz)
 			return 0;
-		kmem_free(ab);
+		kvfree(ab);
 		sc->buf = NULL;
 	}

@@ -79,7 +79,8 @@ xchk_setup_xattr(
 	 * without the inode lock held, which means we can sleep.
 	 */
 	if (sc->flags & XCHK_TRY_HARDER) {
-		error = xchk_setup_xattr_buf(sc, XATTR_SIZE_MAX, GFP_KERNEL);
+		error = xchk_setup_xattr_buf(sc, XATTR_SIZE_MAX,
+				XCHK_GFP_FLAGS);
 		if (error)
 			return error;
 	}

@@ -138,8 +139,7 @@ xchk_xattr_listent(
 	 * doesn't work, we overload the seen_enough variable to convey
 	 * the error message back to the main scrub function.
 	 */
-	error = xchk_setup_xattr_buf(sx->sc, valuelen,
-			GFP_KERNEL | __GFP_RETRY_MAYFAIL);
+	error = xchk_setup_xattr_buf(sx->sc, valuelen, XCHK_GFP_FLAGS);
 	if (error == -ENOMEM)
 		error = -EDEADLOCK;
 	if (error) {
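
The -ENOMEM to -EDEADLOCK conversion is scrub's retry protocol: -EDEADLOCK
tells the top-level dispatcher to drop everything, set XCHK_TRY_HARDER, and
run the scrubber again, at which point the xchk_setup_xattr() hunk above
preallocates the worst-case XATTR_SIZE_MAX buffer up front. A simplified
sketch of that caller-side loop (hypothetical; the real logic lives in
xfs_scrub_metadata()):

	retry_op:
		error = sc->ops->scrub(sc);
		if (error == -EDEADLOCK && !(sc->flags & XCHK_TRY_HARDER)) {
			error = xchk_teardown(sc, 0);	/* drop locks and buffers */
			if (error)
				goto out;
			sc->flags |= XCHK_TRY_HARDER;	/* preallocate next time */
			goto retry_op;
		}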
@@ -324,8 +324,7 @@ xchk_xattr_block(
 		return 0;
 
 	/* Allocate memory for block usage checking. */
-	error = xchk_setup_xattr_buf(ds->sc, 0,
-			GFP_KERNEL | __GFP_RETRY_MAYFAIL);
+	error = xchk_setup_xattr_buf(ds->sc, 0, XCHK_GFP_FLAGS);
 	if (error == -ENOMEM)
 		return -EDEADLOCK;
 	if (error)

@@ -10,6 +10,7 @@
 #include "xfs_trans_resv.h"
 #include "xfs_mount.h"
 #include "xfs_btree.h"
+#include "scrub/scrub.h"
 #include "scrub/bitmap.h"
 
 /*

@@ -25,7 +26,7 @@ xbitmap_set(
 {
 	struct xbitmap_range	*bmr;
 
-	bmr = kmem_alloc(sizeof(struct xbitmap_range), KM_MAYFAIL);
+	bmr = kmalloc(sizeof(struct xbitmap_range), XCHK_GFP_FLAGS);
 	if (!bmr)
 		return -ENOMEM;

@@ -47,7 +48,7 @@ xbitmap_destroy(
 	for_each_xbitmap_extent(bmr, n, bitmap) {
 		list_del(&bmr->list);
-		kmem_free(bmr);
+		kfree(bmr);
 	}
 }

@@ -174,15 +175,15 @@ xbitmap_disunion(
 			/* Total overlap, just delete ex. */
 			lp = lp->next;
 			list_del(&br->list);
-			kmem_free(br);
+			kfree(br);
 			break;
 		case 0:
 			/*
 			 * Deleting from the middle: add the new right extent
 			 * and then shrink the left extent.
 			 */
-			new_br = kmem_alloc(sizeof(struct xbitmap_range),
-					KM_MAYFAIL);
+			new_br = kmalloc(sizeof(struct xbitmap_range),
+					XCHK_GFP_FLAGS);
 			if (!new_br) {
 				error = -ENOMEM;
 				goto out;

@@ -408,7 +408,6 @@ xchk_btree_check_owner(
 	struct xfs_buf		*bp)
 {
 	struct xfs_btree_cur	*cur = bs->cur;
-	struct check_owner	*co;
 
 	/*
 	 * In theory, xfs_btree_get_block should only give us a null buffer

@@ -431,10 +430,13 @@ xchk_btree_check_owner(
 	 * later scanning.
 	 */
 	if (cur->bc_btnum == XFS_BTNUM_BNO || cur->bc_btnum == XFS_BTNUM_RMAP) {
-		co = kmem_alloc(sizeof(struct check_owner),
-				KM_MAYFAIL);
+		struct check_owner	*co;
+
+		co = kmalloc(sizeof(struct check_owner), XCHK_GFP_FLAGS);
 		if (!co)
 			return -ENOMEM;
+
+		INIT_LIST_HEAD(&co->list);
 		co->level = level;
 		co->daddr = xfs_buf_daddr(bp);
 		list_add_tail(&co->list, &bs->to_check);

@@ -649,7 +651,7 @@ xchk_btree(
 		xchk_btree_set_corrupt(sc, cur, 0);
 		return 0;
 	}
-	bs = kmem_zalloc(cur_sz, KM_NOFS | KM_MAYFAIL);
+	bs = kzalloc(cur_sz, XCHK_GFP_FLAGS);
 	if (!bs)
 		return -ENOMEM;
 	bs->cur = cur;

@@ -740,9 +742,9 @@ xchk_btree(
 		error = xchk_btree_check_block_owner(bs, co->level,
 				co->daddr);
 		list_del(&co->list);
-		kmem_free(co);
+		kfree(co);
 	}
-	kmem_free(bs);
+	kfree(bs);
 
 	return error;
 }

@@ -486,7 +486,7 @@ xchk_da_btree(
 		return 0;
 
 	/* Set up initial da state. */
-	ds = kmem_zalloc(sizeof(struct xchk_da_btree), KM_NOFS | KM_MAYFAIL);
+	ds = kzalloc(sizeof(struct xchk_da_btree), XCHK_GFP_FLAGS);
 	if (!ds)
 		return -ENOMEM;
 	ds->dargs.dp = sc->ip;

@@ -591,6 +591,6 @@ xchk_da_btree(
 out_state:
 	xfs_da_state_free(ds->state);
-	kmem_free(ds);
+	kfree(ds);
 	return error;
 }

@@ -116,7 +116,7 @@ xchk_setup_fscounters(
 	struct xchk_fscounters	*fsc;
 	int			error;
 
-	sc->buf = kmem_zalloc(sizeof(struct xchk_fscounters), 0);
+	sc->buf = kzalloc(sizeof(struct xchk_fscounters), XCHK_GFP_FLAGS);
 	if (!sc->buf)
 		return -ENOMEM;
 	fsc = sc->buf;

@@ -127,8 +127,8 @@ xchk_refcountbt_rmap_check(
 		 * is healthy each rmap_irec we see will be in agbno order
 		 * so we don't need insertion sort here.
 		 */
-		frag = kmem_alloc(sizeof(struct xchk_refcnt_frag),
-				KM_MAYFAIL);
+		frag = kmalloc(sizeof(struct xchk_refcnt_frag),
+				XCHK_GFP_FLAGS);
 		if (!frag)
 			return -ENOMEM;
 		memcpy(&frag->rm, rec, sizeof(frag->rm));

@@ -215,7 +215,7 @@ xchk_refcountbt_process_rmap_fragments(
 			continue;
 		}
 		list_del(&frag->list);
-		kmem_free(frag);
+		kfree(frag);
 		nr++;
 	}

@@ -257,11 +257,11 @@ xchk_refcountbt_process_rmap_fragments(
 	/* Delete fragments and work list. */
 	list_for_each_entry_safe(frag, n, &worklist, list) {
 		list_del(&frag->list);
-		kmem_free(frag);
+		kfree(frag);
 	}
 	list_for_each_entry_safe(frag, n, &refchk->fragments, list) {
 		list_del(&frag->list);
-		kmem_free(frag);
+		kfree(frag);
 	}
 }

@@ -306,7 +306,7 @@ xchk_refcountbt_xref_rmap(
 out_free:
 	list_for_each_entry_safe(frag, n, &refchk.fragments, list) {
 		list_del(&frag->list);
-		kmem_free(frag);
+		kfree(frag);
 	}
 }

@@ -174,7 +174,7 @@ xchk_teardown(
 	if (sc->flags & XCHK_REAPING_DISABLED)
 		xchk_start_reaping(sc);
 	if (sc->buf) {
-		kmem_free(sc->buf);
+		kvfree(sc->buf);
 		sc->buf = NULL;
 	}
 	return error;
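
The switch to kvfree() here is deliberate: sc->buf can be populated by
either allocator family, e.g. kzalloc() in xchk_setup_fscounters() or
kvzalloc() in xchk_setup_symlink(), and kvfree() frees both kmalloc- and
vmalloc-backed memory, whereas kfree() only handles the former. A minimal
illustration (hypothetical buffer, not from this series):

	buf = kvzalloc(size, XCHK_GFP_FLAGS);	/* may be vmalloc-backed if large */
	...
	kvfree(buf);				/* correct for either backing store */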
@@ -467,7 +467,7 @@ xfs_scrub_metadata(
 	xfs_warn_mount(mp, XFS_OPSTATE_WARNED_SCRUB,
 "EXPERIMENTAL online scrub feature in use. Use at your own risk!");
 
-	sc = kmem_zalloc(sizeof(struct xfs_scrub), KM_NOFS | KM_MAYFAIL);
+	sc = kzalloc(sizeof(struct xfs_scrub), XCHK_GFP_FLAGS);
 	if (!sc) {
 		error = -ENOMEM;
 		goto out;

@@ -557,7 +557,7 @@ xfs_scrub_metadata(
 out_teardown:
 	error = xchk_teardown(sc, error);
 out_sc:
-	kmem_free(sc);
+	kfree(sc);
 out:
 	trace_xchk_done(XFS_I(file_inode(file)), sm, error);
 	if (error == -EFSCORRUPTED || error == -EFSBADCRC) {

@@ -8,6 +8,15 @@
 struct xfs_scrub;
 
+/*
+ * Standard flags for allocating memory within scrub.  NOFS context is
+ * configured by the process allocation scope.  Scrub and repair must be able
+ * to back out gracefully if there isn't enough memory.  Force-cast to avoid
+ * complaints from static checkers.
+ */
+#define XCHK_GFP_FLAGS	((__force gfp_t)(GFP_KERNEL | __GFP_NOWARN | \
+				 __GFP_RETRY_MAYFAIL))
+
 /* Type info and names for the scrub types. */
 enum xchk_type {
 	ST_NONE = 1,	/* disabled */
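
The "process allocation scope" the new comment refers to is the
memalloc_nofs_save()/memalloc_nofs_restore() API: once a task enters a NOFS
scope, every allocation it makes is implicitly treated as GFP_NOFS, which is
why XCHK_GFP_FLAGS needs no KM_NOFS-style bit. A minimal sketch of the idea
(hypothetical code, not part of this series):

	unsigned int	nofs_flags;

	nofs_flags = memalloc_nofs_save();		/* enter NOFS scope */
	p = kzalloc(sizeof(*p), XCHK_GFP_FLAGS);	/* behaves as GFP_NOFS here */
	memalloc_nofs_restore(nofs_flags);		/* leave the scope */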
@@ -21,7 +21,7 @@ xchk_setup_symlink(
 	struct xfs_scrub	*sc)
 {
 	/* Allocate the buffer without the inode lock held. */
-	sc->buf = kvzalloc(XFS_SYMLINK_MAXLEN + 1, GFP_KERNEL);
+	sc->buf = kvzalloc(XFS_SYMLINK_MAXLEN + 1, XCHK_GFP_FLAGS);
 	if (!sc->buf)
 		return -ENOMEM;