Commit af1077fa authored by Darrick J. Wong

Merge tag 'scrub-cleanup-malloc-6.2_2022-11-16' of git://git.kernel.org/pub/scm/linux/kernel/git/djwong/xfs-linux into xfs-6.2-mergeA

xfs: clean up memory allocations in online fsck

This series standardizes the GFP_ flags that we use for memory
allocation in online scrub, and converts the callers away from the old
kmem_alloc code that was ported from Irix.
Signed-off-by: Darrick J. Wong <djwong@kernel.org>

* tag 'scrub-cleanup-malloc-6.2_2022-11-16' of git://git.kernel.org/pub/scm/linux/kernel/git/djwong/xfs-linux:
  xfs: pivot online scrub away from kmem.[ch]
  xfs: initialize the check_owner object fully
  xfs: standardize GFP flags usage in online scrub
parents 823ca26a 306195f3
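The whole series boils down to one substitution pattern applied at every
scrub allocation site. A minimal before/after sketch, distilled from the
hunks below (the xbitmap_range call site is taken from the bitmap hunk;
surrounding error handling is elided):

	/* New flag set introduced in scrub/scrub.h by this series. */
	#define XCHK_GFP_FLAGS	((__force gfp_t)(GFP_KERNEL | __GFP_NOWARN | \
					 __GFP_RETRY_MAYFAIL))

	/* Before: Irix-era wrappers with KM_* flags. */
	bmr = kmem_alloc(sizeof(struct xbitmap_range), KM_MAYFAIL);
	/* ... */
	kmem_free(bmr);

	/* After: stock kernel allocators with an explicit, uniform GFP mask. */
	bmr = kmalloc(sizeof(struct xbitmap_range), XCHK_GFP_FLAGS);
	/* ... */
	kfree(bmr);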
@@ -737,7 +737,7 @@ xchk_agfl(
 		goto out;
 	}
 	sai.entries = kvcalloc(sai.agflcount, sizeof(xfs_agblock_t),
-			GFP_KERNEL | __GFP_RETRY_MAYFAIL);
+			XCHK_GFP_FLAGS);
 	if (!sai.entries) {
 		error = -ENOMEM;
 		goto out;
...
@@ -685,7 +685,7 @@ xrep_agfl_init_header(
 		if (br->len)
 			break;
 		list_del(&br->list);
-		kmem_free(br);
+		kfree(br);
 	}

 	/* Write new AGFL to disk. */
...
@@ -49,7 +49,7 @@ xchk_setup_xattr_buf(
 	if (ab) {
 		if (sz <= ab->sz)
 			return 0;
-		kmem_free(ab);
+		kvfree(ab);
 		sc->buf = NULL;
 	}
@@ -79,7 +79,8 @@ xchk_setup_xattr(
 	 * without the inode lock held, which means we can sleep.
 	 */
 	if (sc->flags & XCHK_TRY_HARDER) {
-		error = xchk_setup_xattr_buf(sc, XATTR_SIZE_MAX, GFP_KERNEL);
+		error = xchk_setup_xattr_buf(sc, XATTR_SIZE_MAX,
+				XCHK_GFP_FLAGS);
 		if (error)
 			return error;
 	}
@@ -138,8 +139,7 @@ xchk_xattr_listent(
 	 * doesn't work, we overload the seen_enough variable to convey
 	 * the error message back to the main scrub function.
 	 */
-	error = xchk_setup_xattr_buf(sx->sc, valuelen,
-			GFP_KERNEL | __GFP_RETRY_MAYFAIL);
+	error = xchk_setup_xattr_buf(sx->sc, valuelen, XCHK_GFP_FLAGS);
 	if (error == -ENOMEM)
 		error = -EDEADLOCK;
 	if (error) {
@@ -324,8 +324,7 @@ xchk_xattr_block(
 		return 0;

 	/* Allocate memory for block usage checking. */
-	error = xchk_setup_xattr_buf(ds->sc, 0,
-			GFP_KERNEL | __GFP_RETRY_MAYFAIL);
+	error = xchk_setup_xattr_buf(ds->sc, 0, XCHK_GFP_FLAGS);
 	if (error == -ENOMEM)
 		return -EDEADLOCK;
 	if (error)
...
@@ -10,6 +10,7 @@
 #include "xfs_trans_resv.h"
 #include "xfs_mount.h"
 #include "xfs_btree.h"
+#include "scrub/scrub.h"
 #include "scrub/bitmap.h"

 /*
@@ -25,7 +26,7 @@ xbitmap_set(
 {
 	struct xbitmap_range	*bmr;

-	bmr = kmem_alloc(sizeof(struct xbitmap_range), KM_MAYFAIL);
+	bmr = kmalloc(sizeof(struct xbitmap_range), XCHK_GFP_FLAGS);
 	if (!bmr)
 		return -ENOMEM;
@@ -47,7 +48,7 @@ xbitmap_destroy(
 	for_each_xbitmap_extent(bmr, n, bitmap) {
 		list_del(&bmr->list);
-		kmem_free(bmr);
+		kfree(bmr);
 	}
 }
@@ -174,15 +175,15 @@ xbitmap_disunion(
 			/* Total overlap, just delete ex. */
 			lp = lp->next;
 			list_del(&br->list);
-			kmem_free(br);
+			kfree(br);
 			break;
 		case 0:
 			/*
 			 * Deleting from the middle: add the new right extent
 			 * and then shrink the left extent.
 			 */
-			new_br = kmem_alloc(sizeof(struct xbitmap_range),
-					KM_MAYFAIL);
+			new_br = kmalloc(sizeof(struct xbitmap_range),
+					XCHK_GFP_FLAGS);
 			if (!new_br) {
 				error = -ENOMEM;
 				goto out;
...
@@ -408,7 +408,6 @@ xchk_btree_check_owner(
 	struct xfs_buf		*bp)
 {
 	struct xfs_btree_cur	*cur = bs->cur;
-	struct check_owner	*co;

 	/*
 	 * In theory, xfs_btree_get_block should only give us a null buffer
@@ -431,10 +430,13 @@ xchk_btree_check_owner(
 	 * later scanning.
 	 */
 	if (cur->bc_btnum == XFS_BTNUM_BNO || cur->bc_btnum == XFS_BTNUM_RMAP) {
-		co = kmem_alloc(sizeof(struct check_owner),
-				KM_MAYFAIL);
+		struct check_owner	*co;
+
+		co = kmalloc(sizeof(struct check_owner), XCHK_GFP_FLAGS);
 		if (!co)
 			return -ENOMEM;
+
+		INIT_LIST_HEAD(&co->list);
 		co->level = level;
 		co->daddr = xfs_buf_daddr(bp);
 		list_add_tail(&co->list, &bs->to_check);
@@ -649,7 +651,7 @@ xchk_btree(
 		xchk_btree_set_corrupt(sc, cur, 0);
 		return 0;
 	}
-	bs = kmem_zalloc(cur_sz, KM_NOFS | KM_MAYFAIL);
+	bs = kzalloc(cur_sz, XCHK_GFP_FLAGS);
 	if (!bs)
 		return -ENOMEM;
 	bs->cur = cur;
@@ -740,9 +742,9 @@ xchk_btree(
 		error = xchk_btree_check_block_owner(bs, co->level,
 				co->daddr);
 		list_del(&co->list);
-		kmem_free(co);
+		kfree(co);
 	}
-	kmem_free(bs);
+	kfree(bs);

 	return error;
 }
@@ -486,7 +486,7 @@ xchk_da_btree(
 		return 0;

 	/* Set up initial da state. */
-	ds = kmem_zalloc(sizeof(struct xchk_da_btree), KM_NOFS | KM_MAYFAIL);
+	ds = kzalloc(sizeof(struct xchk_da_btree), XCHK_GFP_FLAGS);
 	if (!ds)
 		return -ENOMEM;
 	ds->dargs.dp = sc->ip;
@@ -591,6 +591,6 @@ xchk_da_btree(
 out_state:
 	xfs_da_state_free(ds->state);
-	kmem_free(ds);
+	kfree(ds);
 	return error;
 }
@@ -116,7 +116,7 @@ xchk_setup_fscounters(
 	struct xchk_fscounters	*fsc;
 	int			error;

-	sc->buf = kmem_zalloc(sizeof(struct xchk_fscounters), 0);
+	sc->buf = kzalloc(sizeof(struct xchk_fscounters), XCHK_GFP_FLAGS);
 	if (!sc->buf)
 		return -ENOMEM;
 	fsc = sc->buf;
...
@@ -127,8 +127,8 @@ xchk_refcountbt_rmap_check(
 		 * is healthy each rmap_irec we see will be in agbno order
 		 * so we don't need insertion sort here.
 		 */
-		frag = kmem_alloc(sizeof(struct xchk_refcnt_frag),
-				KM_MAYFAIL);
+		frag = kmalloc(sizeof(struct xchk_refcnt_frag),
+				XCHK_GFP_FLAGS);
 		if (!frag)
 			return -ENOMEM;
 		memcpy(&frag->rm, rec, sizeof(frag->rm));
@@ -215,7 +215,7 @@ xchk_refcountbt_process_rmap_fragments(
 			continue;
 		}
 		list_del(&frag->list);
-		kmem_free(frag);
+		kfree(frag);
 		nr++;
 	}
@@ -257,11 +257,11 @@ xchk_refcountbt_process_rmap_fragments(
 	/* Delete fragments and work list. */
 	list_for_each_entry_safe(frag, n, &worklist, list) {
 		list_del(&frag->list);
-		kmem_free(frag);
+		kfree(frag);
 	}
 	list_for_each_entry_safe(frag, n, &refchk->fragments, list) {
 		list_del(&frag->list);
-		kmem_free(frag);
+		kfree(frag);
 	}
 }
@@ -306,7 +306,7 @@ xchk_refcountbt_xref_rmap(
 out_free:
 	list_for_each_entry_safe(frag, n, &refchk.fragments, list) {
 		list_del(&frag->list);
-		kmem_free(frag);
+		kfree(frag);
 	}
 }
...
@@ -174,7 +174,7 @@ xchk_teardown(
 	if (sc->flags & XCHK_REAPING_DISABLED)
 		xchk_start_reaping(sc);
 	if (sc->buf) {
-		kmem_free(sc->buf);
+		kvfree(sc->buf);
 		sc->buf = NULL;
 	}
 	return error;
@@ -467,7 +467,7 @@ xfs_scrub_metadata(
 	xfs_warn_mount(mp, XFS_OPSTATE_WARNED_SCRUB,
 "EXPERIMENTAL online scrub feature in use. Use at your own risk!");

-	sc = kmem_zalloc(sizeof(struct xfs_scrub), KM_NOFS | KM_MAYFAIL);
+	sc = kzalloc(sizeof(struct xfs_scrub), XCHK_GFP_FLAGS);
 	if (!sc) {
 		error = -ENOMEM;
 		goto out;
@@ -557,7 +557,7 @@ xfs_scrub_metadata(
 out_teardown:
 	error = xchk_teardown(sc, error);
 out_sc:
-	kmem_free(sc);
+	kfree(sc);
 out:
 	trace_xchk_done(XFS_I(file_inode(file)), sm, error);
 	if (error == -EFSCORRUPTED || error == -EFSBADCRC) {
...
@@ -8,6 +8,15 @@
 struct xfs_scrub;

+/*
+ * Standard flags for allocating memory within scrub.  NOFS context is
+ * configured by the process allocation scope.  Scrub and repair must be able
+ * to back out gracefully if there isn't enough memory.  Force-cast to avoid
+ * complaints from static checkers.
+ */
+#define XCHK_GFP_FLAGS	((__force gfp_t)(GFP_KERNEL | __GFP_NOWARN | \
+					 __GFP_RETRY_MAYFAIL))
+
 /* Type info and names for the scrub types. */
 enum xchk_type {
 	ST_NONE = 1,	/* disabled */
...
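The new comment says that NOFS context comes from the process allocation
scope rather than from the GFP mask, which is why the old KM_NOFS call
sites above can drop to a plain GFP_KERNEL-based mask. A hypothetical
sketch of what that scoping looks like, using the kernel's
memalloc_nofs_save()/memalloc_nofs_restore() API (not part of this patch,
shown only to illustrate the comment):

	#include <linux/sched/mm.h>

	unsigned int	nofs_cookie;

	/*
	 * Every allocation inside this window is implicitly NOFS, so the
	 * GFP_KERNEL in XCHK_GFP_FLAGS cannot recurse into the filesystem.
	 */
	nofs_cookie = memalloc_nofs_save();
	sc = kzalloc(sizeof(struct xfs_scrub), XCHK_GFP_FLAGS);
	memalloc_nofs_restore(nofs_cookie);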
@@ -21,7 +21,7 @@ xchk_setup_symlink(
 	struct xfs_scrub	*sc)
 {
 	/* Allocate the buffer without the inode lock held. */
-	sc->buf = kvzalloc(XFS_SYMLINK_MAXLEN + 1, GFP_KERNEL);
+	sc->buf = kvzalloc(XFS_SYMLINK_MAXLEN + 1, XCHK_GFP_FLAGS);
 	if (!sc->buf)
 		return -ENOMEM;
...