Commit ebe0f798 authored by Chandan Babu R

Merge tag 'log-incompat-permissions-6.10_2024-04-15' of https://git.kernel.org/pub/scm/linux/kernel/git/djwong/xfs-linux into xfs-6.10-mergeA

xfs: improve log incompat feature handling

This patchset improves the performance of log incompat feature bit
handling by making a few changes to how the filesystem handles them.
First, we now only clear the bits during a clean unmount, which reduces calls
to the (expensive) upgrade function to once per bit per mount.  Second, we
now only allow incompat feature upgrades for sysadmins, or if the sysadmin
explicitly allows it via a mount option.  Currently the only log incompat
user is logged xattrs, which requires CONFIG_XFS_DEBUG=y, so there should be
no user-visible impact from this change.
Signed-off-by: Darrick J. Wong <djwong@kernel.org>
Signed-off-by: Chandan Babu R <chandanbabu@kernel.org>

* tag 'log-incompat-permissions-6.10_2024-04-15' of https://git.kernel.org/pub/scm/linux/kernel/git/djwong/xfs-linux:
  xfs: only clear log incompat flags at clean unmount
  xfs: fix error bailout in xrep_abt_build_new_trees
  xfs: fix potential AGI <-> ILOCK ABBA deadlock in xrep_dinode_findmode_walk_directory
  xfs: fix an AGI lock acquisition ordering problem in xrep_dinode_findmode
  xfs: pass xfs_buf lookup flags to xfs_*read_agi
parents 0bbac3fa 5302a5c8
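
Most of the hunks below mechanically thread a new flags argument through the
two AGI read helpers. As a rough orientation, here is a condensed sketch of
the resulting calling convention (the wrapper name read_agi_maybe_nowait is
illustrative only, not part of the patch):

/*
 * xfs_read_agi() and xfs_ialloc_read_agi() now take a flags argument.
 * Passing XFS_IALLOC_FLAG_TRYLOCK makes the underlying buffer lookup use
 * XBF_TRYLOCK, so a contended AGI buffer fails with -EAGAIN instead of
 * sleeping on the buffer lock.
 */
static int
read_agi_maybe_nowait(
	struct xfs_perag	*pag,
	struct xfs_trans	*tp,
	bool			nowait,
	struct xfs_buf		**agibpp)
{
	int			flags = nowait ? XFS_IALLOC_FLAG_TRYLOCK : 0;

	return xfs_ialloc_read_agi(pag, tp, flags, agibpp);
}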
......@@ -4047,9 +4047,6 @@ series.
| one ``struct rw_semaphore`` for each feature. |
| The log cleaning code tries to take this rwsem in exclusive mode to |
| clear the bit; if the lock attempt fails, the feature bit remains set. |
| Filesystem code signals its intention to use a log incompat feature in a |
| transaction by calling ``xlog_use_incompat_feat``, which takes the rwsem |
| in shared mode. |
| The code supporting a log incompat feature should create wrapper |
| functions to obtain the log feature and call |
| ``xfs_add_incompat_log_feature`` to set the feature bits in the primary |
......
......@@ -194,7 +194,7 @@ xfs_initialize_perag_data(
pag = xfs_perag_get(mp, index);
error = xfs_alloc_read_agf(pag, NULL, 0, NULL);
if (!error)
error = xfs_ialloc_read_agi(pag, NULL, NULL);
error = xfs_ialloc_read_agi(pag, NULL, 0, NULL);
if (error) {
xfs_perag_put(pag);
return error;
......@@ -931,7 +931,7 @@ xfs_ag_shrink_space(
int error, err2;
ASSERT(pag->pag_agno == mp->m_sb.sb_agcount - 1);
error = xfs_ialloc_read_agi(pag, *tpp, &agibp);
error = xfs_ialloc_read_agi(pag, *tpp, 0, &agibp);
if (error)
return error;
......@@ -1062,7 +1062,7 @@ xfs_ag_extend_space(
ASSERT(pag->pag_agno == pag->pag_mount->m_sb.sb_agcount - 1);
error = xfs_ialloc_read_agi(pag, tp, &bp);
error = xfs_ialloc_read_agi(pag, tp, 0, &bp);
if (error)
return error;
......@@ -1119,7 +1119,7 @@ xfs_ag_get_geometry(
int error;
/* Lock the AG headers. */
error = xfs_ialloc_read_agi(pag, NULL, &agi_bp);
error = xfs_ialloc_read_agi(pag, NULL, 0, &agi_bp);
if (error)
return error;
error = xfs_alloc_read_agf(pag, NULL, 0, &agf_bp);
......
......@@ -1699,7 +1699,7 @@ xfs_dialloc_good_ag(
return false;
if (!xfs_perag_initialised_agi(pag)) {
error = xfs_ialloc_read_agi(pag, tp, NULL);
error = xfs_ialloc_read_agi(pag, tp, 0, NULL);
if (error)
return false;
}
......@@ -1768,7 +1768,7 @@ xfs_dialloc_try_ag(
* Then read in the AGI buffer and recheck with the AGI buffer
* lock held.
*/
error = xfs_ialloc_read_agi(pag, *tpp, &agbp);
error = xfs_ialloc_read_agi(pag, *tpp, 0, &agbp);
if (error)
return error;
......@@ -2286,7 +2286,7 @@ xfs_difree(
/*
* Get the allocation group header.
*/
error = xfs_ialloc_read_agi(pag, tp, &agbp);
error = xfs_ialloc_read_agi(pag, tp, 0, &agbp);
if (error) {
xfs_warn(mp, "%s: xfs_ialloc_read_agi() returned error %d.",
__func__, error);
......@@ -2332,7 +2332,7 @@ xfs_imap_lookup(
int error;
int i;
error = xfs_ialloc_read_agi(pag, tp, &agbp);
error = xfs_ialloc_read_agi(pag, tp, 0, &agbp);
if (error) {
xfs_alert(mp,
"%s: xfs_ialloc_read_agi() returned error %d, agno %d",
......@@ -2675,6 +2675,7 @@ int
xfs_read_agi(
struct xfs_perag *pag,
struct xfs_trans *tp,
xfs_buf_flags_t flags,
struct xfs_buf **agibpp)
{
struct xfs_mount *mp = pag->pag_mount;
......@@ -2684,7 +2685,7 @@ xfs_read_agi(
error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp,
XFS_AG_DADDR(mp, pag->pag_agno, XFS_AGI_DADDR(mp)),
XFS_FSS_TO_BB(mp, 1), 0, agibpp, &xfs_agi_buf_ops);
XFS_FSS_TO_BB(mp, 1), flags, agibpp, &xfs_agi_buf_ops);
if (xfs_metadata_is_sick(error))
xfs_ag_mark_sick(pag, XFS_SICK_AG_AGI);
if (error)
......@@ -2704,6 +2705,7 @@ int
xfs_ialloc_read_agi(
struct xfs_perag *pag,
struct xfs_trans *tp,
int flags,
struct xfs_buf **agibpp)
{
struct xfs_buf *agibp;
......@@ -2712,7 +2714,9 @@ xfs_ialloc_read_agi(
trace_xfs_ialloc_read_agi(pag->pag_mount, pag->pag_agno);
error = xfs_read_agi(pag, tp, &agibp);
error = xfs_read_agi(pag, tp,
(flags & XFS_IALLOC_FLAG_TRYLOCK) ? XBF_TRYLOCK : 0,
&agibp);
if (error)
return error;
......
......@@ -63,10 +63,11 @@ xfs_ialloc_log_agi(
struct xfs_buf *bp, /* allocation group header buffer */
uint32_t fields); /* bitmask of fields to log */
int xfs_read_agi(struct xfs_perag *pag, struct xfs_trans *tp,
int xfs_read_agi(struct xfs_perag *pag, struct xfs_trans *tp, xfs_buf_flags_t flags,
struct xfs_buf **agibpp);
int xfs_ialloc_read_agi(struct xfs_perag *pag, struct xfs_trans *tp,
struct xfs_buf **agibpp);
int flags, struct xfs_buf **agibpp);
#define XFS_IALLOC_FLAG_TRYLOCK (1U << 0) /* use trylock for buffer locking */
/*
* Lookup a record by ino in the btree given by cur.
......
......@@ -745,7 +745,7 @@ xfs_finobt_count_blocks(
struct xfs_btree_cur *cur;
int error;
error = xfs_ialloc_read_agi(pag, tp, &agbp);
error = xfs_ialloc_read_agi(pag, tp, 0, &agbp);
if (error)
return error;
......@@ -768,7 +768,7 @@ xfs_finobt_read_blocks(
struct xfs_agi *agi;
int error;
error = xfs_ialloc_read_agi(pag, tp, &agbp);
error = xfs_ialloc_read_agi(pag, tp, 0, &agbp);
if (error)
return error;
......
......@@ -778,7 +778,7 @@ xrep_abt_build_new_trees(
error = xrep_bnobt_sort_records(ra);
if (error)
return error;
goto err_levels;
/* Load the free space by block number tree. */
ra->array_cur = XFARRAY_CURSOR_INIT;
......
......@@ -445,7 +445,7 @@ xchk_perag_read_headers(
{
int error;
error = xfs_ialloc_read_agi(sa->pag, sc->tp, &sa->agi_bp);
error = xfs_ialloc_read_agi(sa->pag, sc->tp, 0, &sa->agi_bp);
if (error && want_ag_read_header_failure(sc, XFS_SCRUB_TYPE_AGI))
return error;
......@@ -827,7 +827,7 @@ xchk_iget_agi(
* in the iget cache miss path.
*/
pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, inum));
error = xfs_ialloc_read_agi(pag, tp, agi_bpp);
error = xfs_ialloc_read_agi(pag, tp, 0, agi_bpp);
xfs_perag_put(pag);
if (error)
return error;
......
......@@ -85,7 +85,7 @@ xchk_fscount_warmup(
continue;
/* Lock both AG headers. */
error = xfs_ialloc_read_agi(pag, sc->tp, &agi_bp);
error = xfs_ialloc_read_agi(pag, sc->tp, 0, &agi_bp);
if (error)
break;
error = xfs_alloc_read_agf(pag, sc->tp, 0, &agf_bp);
......
......@@ -282,6 +282,51 @@ xrep_dinode_findmode_dirent(
return 0;
}
/* Try to lock a directory, or wait a jiffy. */
static inline int
xrep_dinode_ilock_nowait(
struct xfs_inode *dp,
unsigned int lock_mode)
{
if (xfs_ilock_nowait(dp, lock_mode))
return true;
schedule_timeout_killable(1);
return false;
}
/*
* Try to lock a directory to look for ftype hints. Since we already hold the
* AGI buffer, we cannot block waiting for the ILOCK because rename can take
* the ILOCK and then try to lock AGIs.
*/
STATIC int
xrep_dinode_trylock_directory(
struct xrep_inode *ri,
struct xfs_inode *dp,
unsigned int *lock_modep)
{
unsigned long deadline = jiffies + msecs_to_jiffies(30000);
unsigned int lock_mode;
int error = 0;
do {
if (xchk_should_terminate(ri->sc, &error))
return error;
if (xfs_need_iread_extents(&dp->i_df))
lock_mode = XFS_ILOCK_EXCL;
else
lock_mode = XFS_ILOCK_SHARED;
if (xrep_dinode_ilock_nowait(dp, lock_mode)) {
*lock_modep = lock_mode;
return 0;
}
} while (!time_is_before_jiffies(deadline));
return -EBUSY;
}
/*
* If this is a directory, walk the dirents looking for any that point to the
* scrub target inode.
......@@ -299,7 +344,9 @@ xrep_dinode_findmode_walk_directory(
* Scan the directory to see if it contains an entry pointing to
* the directory that we are repairing.
*/
lock_mode = xfs_ilock_data_map_shared(dp);
error = xrep_dinode_trylock_directory(ri, dp, &lock_mode);
if (error)
return error;
/*
* If this directory is known to be sick, we cannot scan it reliably
......@@ -356,6 +403,7 @@ xrep_dinode_find_mode(
* so there's a real possibility that _iscan_iter can return EBUSY.
*/
xchk_iscan_start(sc, 5000, 100, &ri->ftype_iscan);
xchk_iscan_set_agi_trylock(&ri->ftype_iscan);
ri->ftype_iscan.skip_ino = sc->sm->sm_ino;
ri->alleged_ftype = XFS_DIR3_FT_UNKNOWN;
while ((error = xchk_iscan_iter(&ri->ftype_iscan, &dp)) == 1) {
......
......@@ -243,6 +243,40 @@ xchk_iscan_finish(
mutex_unlock(&iscan->lock);
}
/*
* Grab the AGI to advance the inode scan. Returns 0 if *agi_bpp is now set,
* -ECANCELED if the live scan aborted, -EBUSY if the AGI could not be grabbed,
* or the usual negative errno.
*/
STATIC int
xchk_iscan_read_agi(
struct xchk_iscan *iscan,
struct xfs_perag *pag,
struct xfs_buf **agi_bpp)
{
struct xfs_scrub *sc = iscan->sc;
unsigned long relax;
int ret;
if (!xchk_iscan_agi_needs_trylock(iscan))
return xfs_ialloc_read_agi(pag, sc->tp, 0, agi_bpp);
relax = msecs_to_jiffies(iscan->iget_retry_delay);
do {
ret = xfs_ialloc_read_agi(pag, sc->tp, XFS_IALLOC_FLAG_TRYLOCK,
agi_bpp);
if (ret != -EAGAIN)
return ret;
if (!iscan->iget_timeout ||
time_is_before_jiffies(iscan->__iget_deadline))
return -EBUSY;
trace_xchk_iscan_agi_retry_wait(iscan);
} while (!schedule_timeout_killable(relax) &&
!xchk_iscan_aborted(iscan));
return -ECANCELED;
}
/*
* Advance ino to the next inode that the inobt thinks is allocated, being
* careful to jump to the next AG if we've reached the right end of this AG's
......@@ -281,7 +315,7 @@ xchk_iscan_advance(
if (!pag)
return -ECANCELED;
ret = xfs_ialloc_read_agi(pag, sc->tp, &agi_bp);
ret = xchk_iscan_read_agi(iscan, pag, &agi_bp);
if (ret)
goto out_pag;
......
......@@ -59,6 +59,9 @@ struct xchk_iscan {
/* Set if the scan has been aborted due to some event in the fs. */
#define XCHK_ISCAN_OPSTATE_ABORTED (1)
/* Use trylock to acquire the AGI */
#define XCHK_ISCAN_OPSTATE_TRYLOCK_AGI (2)
static inline bool
xchk_iscan_aborted(const struct xchk_iscan *iscan)
{
......@@ -71,6 +74,18 @@ xchk_iscan_abort(struct xchk_iscan *iscan)
set_bit(XCHK_ISCAN_OPSTATE_ABORTED, &iscan->__opstate);
}
static inline bool
xchk_iscan_agi_needs_trylock(const struct xchk_iscan *iscan)
{
return test_bit(XCHK_ISCAN_OPSTATE_TRYLOCK_AGI, &iscan->__opstate);
}
static inline void
xchk_iscan_set_agi_trylock(struct xchk_iscan *iscan)
{
set_bit(XCHK_ISCAN_OPSTATE_TRYLOCK_AGI, &iscan->__opstate);
}
void xchk_iscan_start(struct xfs_scrub *sc, unsigned int iget_timeout,
unsigned int iget_retry_delay, struct xchk_iscan *iscan);
void xchk_iscan_teardown(struct xchk_iscan *iscan);
......
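
To put the scrub-side pieces above together, a caller opts a live inode scan
into this non-blocking AGI behaviour before iterating. A minimal usage sketch
assembled from the calls shown in the hunks above (error handling condensed):

/* Start the inode scan, then mark it as needing trylock on AGI buffers. */
xchk_iscan_start(sc, 5000, 100, &ri->ftype_iscan);
xchk_iscan_set_agi_trylock(&ri->ftype_iscan);

/*
 * Later, xchk_iscan_advance() goes through xchk_iscan_read_agi(), which keeps
 * retrying a failed trylock until the iget deadline passes and then returns
 * -EBUSY, or -ECANCELED if the scan is aborted or the waiter is killed.
 */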
......@@ -290,7 +290,7 @@ xrep_calc_ag_resblks(
icount = pag->pagi_count;
} else {
/* Try to get the actual counters from disk. */
error = xfs_ialloc_read_agi(pag, NULL, &bp);
error = xfs_ialloc_read_agi(pag, NULL, 0, &bp);
if (!error) {
icount = pag->pagi_count;
xfs_buf_relse(bp);
......@@ -908,7 +908,7 @@ xrep_reinit_pagi(
ASSERT(xfs_perag_initialised_agi(pag));
clear_bit(XFS_AGSTATE_AGI_INIT, &pag->pag_opstate);
error = xfs_ialloc_read_agi(pag, sc->tp, &bp);
error = xfs_ialloc_read_agi(pag, sc->tp, 0, &bp);
if (error)
return error;
......@@ -934,7 +934,7 @@ xrep_ag_init(
ASSERT(!sa->pag);
error = xfs_ialloc_read_agi(pag, sc->tp, &sa->agi_bp);
error = xfs_ialloc_read_agi(pag, sc->tp, 0, &sa->agi_bp);
if (error)
return error;
......
......@@ -1300,7 +1300,7 @@ TRACE_EVENT(xchk_iscan_iget_batch,
__entry->unavail)
);
TRACE_EVENT(xchk_iscan_iget_retry_wait,
DECLARE_EVENT_CLASS(xchk_iscan_retry_wait_class,
TP_PROTO(struct xchk_iscan *iscan),
TP_ARGS(iscan),
TP_STRUCT__entry(
......@@ -1326,7 +1326,13 @@ TRACE_EVENT(xchk_iscan_iget_retry_wait,
__entry->remaining,
__entry->iget_timeout,
__entry->retry_delay)
);
)
#define DEFINE_ISCAN_RETRY_WAIT_EVENT(name) \
DEFINE_EVENT(xchk_iscan_retry_wait_class, name, \
TP_PROTO(struct xchk_iscan *iscan), \
TP_ARGS(iscan))
DEFINE_ISCAN_RETRY_WAIT_EVENT(xchk_iscan_iget_retry_wait);
DEFINE_ISCAN_RETRY_WAIT_EVENT(xchk_iscan_agi_retry_wait);
TRACE_EVENT(xchk_nlinks_collect_dirent,
TP_PROTO(struct xfs_mount *mp, struct xfs_inode *dp,
......
......@@ -2167,7 +2167,7 @@ xfs_iunlink(
pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
/* Get the agi buffer first. It ensures lock ordering on the list. */
error = xfs_read_agi(pag, tp, &agibp);
error = xfs_read_agi(pag, tp, 0, &agibp);
if (error)
goto out;
......@@ -2264,7 +2264,7 @@ xfs_iunlink_remove(
trace_xfs_iunlink_remove(ip);
/* Get the agi buffer first. It ensures lock ordering on the list. */
error = xfs_read_agi(pag, tp, &agibp);
error = xfs_read_agi(pag, tp, 0, &agibp);
if (error)
return error;
......@@ -3142,7 +3142,7 @@ xfs_rename(
pag = xfs_perag_get(mp,
XFS_INO_TO_AGNO(mp, inodes[i]->i_ino));
error = xfs_read_agi(pag, tp, &bp);
error = xfs_read_agi(pag, tp, 0, &bp);
xfs_perag_put(pag);
if (error)
goto out_trans_cancel;
......@@ -3814,7 +3814,7 @@ xfs_inode_reload_unlinked_bucket(
/* Grab the first inode in the list */
pag = xfs_perag_get(mp, agno);
error = xfs_ialloc_read_agi(pag, tp, &agibp);
error = xfs_ialloc_read_agi(pag, tp, 0, &agibp);
xfs_perag_put(pag);
if (error)
return error;
......
......@@ -268,7 +268,7 @@ xfs_iwalk_ag_start(
/* Set up a fresh cursor and empty the inobt cache. */
iwag->nr_recs = 0;
error = xfs_ialloc_read_agi(pag, tp, agi_bpp);
error = xfs_ialloc_read_agi(pag, tp, 0, agi_bpp);
if (error)
return error;
*curpp = xfs_inobt_init_cursor(pag, tp, *agi_bpp);
......@@ -386,7 +386,7 @@ xfs_iwalk_run_callbacks(
}
/* ...and recreate the cursor just past where we left off. */
error = xfs_ialloc_read_agi(iwag->pag, iwag->tp, agi_bpp);
error = xfs_ialloc_read_agi(iwag->pag, iwag->tp, 0, agi_bpp);
if (error)
return error;
*curpp = xfs_inobt_init_cursor(iwag->pag, iwag->tp, *agi_bpp);
......
......@@ -1448,7 +1448,7 @@ xfs_log_work_queue(
* Clear the log incompat flags if we have the opportunity.
*
* This only happens if we're about to log the second dummy transaction as part
* of covering the log and we can get the log incompat feature usage lock.
* of covering the log.
*/
static inline void
xlog_clear_incompat(
......@@ -1463,11 +1463,7 @@ xlog_clear_incompat(
if (log->l_covered_state != XLOG_STATE_COVER_DONE2)
return;
if (!down_write_trylock(&log->l_incompat_users))
return;
xfs_clear_incompat_log_features(mp);
up_write(&log->l_incompat_users);
}
/*
......@@ -1585,8 +1581,6 @@ xlog_alloc_log(
}
log->l_sectBBsize = 1 << log2_size;
init_rwsem(&log->l_incompat_users);
xlog_get_iclog_buffer_size(mp, log);
spin_lock_init(&log->l_icloglock);
......@@ -3871,23 +3865,3 @@ xfs_log_check_lsn(
return valid;
}
/*
* Notify the log that we're about to start using a feature that is protected
* by a log incompat feature flag. This will prevent log covering from
* clearing those flags.
*/
void
xlog_use_incompat_feat(
struct xlog *log)
{
down_read(&log->l_incompat_users);
}
/* Notify the log that we've finished using log incompat features. */
void
xlog_drop_incompat_feat(
struct xlog *log)
{
up_read(&log->l_incompat_users);
}
......@@ -159,8 +159,6 @@ bool xfs_log_check_lsn(struct xfs_mount *, xfs_lsn_t);
xfs_lsn_t xlog_grant_push_threshold(struct xlog *log, int need_bytes);
bool xlog_force_shutdown(struct xlog *log, uint32_t shutdown_flags);
void xlog_use_incompat_feat(struct xlog *log);
void xlog_drop_incompat_feat(struct xlog *log);
int xfs_attr_use_log_assist(struct xfs_mount *mp);
#endif /* __XFS_LOG_H__ */
......@@ -450,9 +450,6 @@ struct xlog {
xfs_lsn_t l_recovery_lsn;
uint32_t l_iclog_roundoff;/* padding roundoff */
/* Users of log incompat features should take a read lock. */
struct rw_semaphore l_incompat_users;
};
/*
......
......@@ -2656,7 +2656,7 @@ xlog_recover_clear_agi_bucket(
if (error)
goto out_error;
error = xfs_read_agi(pag, tp, &agibp);
error = xfs_read_agi(pag, tp, 0, &agibp);
if (error)
goto out_abort;
......@@ -2772,7 +2772,7 @@ xlog_recover_iunlink_ag(
int bucket;
int error;
error = xfs_read_agi(pag, NULL, &agibp);
error = xfs_read_agi(pag, NULL, 0, &agibp);
if (error) {
/*
* AGI is b0rked. Don't process it.
......@@ -3496,21 +3496,6 @@ xlog_recover_finish(
*/
xfs_log_force(log->l_mp, XFS_LOG_SYNC);
/*
* Now that we've recovered the log and all the intents, we can clear
* the log incompat feature bits in the superblock because there's no
* longer anything to protect. We rely on the AIL push to write out the
* updated superblock after everything else.
*/
if (xfs_clear_incompat_log_features(log->l_mp)) {
error = xfs_sync_sb(log->l_mp, false);
if (error < 0) {
xfs_alert(log->l_mp,
"Failed to clear log incompat features on recovery");
goto out_error;
}
}
xlog_recover_process_iunlinks(log);
/*
......
......@@ -1095,6 +1095,11 @@ xfs_unmountfs(
"Freespace may not be correct on next mount.");
xfs_unmount_check(mp);
/*
* Indicate that it's ok to clear log incompat bits before cleaning
* the log and writing the unmount record.
*/
xfs_set_done_with_log_incompat(mp);
xfs_log_unmount(mp);
xfs_da_unmount(mp);
xfs_uuid_unmount(mp);
......@@ -1364,7 +1369,8 @@ xfs_clear_incompat_log_features(
if (!xfs_has_crc(mp) ||
!xfs_sb_has_incompat_log_feature(&mp->m_sb,
XFS_SB_FEAT_INCOMPAT_LOG_ALL) ||
xfs_is_shutdown(mp))
xfs_is_shutdown(mp) ||
!xfs_is_done_with_log_incompat(mp))
return false;
/*
......
......@@ -412,6 +412,8 @@ __XFS_HAS_FEAT(nouuid, NOUUID)
#define XFS_OPSTATE_WARNED_LARP 9
/* Mount time quotacheck is running */
#define XFS_OPSTATE_QUOTACHECK_RUNNING 10
/* Do we want to clear log incompat flags? */
#define XFS_OPSTATE_UNSET_LOG_INCOMPAT 11
#define __XFS_IS_OPSTATE(name, NAME) \
static inline bool xfs_is_ ## name (struct xfs_mount *mp) \
......@@ -439,6 +441,7 @@ __XFS_IS_OPSTATE(quotacheck_running, QUOTACHECK_RUNNING)
#else
# define xfs_is_quotacheck_running(mp) (false)
#endif
__XFS_IS_OPSTATE(done_with_log_incompat, UNSET_LOG_INCOMPAT)
static inline bool
xfs_should_warn(struct xfs_mount *mp, long nr)
......@@ -457,7 +460,8 @@ xfs_should_warn(struct xfs_mount *mp, long nr)
{ (1UL << XFS_OPSTATE_WARNED_SCRUB), "wscrub" }, \
{ (1UL << XFS_OPSTATE_WARNED_SHRINK), "wshrink" }, \
{ (1UL << XFS_OPSTATE_WARNED_LARP), "wlarp" }, \
{ (1UL << XFS_OPSTATE_QUOTACHECK_RUNNING), "quotacheck" }
{ (1UL << XFS_OPSTATE_QUOTACHECK_RUNNING), "quotacheck" }, \
{ (1UL << XFS_OPSTATE_UNSET_LOG_INCOMPAT), "unset_log_incompat" }
/*
* Max and min values for mount-option defined I/O
......
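
Tying the mount-state changes above together: clearing the log incompat bits
is now gated on an opstate flag that only the unmount path sets. A condensed
sketch of the flow, simplified from the xfs_unmountfs() and
xfs_clear_incompat_log_features() hunks above:

/* Unmount path: allow the final log covering to clear the incompat bits. */
xfs_set_done_with_log_incompat(mp);
xfs_log_unmount(mp);

/* In xfs_clear_incompat_log_features(): bail out during normal runtime. */
if (!xfs_is_done_with_log_incompat(mp))
	return false;	/* leave the feature bits set until clean unmount */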
......@@ -22,10 +22,7 @@
/*
* Get permission to use log-assisted atomic exchange of file extents.
*
* Callers must not be running any transactions or hold any inode locks, and
* they must release the permission by calling xlog_drop_incompat_feat
* when they're done.
* Callers must not be running any transactions or hold any ILOCKs.
*/
static inline int
xfs_attr_grab_log_assist(
......@@ -33,16 +30,7 @@ xfs_attr_grab_log_assist(
{
int error = 0;
/*
* Protect ourselves from an idle log clearing the logged xattrs log
* incompat feature bit.
*/
xlog_use_incompat_feat(mp->m_log);
/*
* If log-assisted xattrs are already enabled, the caller can use the
* log assisted swap functions with the log-incompat reference we got.
*/
/* xattr update log intent items are already enabled */
if (xfs_sb_version_haslogxattrs(&mp->m_sb))
return 0;
......@@ -52,31 +40,19 @@ xfs_attr_grab_log_assist(
* a V5 filesystem for the superblock field, but we'll require rmap
* or reflink to avoid having to deal with really old kernels.
*/
if (!xfs_has_reflink(mp) && !xfs_has_rmapbt(mp)) {
error = -EOPNOTSUPP;
goto drop_incompat;
}
if (!xfs_has_reflink(mp) && !xfs_has_rmapbt(mp))
return -EOPNOTSUPP;
/* Enable log-assisted xattrs. */
error = xfs_add_incompat_log_feature(mp,
XFS_SB_FEAT_INCOMPAT_LOG_XATTRS);
if (error)
goto drop_incompat;
return error;
xfs_warn_mount(mp, XFS_OPSTATE_WARNED_LARP,
"EXPERIMENTAL logged extended attributes feature in use. Use at your own risk!");
return 0;
drop_incompat:
xlog_drop_incompat_feat(mp->m_log);
return error;
}
static inline void
xfs_attr_rele_log_assist(
struct xfs_mount *mp)
{
xlog_drop_incompat_feat(mp->m_log);
}
static inline bool
......@@ -100,7 +76,6 @@ xfs_attr_change(
struct xfs_da_args *args)
{
struct xfs_mount *mp = args->dp->i_mount;
bool use_logging = false;
int error;
ASSERT(!(args->op_flags & XFS_DA_OP_LOGGED));
......@@ -111,14 +86,9 @@ xfs_attr_change(
return error;
args->op_flags |= XFS_DA_OP_LOGGED;
use_logging = true;
}
error = xfs_attr_set(args);
if (use_logging)
xfs_attr_rele_log_assist(mp);
return error;
return xfs_attr_set(args);
}
......