Commit 736a4c11 authored by Linus Torvalds

Merge tag 'for-linus-v3.9-rc1' of git://oss.sgi.com/xfs/xfs

Pull xfs update from Ben Myers:
 "Primarily bugfixes and a few cleanups:

   - fix(es) for compound buffers

   - remove unused XFS_TRANS_DEBUG routines

   - fix for dquot soft timer asserts due to overflow of d_blk_softlimit

   - don't zero allocation args structure members after they are memset(0)

   - fix for regression in dir v2 code introduced in commit 20f7e9f3

   - remove obsolete simple_strto<foo>

   - fix return value when filesystem probe finds no XFS magic, a
     regression introduced in 98021821.

   - remove boolean_t typedef completely

   - fix stack switch in __xfs_bmapi_allocate by moving the check for
     stack switch up into xfs_bmapi_write.

   - fix build error due to incomplete boolean_t removal

   - fix oops in _xfs_buf_find by validating that the requested block is
     within the filesystem bounds.

   - limit speculative preallocation near ENOSPC.

   - fix an unmount hang in xfs_wait_buftarg by freeing the
     xfs_buf_log_item in xfs_buf_item_unlock.

   - fix a possible use after free with AIO.

   - fix xfs_swap_extents after removal of xfs_flushinval_pages, a
     regression introduced in fb595814.

   - replace hardcoded 128 with log header size

   - add memory barrier before wake_up_bit in xfs_ifunlock

   - limit speculative preallocation on sparse files

   - fix xa_lock recursion bug introduced in 90810b9e

   - fix write verifier for symlinks"

Fixed up conflicts in fs/xfs/xfs_buf_item.c (due to bli_format rename in
commit 0f22f9d0 affecting the removed XFS_TRANS_DEBUG routines in
commit ec47eb6b).

* tag 'for-linus-v3.9-rc1' of git://oss.sgi.com/xfs/xfs: (36 commits)
  xfs: xfs_bmap_add_attrfork_local is too generic
  xfs: remove log force from xfs_buf_trylock()
  xfs: recheck buffer pinned status after push trylock failure
  xfs: limit speculative prealloc size on sparse files
  xfs: memory barrier before wake_up_bit()
  xfs: refactor space log reservation for XFS_TRANS_ATTR_SET
  xfs: make use of XFS_SB_LOG_RES() at xfs_fs_log_dummy()
  xfs: make use of XFS_SB_LOG_RES() at xfs_mount_log_sb()
  xfs: make use of XFS_SB_LOG_RES() at xfs_log_sbcount()
  xfs: introduce XFS_SB_LOG_RES() for transactions that modify sb on disk
  xfs: calculate XFS_TRANS_QM_QUOTAOFF_END space log reservation at mount time
  xfs: calculate XFS_TRANS_QM_QUOTAOFF space log reservation at mount time
  xfs: calculate XFS_TRANS_QM_DQALLOC space log reservation at mount time
  xfs: calcuate XFS_TRANS_QM_SETQLIM space log reservation at mount time
  xfs: calculate xfs_qm_write_sb_changes() space log reservation at mount time
  xfs: calculate XFS_TRANS_QM_SBCHANGE space log reservation at mount time
  xfs: make use of xfs_calc_buf_res() in xfs_trans.c
  xfs: add a helper to figure out the space log reservation per item
  xfs: Fix xfs_swap_extents() after removal of xfs_flushinval_pages()
  xfs: Fix possible use-after-free with AIO
  ...
parents c4bc705e 1e82379b
@@ -1925,8 +1925,6 @@ xfs_alloc_fix_freelist(
 	targs.mp = mp;
 	targs.agbp = agbp;
 	targs.agno = args->agno;
-	targs.mod = targs.minleft = targs.wasdel = targs.userdata =
-		targs.minalignslop = 0;
 	targs.alignment = targs.minlen = targs.prod = targs.isfl = 1;
 	targs.type = XFS_ALLOCTYPE_THIS_AG;
 	targs.pag = pag;
@@ -300,9 +300,12 @@ xfs_attr_set_int(
 	if (rsvd)
 		args.trans->t_flags |= XFS_TRANS_RESERVE;
-	if ((error = xfs_trans_reserve(args.trans, args.total,
-			XFS_ATTRSET_LOG_RES(mp, args.total), 0,
-			XFS_TRANS_PERM_LOG_RES, XFS_ATTRSET_LOG_COUNT))) {
+	error = xfs_trans_reserve(args.trans, args.total,
+			XFS_ATTRSETM_LOG_RES(mp) +
+			XFS_ATTRSETRT_LOG_RES(mp) * args.total,
+			0, XFS_TRANS_PERM_LOG_RES,
+			XFS_ATTRSET_LOG_COUNT);
+	if (error) {
 		xfs_trans_cancel(args.trans, 0);
 		return(error);
 	}
@@ -147,7 +147,10 @@ xfs_bmap_local_to_extents(
 	xfs_fsblock_t	*firstblock,	/* first block allocated in xaction */
 	xfs_extlen_t	total,		/* total blocks needed by transaction */
 	int		*logflagsp,	/* inode logging flags */
-	int		whichfork);	/* data or attr fork */
+	int		whichfork,	/* data or attr fork */
+	void		(*init_fn)(struct xfs_buf *bp,
+				   struct xfs_inode *ip,
+				   struct xfs_ifork *ifp));

 /*
  * Search the extents list for the inode, for the extent containing bno.

@@ -357,7 +360,42 @@ xfs_bmap_add_attrfork_extents(
 }

 /*
- * Called from xfs_bmap_add_attrfork to handle local format files.
+ * Block initialisation functions for local to extent format conversion.
+ * As these get more complex, they will be moved to the relevant files,
+ * but for now they are too simple to worry about.
+ */
+STATIC void
+xfs_bmap_local_to_extents_init_fn(
+	struct xfs_buf		*bp,
+	struct xfs_inode	*ip,
+	struct xfs_ifork	*ifp)
+{
+	bp->b_ops = &xfs_bmbt_buf_ops;
+	memcpy(bp->b_addr, ifp->if_u1.if_data, ifp->if_bytes);
+}
+
+STATIC void
+xfs_symlink_local_to_remote(
+	struct xfs_buf		*bp,
+	struct xfs_inode	*ip,
+	struct xfs_ifork	*ifp)
+{
+	/* remote symlink blocks are not verifiable until CRCs come along */
+	bp->b_ops = NULL;
+	memcpy(bp->b_addr, ifp->if_u1.if_data, ifp->if_bytes);
+}
+
+/*
+ * Called from xfs_bmap_add_attrfork to handle local format files. Each
+ * different data fork content type needs a different callout to do the
+ * conversion. Some are basic and only require special block initialisation
+ * callouts for the data formating, others (directories) are so specialised they
+ * handle everything themselves.
+ *
+ * XXX (dgc): investigate whether directory conversion can use the generic
+ * formatting callout. It should be possible - it's just a very complex
+ * formatter. it would also require passing the transaction through to the init
+ * function.
  */
 STATIC int					/* error */
 xfs_bmap_add_attrfork_local(
@@ -368,25 +406,29 @@ xfs_bmap_add_attrfork_local(
 	int			*flags)		/* inode logging flags */
 {
 	xfs_da_args_t		dargs;		/* args for dir/attr code */
-	int			error;		/* error return value */
-	xfs_mount_t		*mp;		/* mount structure pointer */

 	if (ip->i_df.if_bytes <= XFS_IFORK_DSIZE(ip))
 		return 0;
 	if (S_ISDIR(ip->i_d.di_mode)) {
-		mp = ip->i_mount;
 		memset(&dargs, 0, sizeof(dargs));
 		dargs.dp = ip;
 		dargs.firstblock = firstblock;
 		dargs.flist = flist;
-		dargs.total = mp->m_dirblkfsbs;
+		dargs.total = ip->i_mount->m_dirblkfsbs;
 		dargs.whichfork = XFS_DATA_FORK;
 		dargs.trans = tp;
-		error = xfs_dir2_sf_to_block(&dargs);
-	} else
-		error = xfs_bmap_local_to_extents(tp, ip, firstblock, 1, flags,
-			XFS_DATA_FORK);
-	return error;
+		return xfs_dir2_sf_to_block(&dargs);
+	}
+
+	if (S_ISLNK(ip->i_d.di_mode))
+		return xfs_bmap_local_to_extents(tp, ip, firstblock, 1,
+						 flags, XFS_DATA_FORK,
+						 xfs_symlink_local_to_remote);
+
+	return xfs_bmap_local_to_extents(tp, ip, firstblock, 1, flags,
+					 XFS_DATA_FORK,
+					 xfs_bmap_local_to_extents_init_fn);
 }

 /*

@@ -3099,8 +3141,6 @@ xfs_bmap_extents_to_btree(
 		args.fsbno = *firstblock;
 	}
 	args.minlen = args.maxlen = args.prod = 1;
-	args.total = args.minleft = args.alignment = args.mod = args.isfl =
-		args.minalignslop = 0;
 	args.wasdel = wasdel;
 	*logflagsp = 0;
 	if ((error = xfs_alloc_vextent(&args))) {
@@ -3221,7 +3261,10 @@ xfs_bmap_local_to_extents(
 	xfs_fsblock_t	*firstblock,	/* first block allocated in xaction */
 	xfs_extlen_t	total,		/* total blocks needed by transaction */
 	int		*logflagsp,	/* inode logging flags */
-	int		whichfork)	/* data or attr fork */
+	int		whichfork,
+	void		(*init_fn)(struct xfs_buf *bp,
+				   struct xfs_inode *ip,
+				   struct xfs_ifork *ifp))
 {
 	int		error;		/* error return value */
 	int		flags;		/* logging flags returned */

@@ -3241,12 +3284,12 @@ xfs_bmap_local_to_extents(
 		xfs_buf_t	*bp;	/* buffer for extent block */
 		xfs_bmbt_rec_host_t *ep;/* extent record pointer */

+		ASSERT((ifp->if_flags &
+			(XFS_IFINLINE|XFS_IFEXTENTS|XFS_IFEXTIREC)) == XFS_IFINLINE);
 		memset(&args, 0, sizeof(args));
 		args.tp = tp;
 		args.mp = ip->i_mount;
 		args.firstblock = *firstblock;
-		ASSERT((ifp->if_flags &
-			(XFS_IFINLINE|XFS_IFEXTENTS|XFS_IFEXTIREC)) == XFS_IFINLINE);
 		/*
 		 * Allocate a block. We know we need only one, since the
 		 * file currently fits in an inode.

@@ -3259,20 +3302,21 @@ xfs_bmap_local_to_extents(
 			args.type = XFS_ALLOCTYPE_NEAR_BNO;
 		}
 		args.total = total;
-		args.mod = args.minleft = args.alignment = args.wasdel =
-			args.isfl = args.minalignslop = 0;
 		args.minlen = args.maxlen = args.prod = 1;
-		if ((error = xfs_alloc_vextent(&args)))
+		error = xfs_alloc_vextent(&args);
+		if (error)
 			goto done;
-		/*
-		 * Can't fail, the space was reserved.
-		 */
+
+		/* Can't fail, the space was reserved. */
 		ASSERT(args.fsbno != NULLFSBLOCK);
 		ASSERT(args.len == 1);
 		*firstblock = args.fsbno;
 		bp = xfs_btree_get_bufl(args.mp, tp, args.fsbno, 0);
-		bp->b_ops = &xfs_bmbt_buf_ops;
-		memcpy(bp->b_addr, ifp->if_u1.if_data, ifp->if_bytes);
+
+		/* initialise the block and copy the data */
+		init_fn(bp, ip, ifp);
+
+		/* account for the change in fork size and log everything */
 		xfs_trans_log_buf(tp, bp, 0, ifp->if_bytes - 1);
 		xfs_bmap_forkoff_reset(args.mp, ip, whichfork);
 		xfs_idata_realloc(ip, -ifp->if_bytes, whichfork);
@@ -4919,8 +4963,32 @@ xfs_bmapi_write(
 	XFS_STATS_INC(xs_blk_mapw);

 	if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
+		/*
+		 * XXX (dgc): This assumes we are only called for inodes that
+		 * contain content neutral data in local format. Anything that
+		 * contains caller-specific data in local format that needs
+		 * transformation to move to a block format needs to do the
+		 * conversion to extent format itself.
+		 *
+		 * Directory data forks and attribute forks handle this
+		 * themselves, but with the addition of metadata verifiers every
+		 * data fork in local format now contains caller specific data
+		 * and as such conversion through this function is likely to be
+		 * broken.
+		 *
+		 * The only likely user of this branch is for remote symlinks,
+		 * but we cannot overwrite the data fork contents of the symlink
+		 * (EEXIST occurs higher up the stack) and so it will never go
+		 * from local format to extent format here. Hence I don't think
+		 * this branch is ever executed intentionally and we should
+		 * consider removing it and asserting that xfs_bmapi_write()
+		 * cannot be called directly on local format forks. i.e. callers
+		 * are completely responsible for local to extent format
+		 * conversion, not xfs_bmapi_write().
+		 */
 		error = xfs_bmap_local_to_extents(tp, ip, firstblock, total,
-						  &bma.logflags, whichfork);
+						  &bma.logflags, whichfork,
+						  xfs_bmap_local_to_extents_init_fn);
 		if (error)
 			goto error0;
 	}
@@ -951,8 +951,6 @@ xfs_buf_trylock(
 	locked = down_trylock(&bp->b_sema) == 0;
 	if (locked)
 		XB_SET_OWNER(bp);
-	else if (atomic_read(&bp->b_pin_count) && (bp->b_flags & XBF_STALE))
-		xfs_log_force(bp->b_target->bt_mount, 0);

 	trace_xfs_buf_trylock(bp, _RET_IP_);
 	return locked;
@@ -37,109 +37,6 @@ static inline struct xfs_buf_log_item *BUF_ITEM(struct xfs_log_item *lip)
 	return container_of(lip, struct xfs_buf_log_item, bli_item);
 }

-#ifdef XFS_TRANS_DEBUG
-/*
- * This function uses an alternate strategy for tracking the bytes
- * that the user requests to be logged. This can then be used
- * in conjunction with the bli_orig array in the buf log item to
- * catch bugs in our callers' code.
- *
- * We also double check the bits set in xfs_buf_item_log using a
- * simple algorithm to check that every byte is accounted for.
- */
-STATIC void
-xfs_buf_item_log_debug(
-	xfs_buf_log_item_t	*bip,
-	uint			first,
-	uint			last)
-{
-	uint	x;
-	uint	byte;
-	uint	nbytes;
-	uint	chunk_num;
-	uint	word_num;
-	uint	bit_num;
-	uint	bit_set;
-	uint	*wordp;
-
-	ASSERT(bip->bli_logged != NULL);
-	byte = first;
-	nbytes = last - first + 1;
-	bfset(bip->bli_logged, first, nbytes);
-	for (x = 0; x < nbytes; x++) {
-		chunk_num = byte >> XFS_BLF_SHIFT;
-		word_num = chunk_num >> BIT_TO_WORD_SHIFT;
-		bit_num = chunk_num & (NBWORD - 1);
-		wordp = &(bip->__bli_format.blf_data_map[word_num]);
-		bit_set = *wordp & (1 << bit_num);
-		ASSERT(bit_set);
-		byte++;
-	}
-}
-
-/*
- * This function is called when we flush something into a buffer without
- * logging it. This happens for things like inodes which are logged
- * separately from the buffer.
- */
-void
-xfs_buf_item_flush_log_debug(
-	xfs_buf_t	*bp,
-	uint		first,
-	uint		last)
-{
-	xfs_buf_log_item_t	*bip = bp->b_fspriv;
-	uint			nbytes;
-
-	if (bip == NULL || (bip->bli_item.li_type != XFS_LI_BUF))
-		return;
-
-	ASSERT(bip->bli_logged != NULL);
-	nbytes = last - first + 1;
-	bfset(bip->bli_logged, first, nbytes);
-}
-
-/*
- * This function is called to verify that our callers have logged
- * all the bytes that they changed.
- *
- * It does this by comparing the original copy of the buffer stored in
- * the buf log item's bli_orig array to the current copy of the buffer
- * and ensuring that all bytes which mismatch are set in the bli_logged
- * array of the buf log item.
- */
-STATIC void
-xfs_buf_item_log_check(
-	xfs_buf_log_item_t	*bip)
-{
-	char		*orig;
-	char		*buffer;
-	int		x;
-	xfs_buf_t	*bp;
-
-	ASSERT(bip->bli_orig != NULL);
-	ASSERT(bip->bli_logged != NULL);
-
-	bp = bip->bli_buf;
-	ASSERT(bp->b_length > 0);
-	ASSERT(bp->b_addr != NULL);
-	orig = bip->bli_orig;
-	buffer = bp->b_addr;
-	for (x = 0; x < BBTOB(bp->b_length); x++) {
-		if (orig[x] != buffer[x] && !btst(bip->bli_logged, x)) {
-			xfs_emerg(bp->b_mount,
-				"%s: bip %x buffer %x orig %x index %d",
-				__func__, bip, bp, orig, x);
-			ASSERT(0);
-		}
-	}
-}
-#else
-#define		xfs_buf_item_log_debug(x,y,z)
-#define		xfs_buf_item_log_check(x)
-#endif
-
 STATIC void	xfs_buf_do_callbacks(struct xfs_buf *bp);

 /*

@@ -429,7 +326,6 @@ xfs_buf_item_format(
 	 * Check to make sure everything is consistent.
 	 */
 	trace_xfs_buf_item_format(bip);
-	xfs_buf_item_log_check(bip);
 }

 /*
@@ -573,8 +469,18 @@ xfs_buf_item_push(
 	if (xfs_buf_ispinned(bp))
 		return XFS_ITEM_PINNED;
-	if (!xfs_buf_trylock(bp))
+	if (!xfs_buf_trylock(bp)) {
+		/*
+		 * If we have just raced with a buffer being pinned and it has
+		 * been marked stale, we could end up stalling until someone else
+		 * issues a log force to unpin the stale buffer. Check for the
+		 * race condition here so xfsaild recognizes the buffer is pinned
+		 * and queues a log force to move it along.
+		 */
+		if (xfs_buf_ispinned(bp))
+			return XFS_ITEM_PINNED;
 		return XFS_ITEM_LOCKED;
+	}

 	ASSERT(!(bip->bli_flags & XFS_BLI_STALE));
@@ -923,8 +829,6 @@ xfs_buf_item_log_segment(
 		mask = (1 << end_bit) - 1;
 		*wordp |= mask;
 	}
-
-	xfs_buf_item_log_debug(bip, first, last);
 }

 /*
@@ -98,10 +98,6 @@ typedef struct xfs_buf_log_item {
 	unsigned int		bli_flags;	/* misc flags */
 	unsigned int		bli_recur;	/* lock recursion count */
 	atomic_t		bli_refcount;	/* cnt of tp refs */
-#ifdef XFS_TRANS_DEBUG
-	char			*bli_orig;	/* original buffer copy */
-	char			*bli_logged;	/* bytes logged (bitmap) */
-#endif
 	int			bli_format_count;	/* count of headers */
 	struct xfs_buf_log_format *bli_formats;	/* array of in-log header ptrs */
 	struct xfs_buf_log_format __bli_format;	/* embedded in-log header */

@@ -117,16 +113,6 @@ void	xfs_buf_attach_iodone(struct xfs_buf *,
 void	xfs_buf_iodone_callbacks(struct xfs_buf *);
 void	xfs_buf_iodone(struct xfs_buf *, struct xfs_log_item *);

-#ifdef XFS_TRANS_DEBUG
-void
-xfs_buf_item_flush_log_debug(
-	struct xfs_buf *bp,
-	uint	first,
-	uint	last);
-#else
-#define	xfs_buf_item_flush_log_debug(bp, first, last)
-#endif
-
 #endif	/* __KERNEL__ */

 #endif	/* __XFS_BUF_ITEM_H__ */
@@ -612,15 +612,9 @@ xfs_qm_dqread(
 	if (flags & XFS_QMOPT_DQALLOC) {
 		tp = xfs_trans_alloc(mp, XFS_TRANS_QM_DQALLOC);
 		error = xfs_trans_reserve(tp, XFS_QM_DQALLOC_SPACE_RES(mp),
-				XFS_WRITE_LOG_RES(mp) +
-				/*
-				 * Round the chunklen up to the next multiple
-				 * of 128 (buf log item chunk size)).
-				 */
-				BBTOB(mp->m_quotainfo->qi_dqchunklen) - 1 + 128,
-				0,
-				XFS_TRANS_PERM_LOG_RES,
-				XFS_WRITE_LOG_COUNT);
+					  XFS_QM_DQALLOC_LOG_RES(mp), 0,
+					  XFS_TRANS_PERM_LOG_RES,
+					  XFS_WRITE_LOG_COUNT);
 		if (error)
 			goto error1;
 		cancelflags = XFS_TRANS_RELEASE_LOG_RES;
@@ -709,8 +709,8 @@ xfs_fs_log_dummy(
 	int		error;

 	tp = _xfs_trans_alloc(mp, XFS_TRANS_DUMMY1, KM_SLEEP);
-	error = xfs_trans_reserve(tp, 0, mp->m_sb.sb_sectsize + 128, 0, 0,
-				  XFS_DEFAULT_LOG_COUNT);
+	error = xfs_trans_reserve(tp, 0, XFS_SB_LOG_RES(mp), 0, 0,
+				  XFS_DEFAULT_LOG_COUNT);
 	if (error) {
 		xfs_trans_cancel(tp, 0);
 		return error;
@@ -279,8 +279,6 @@ xfs_ialloc_ag_alloc(
 		     (args.agbno < be32_to_cpu(agi->agi_length)))) {
 		args.fsbno = XFS_AGB_TO_FSB(args.mp, agno, args.agbno);
 		args.type = XFS_ALLOCTYPE_THIS_BNO;
-		args.mod = args.total = args.wasdel = args.isfl =
-			args.userdata = args.minalignslop = 0;
 		args.prod = 1;

 		/*

@@ -333,8 +331,6 @@ xfs_ialloc_ag_alloc(
 		 * Allocate a fixed-size extent of inodes.
 		 */
 		args.type = XFS_ALLOCTYPE_NEAR_BNO;
-		args.mod = args.total = args.wasdel = args.isfl =
-			args.userdata = args.minalignslop = 0;
 		args.prod = 1;
 		/*
 		 * Allow space for the inode btree to split.
@@ -2379,9 +2379,6 @@ xfs_iflush_fork(
 	char			*cp;
 	xfs_ifork_t		*ifp;
 	xfs_mount_t		*mp;
-#ifdef XFS_TRANS_DEBUG
-	int			first;
-#endif
 	static const short	brootflag[2] =
 		{ XFS_ILOG_DBROOT, XFS_ILOG_ABROOT };
 	static const short	dataflag[2] =

@@ -2724,9 +2721,6 @@ xfs_iflush_int(
 	xfs_inode_log_item_t	*iip;
 	xfs_dinode_t		*dip;
 	xfs_mount_t		*mp;
-#ifdef XFS_TRANS_DEBUG
-	int			first;
-#endif

 	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_ILOCK_SHARED));
 	ASSERT(xfs_isiflocked(ip));
@@ -419,6 +419,7 @@ static inline void xfs_iflock(struct xfs_inode *ip)
 static inline void xfs_ifunlock(struct xfs_inode *ip)
 {
 	xfs_iflags_clear(ip, XFS_IFLOCK);
+	smp_mb();
 	wake_up_bit(&ip->i_flags, __XFS_IFLOCK_BIT);
 }
@@ -269,17 +269,6 @@ xfs_inode_item_format(
 			} else {
 				ASSERT(!(iip->ili_fields &
 					 XFS_ILOG_DBROOT));
-#ifdef XFS_TRANS_DEBUG
-				if (iip->ili_root_size > 0) {
-					ASSERT(iip->ili_root_size ==
-					       ip->i_df.if_broot_bytes);
-					ASSERT(memcmp(iip->ili_orig_root,
-						      ip->i_df.if_broot,
-						      iip->ili_root_size) == 0);
-				} else {
-					ASSERT(ip->i_df.if_broot_bytes == 0);
-				}
-#endif
 				iip->ili_fields &= ~XFS_ILOG_DBROOT;
 			}
 			break;

@@ -678,11 +667,6 @@ void
 xfs_inode_item_destroy(
 	xfs_inode_t	*ip)
 {
-#ifdef XFS_TRANS_DEBUG
-	if (ip->i_itemp->ili_root_size != 0) {
-		kmem_free(ip->i_itemp->ili_orig_root);
-	}
-#endif
 	kmem_zone_free(xfs_ili_zone, ip->i_itemp);
 }
@@ -148,10 +148,6 @@ typedef struct xfs_inode_log_item {
 						      data exts */
 	struct xfs_bmbt_rec	*ili_aextents_buf; /* array of logged
 						      attr exts */
-#ifdef XFS_TRANS_DEBUG
-	int			ili_root_size;
-	char			*ili_orig_root;
-#endif
 	xfs_inode_log_format_t	ili_format;	   /* logged structure */
 } xfs_inode_log_item_t;
@@ -310,6 +310,62 @@ xfs_iomap_eof_want_preallocate(
 	return 0;
 }

+/*
+ * Determine the initial size of the preallocation. We are beyond the current
+ * EOF here, but we need to take into account whether this is a sparse write or
+ * an extending write when determining the preallocation size. Hence we need to
+ * look up the extent that ends at the current write offset and use the result
+ * to determine the preallocation size.
+ *
+ * If the extent is a hole, then preallocation is essentially disabled.
+ * Otherwise we take the size of the preceeding data extent as the basis for the
+ * preallocation size. If the size of the extent is greater than half the
+ * maximum extent length, then use the current offset as the basis. This ensures
+ * that for large files the preallocation size always extends to MAXEXTLEN
+ * rather than falling short due to things like stripe unit/width alignment of
+ * real extents.
+ */
+STATIC int
+xfs_iomap_eof_prealloc_initial_size(
+	struct xfs_mount	*mp,
+	struct xfs_inode	*ip,
+	xfs_off_t		offset,
+	xfs_bmbt_irec_t		*imap,
+	int			nimaps)
+{
+	xfs_fileoff_t	start_fsb;
+	int		imaps = 1;
+	int		error;
+
+	ASSERT(nimaps >= imaps);
+
+	/* if we are using a specific prealloc size, return now */
+	if (mp->m_flags & XFS_MOUNT_DFLT_IOSIZE)
+		return 0;
+
+	/*
+	 * As we write multiple pages, the offset will always align to the
+	 * start of a page and hence point to a hole at EOF. i.e. if the size is
+	 * 4096 bytes, we only have one block at FSB 0, but XFS_B_TO_FSB(4096)
+	 * will return FSB 1. Hence if there are blocks in the file, we want to
+	 * point to the block prior to the EOF block and not the hole that maps
+	 * directly at @offset.
+	 */
+	start_fsb = XFS_B_TO_FSB(mp, offset);
+	if (start_fsb)
+		start_fsb--;
+	error = xfs_bmapi_read(ip, start_fsb, 1, imap, &imaps, XFS_BMAPI_ENTIRE);
+	if (error)
+		return 0;
+
+	ASSERT(imaps == 1);
+	if (imap[0].br_startblock == HOLESTARTBLOCK)
+		return 0;
+	if (imap[0].br_blockcount <= (MAXEXTLEN >> 1))
+		return imap[0].br_blockcount;
+	return XFS_B_TO_FSB(mp, offset);
+}
+
 /*
  * If we don't have a user specified preallocation size, dynamically increase
  * the preallocation size as the size of the file grows. Cap the maximum size
@@ -319,20 +375,19 @@ xfs_iomap_eof_want_preallocate(
 STATIC xfs_fsblock_t
 xfs_iomap_prealloc_size(
 	struct xfs_mount	*mp,
-	struct xfs_inode	*ip)
+	struct xfs_inode	*ip,
+	xfs_off_t		offset,
+	struct xfs_bmbt_irec	*imap,
+	int			nimaps)
 {
 	xfs_fsblock_t		alloc_blocks = 0;

-	if (!(mp->m_flags & XFS_MOUNT_DFLT_IOSIZE)) {
+	alloc_blocks = xfs_iomap_eof_prealloc_initial_size(mp, ip, offset,
+							   imap, nimaps);
+	if (alloc_blocks > 0) {
 		int shift = 0;
 		int64_t freesp;

-		/*
-		 * rounddown_pow_of_two() returns an undefined result
-		 * if we pass in alloc_blocks = 0. Hence the "+ 1" to
-		 * ensure we always pass in a non-zero value.
-		 */
-		alloc_blocks = XFS_B_TO_FSB(mp, XFS_ISIZE(ip)) + 1;
 		alloc_blocks = XFS_FILEOFF_MIN(MAXEXTLEN,
 				rounddown_pow_of_two(alloc_blocks));

@@ -399,7 +454,6 @@ xfs_iomap_write_delay(
 	extsz = xfs_get_extsz_hint(ip);
 	offset_fsb = XFS_B_TO_FSBT(mp, offset);
-
 	error = xfs_iomap_eof_want_preallocate(mp, ip, offset, count,
 				imap, XFS_WRITE_IMAPS, &prealloc);
 	if (error)

@@ -407,7 +461,10 @@ xfs_iomap_write_delay(
 retry:
 	if (prealloc) {
-		xfs_fsblock_t	alloc_blocks = xfs_iomap_prealloc_size(mp, ip);
+		xfs_fsblock_t	alloc_blocks;
+
+		alloc_blocks = xfs_iomap_prealloc_size(mp, ip, offset, imap,
+						       XFS_WRITE_IMAPS);

 		aligned_offset = XFS_WRITEIO_ALIGN(mp, (offset + count - 1));
 		ioalign = XFS_B_TO_FSBT(mp, aligned_offset);
@@ -120,7 +120,7 @@ xlog_verify_iclog(
 	struct xlog		*log,
 	struct xlog_in_core	*iclog,
 	int			count,
-	boolean_t		syncing);
+	bool			syncing);
 STATIC void
 xlog_verify_tail_lsn(
 	struct xlog		*log,

@@ -1737,7 +1737,7 @@ xlog_sync(
 	ASSERT(XFS_BUF_ADDR(bp) <= log->l_logBBsize-1);
 	ASSERT(XFS_BUF_ADDR(bp) + BTOBB(count) <= log->l_logBBsize);

-	xlog_verify_iclog(log, iclog, count, B_TRUE);
+	xlog_verify_iclog(log, iclog, count, true);
 	/* account for log which doesn't start at block #0 */
 	XFS_BUF_SET_ADDR(bp, XFS_BUF_ADDR(bp) + log->l_logBBstart);

@@ -3611,7 +3611,7 @@ xlog_verify_iclog(
 	struct xlog		*log,
 	struct xlog_in_core	*iclog,
 	int			count,
-	boolean_t		syncing)
+	bool			syncing)
 {
 	xlog_op_header_t	*ophead;
 	xlog_in_core_t		*icptr;

@@ -3659,7 +3659,7 @@ xlog_verify_iclog(
 		/* clientid is only 1 byte */
 		field_offset = (__psint_t)
 			       ((xfs_caddr_t)&(ophead->oh_clientid) - base_ptr);
-		if (syncing == B_FALSE || (field_offset & 0x1ff)) {
+		if (!syncing || (field_offset & 0x1ff)) {
 			clientid = ophead->oh_clientid;
 		} else {
 			idx = BTOBBT((xfs_caddr_t)&(ophead->oh_clientid) - iclog->ic_datap);

@@ -3682,7 +3682,7 @@ xlog_verify_iclog(
 		/* check length */
 		field_offset = (__psint_t)
 			       ((xfs_caddr_t)&(ophead->oh_len) - base_ptr);
-		if (syncing == B_FALSE || (field_offset & 0x1ff)) {
+		if (!syncing || (field_offset & 0x1ff)) {
 			op_len = be32_to_cpu(ophead->oh_len);
 		} else {
 			idx = BTOBBT((__psint_t)&ophead->oh_len -
@@ -1109,8 +1109,8 @@ xfs_mount_reset_sbqflags(
 		return 0;

 	tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SBCHANGE);
-	error = xfs_trans_reserve(tp, 0, mp->m_sb.sb_sectsize + 128, 0, 0,
-				  XFS_DEFAULT_LOG_COUNT);
+	error = xfs_trans_reserve(tp, 0, XFS_QM_SBCHANGE_LOG_RES(mp),
+				  0, 0, XFS_DEFAULT_LOG_COUNT);
 	if (error) {
 		xfs_trans_cancel(tp, 0);
 		xfs_alert(mp, "%s: Superblock update failed!", __func__);

@@ -1583,8 +1583,8 @@ xfs_log_sbcount(xfs_mount_t *mp)
 		return 0;

 	tp = _xfs_trans_alloc(mp, XFS_TRANS_SB_COUNT, KM_SLEEP);
-	error = xfs_trans_reserve(tp, 0, mp->m_sb.sb_sectsize + 128, 0, 0,
-				  XFS_DEFAULT_LOG_COUNT);
+	error = xfs_trans_reserve(tp, 0, XFS_SB_LOG_RES(mp), 0, 0,
+				  XFS_DEFAULT_LOG_COUNT);
 	if (error) {
 		xfs_trans_cancel(tp, 0);
 		return error;

@@ -1945,8 +1945,8 @@ xfs_mount_log_sb(
 			 XFS_SB_VERSIONNUM));

 	tp = xfs_trans_alloc(mp, XFS_TRANS_SB_UNIT);
-	error = xfs_trans_reserve(tp, 0, mp->m_sb.sb_sectsize + 128, 0, 0,
-				  XFS_DEFAULT_LOG_COUNT);
+	error = xfs_trans_reserve(tp, 0, XFS_SB_LOG_RES(mp), 0, 0,
+				  XFS_DEFAULT_LOG_COUNT);
 	if (error) {
 		xfs_trans_cancel(tp, 0);
 		return error;
@@ -34,12 +34,19 @@ typedef struct xfs_trans_reservations {
 	uint	tr_addafork;	/* cvt inode to attributed trans */
 	uint	tr_writeid;	/* write setuid/setgid file */
 	uint	tr_attrinval;	/* attr fork buffer invalidation */
-	uint	tr_attrset;	/* set/create an attribute */
+	uint	tr_attrsetm;	/* set/create an attribute at mount time */
+	uint	tr_attrsetrt;	/* set/create an attribute at runtime */
 	uint	tr_attrrm;	/* remove an attribute */
 	uint	tr_clearagi;	/* clear bad agi unlinked ino bucket */
 	uint	tr_growrtalloc;	/* grow realtime allocations */
 	uint	tr_growrtzero;	/* grow realtime zeroing */
 	uint	tr_growrtfree;	/* grow realtime freeing */
+	uint	tr_qm_sbchange;	/* change quota flags */
+	uint	tr_qm_setqlim;	/* adjust quota limits */
+	uint	tr_qm_dqalloc;	/* allocate quota on disk */
+	uint	tr_qm_quotaoff;	/* turn quota off */
+	uint	tr_qm_equotaoff;/* end of turn quota off */
+	uint	tr_sb;		/* modify superblock */
 } xfs_trans_reservations_t;

 #ifndef __KERNEL__
@@ -1584,10 +1584,9 @@ xfs_qm_write_sb_changes(
 	int		error;

 	tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SBCHANGE);
-	if ((error = xfs_trans_reserve(tp, 0,
-				       mp->m_sb.sb_sectsize + 128, 0,
-				       0,
-				       XFS_DEFAULT_LOG_COUNT))) {
+	error = xfs_trans_reserve(tp, 0, XFS_QM_SBCHANGE_LOG_RES(mp),
+				  0, 0, XFS_DEFAULT_LOG_COUNT);
+	if (error) {
 		xfs_trans_cancel(tp, 0);
 		return error;
 	}
@@ -146,7 +146,7 @@ xfs_qm_newmount(
 		 * inode goes inactive and wants to free blocks,
 		 * or via xfs_log_mount_finish.
 		 */
-		*needquotamount = B_TRUE;
+		*needquotamount = true;
 		*quotaflags = mp->m_qflags;
 		mp->m_qflags = 0;
 	}
@@ -408,10 +408,10 @@ xfs_qm_scall_getqstat(
 {
 	struct xfs_quotainfo	*q = mp->m_quotainfo;
 	struct xfs_inode	*uip, *gip;
-	boolean_t		tempuqip, tempgqip;
+	bool			tempuqip, tempgqip;

 	uip = gip = NULL;
-	tempuqip = tempgqip = B_FALSE;
+	tempuqip = tempgqip = false;

 	memset(out, 0, sizeof(fs_quota_stat_t));
 	out->qs_version = FS_QSTAT_VERSION;

@@ -434,12 +434,12 @@ xfs_qm_scall_getqstat(
 	if (!uip && mp->m_sb.sb_uquotino != NULLFSINO) {
 		if (xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
 					0, 0, &uip) == 0)
-			tempuqip = B_TRUE;
+			tempuqip = true;
 	}
 	if (!gip && mp->m_sb.sb_gquotino != NULLFSINO) {
 		if (xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
 					0, 0, &gip) == 0)
-			tempgqip = B_TRUE;
+			tempgqip = true;
 	}
 	if (uip) {
 		out->qs_uquota.qfs_nblks = uip->i_d.di_nblocks;

@@ -490,8 +490,9 @@ xfs_qm_scall_setqlim(
 		return 0;

 	tp = xfs_trans_alloc(mp, XFS_TRANS_QM_SETQLIM);
-	if ((error = xfs_trans_reserve(tp, 0, sizeof(xfs_disk_dquot_t) + 128,
-				       0, 0, XFS_DEFAULT_LOG_COUNT))) {
+	error = xfs_trans_reserve(tp, 0, XFS_QM_SETQLIM_LOG_RES(mp),
+				  0, 0, XFS_DEFAULT_LOG_COUNT);
+	if (error) {
 		xfs_trans_cancel(tp, 0);
 		return (error);
 	}

@@ -638,8 +639,9 @@ xfs_qm_log_quotaoff_end(

 	tp = xfs_trans_alloc(mp, XFS_TRANS_QM_QUOTAOFF_END);

-	if ((error = xfs_trans_reserve(tp, 0, sizeof(xfs_qoff_logitem_t) * 2,
-				       0, 0, XFS_DEFAULT_LOG_COUNT))) {
+	error = xfs_trans_reserve(tp, 0, XFS_QM_QUOTAOFF_END_LOG_RES(mp),
+				  0, 0, XFS_DEFAULT_LOG_COUNT);
+	if (error) {
 		xfs_trans_cancel(tp, 0);
 		return (error);
 	}

@@ -671,14 +673,10 @@ xfs_qm_log_quotaoff(
 	uint oldsbqflag=0;

 	tp = xfs_trans_alloc(mp, XFS_TRANS_QM_QUOTAOFF);
-	if ((error = xfs_trans_reserve(tp, 0,
-				       sizeof(xfs_qoff_logitem_t) * 2 +
-				       mp->m_sb.sb_sectsize + 128,
-				       0,
-				       0,
-				       XFS_DEFAULT_LOG_COUNT))) {
+	error = xfs_trans_reserve(tp, 0, XFS_QM_QUOTAOFF_LOG_RES(mp),
+				  0, 0, XFS_DEFAULT_LOG_COUNT);
+	if (error)
 		goto error0;
-	}

 	qoffi = xfs_trans_get_qoff_item(tp, NULL, flags & XFS_ALL_QUOTA_ACCT);
 	xfs_trans_log_quotaoff_item(tp, qoffi);
@@ -139,9 +139,9 @@ static const match_table_t tokens = {

-STATIC unsigned long
-suffix_strtoul(char *s, char **endp, unsigned int base)
+STATIC int
+suffix_kstrtoint(char *s, unsigned int base, int *res)
 {
-	int	last, shift_left_factor = 0;
+	int	last, shift_left_factor = 0, _res;
 	char	*value = s;

 	last = strlen(value) - 1;

@@ -158,7 +158,10 @@ suffix_strtoul(char *s, char **endp, unsigned int base)
 		value[last] = '\0';
 	}

-	return simple_strtoul((const char *)s, endp, base) << shift_left_factor;
+	if (kstrtoint(s, base, &_res))
+		return -EINVAL;
+	*res = _res << shift_left_factor;
+	return 0;
 }

 /*

@@ -174,7 +177,7 @@ xfs_parseargs(
 	char			*options)
 {
 	struct super_block	*sb = mp->m_super;
-	char			*this_char, *value, *eov;
+	char			*this_char, *value;
 	int			dsunit = 0;
 	int			dswidth = 0;
 	int			iosize = 0;

@@ -230,14 +233,16 @@ xfs_parseargs(
 					this_char);
 				return EINVAL;
 			}
-			mp->m_logbufs = simple_strtoul(value, &eov, 10);
+			if (kstrtoint(value, 10, &mp->m_logbufs))
+				return EINVAL;
 		} else if (!strcmp(this_char, MNTOPT_LOGBSIZE)) {
 			if (!value || !*value) {
 				xfs_warn(mp, "%s option requires an argument",
 					this_char);
 				return EINVAL;
 			}
-			mp->m_logbsize = suffix_strtoul(value, &eov, 10);
+			if (suffix_kstrtoint(value, 10, &mp->m_logbsize))
+				return EINVAL;
 		} else if (!strcmp(this_char, MNTOPT_LOGDEV)) {
 			if (!value || !*value) {
 				xfs_warn(mp, "%s option requires an argument",

@@ -266,7 +271,8 @@ xfs_parseargs(
 					this_char);
 				return EINVAL;
 			}
-			iosize = simple_strtoul(value, &eov, 10);
+			if (kstrtoint(value, 10, &iosize))
+				return EINVAL;
 			iosizelog = ffs(iosize) - 1;
 		} else if (!strcmp(this_char, MNTOPT_ALLOCSIZE)) {
 			if (!value || !*value) {

@@ -274,7 +280,8 @@ xfs_parseargs(
 					this_char);
 				return EINVAL;
 			}
-			iosize = suffix_strtoul(value, &eov, 10);
+			if (suffix_kstrtoint(value, 10, &iosize))
+				return EINVAL;
 			iosizelog = ffs(iosize) - 1;
 		} else if (!strcmp(this_char, MNTOPT_GRPID) ||
 			   !strcmp(this_char, MNTOPT_BSDGROUPS)) {

@@ -296,14 +303,16 @@ xfs_parseargs(
 					this_char);
 				return EINVAL;
 			}
-			dsunit = simple_strtoul(value, &eov, 10);
+			if (kstrtoint(value, 10, &dsunit))
+				return EINVAL;
 		} else if (!strcmp(this_char, MNTOPT_SWIDTH)) {
 			if (!value || !*value) {
 				xfs_warn(mp, "%s option requires an argument",
 					this_char);
 				return EINVAL;
 			}
-			dswidth = simple_strtoul(value, &eov, 10);
+			if (kstrtoint(value, 10, &dswidth))
+				return EINVAL;
 		} else if (!strcmp(this_char, MNTOPT_32BITINODE)) {
 			mp->m_flags |= XFS_MOUNT_SMALL_INUMS;
 		} else if (!strcmp(this_char, MNTOPT_64BITINODE)) {
@@ -252,17 +252,19 @@ struct xfs_log_item_desc {
  * as long as SWRITE logs the entire inode core
  */
 #define	XFS_FSYNC_TS_LOG_RES(mp)	((mp)->m_reservations.tr_swrite)
 #define	XFS_WRITEID_LOG_RES(mp)		((mp)->m_reservations.tr_swrite)
 #define	XFS_ADDAFORK_LOG_RES(mp)	((mp)->m_reservations.tr_addafork)
 #define	XFS_ATTRINVAL_LOG_RES(mp)	((mp)->m_reservations.tr_attrinval)
-#define	XFS_ATTRSET_LOG_RES(mp, ext)	\
-	((mp)->m_reservations.tr_attrset + \
-	 (ext * (mp)->m_sb.sb_sectsize) + \
-	 (ext * XFS_FSB_TO_B((mp), XFS_BM_MAXLEVELS(mp, XFS_ATTR_FORK))) + \
-	 (128 * (ext + (ext * XFS_BM_MAXLEVELS(mp, XFS_ATTR_FORK)))))
-#define	XFS_ATTRRM_LOG_RES(mp)	((mp)->m_reservations.tr_attrrm)
+#define	XFS_ATTRSETM_LOG_RES(mp)	((mp)->m_reservations.tr_attrsetm)
+#define	XFS_ATTRSETRT_LOG_RES(mp)	((mp)->m_reservations.tr_attrsetrt)
+#define	XFS_ATTRRM_LOG_RES(mp)		((mp)->m_reservations.tr_attrrm)
 #define	XFS_CLEAR_AGI_BUCKET_LOG_RES(mp)  ((mp)->m_reservations.tr_clearagi)
+#define	XFS_QM_SBCHANGE_LOG_RES(mp)	((mp)->m_reservations.tr_qm_sbchange)
+#define	XFS_QM_SETQLIM_LOG_RES(mp)	((mp)->m_reservations.tr_qm_setqlim)
+#define	XFS_QM_DQALLOC_LOG_RES(mp)	((mp)->m_reservations.tr_qm_dqalloc)
+#define	XFS_QM_QUOTAOFF_LOG_RES(mp)	((mp)->m_reservations.tr_qm_quotaoff)
+#define	XFS_QM_QUOTAOFF_END_LOG_RES(mp)	((mp)->m_reservations.tr_qm_equotaoff)
+#define	XFS_SB_LOG_RES(mp)		((mp)->m_reservations.tr_sb)

 /*
  * Various log count values.
@@ -55,20 +55,6 @@ xfs_ail_check(
 	ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) >= 0);

-#ifdef XFS_TRANS_DEBUG
-	/*
-	 * Walk the list checking lsn ordering, and that every entry has the
-	 * XFS_LI_IN_AIL flag set. This is really expensive, so only do it
-	 * when specifically debugging the transaction subsystem.
-	 */
-	prev_lip = list_entry(&ailp->xa_ail, xfs_log_item_t, li_ail);
-	list_for_each_entry(lip, &ailp->xa_ail, li_ail) {
-		if (&prev_lip->li_ail != &ailp->xa_ail)
-			ASSERT(XFS_LSN_CMP(prev_lip->li_lsn, lip->li_lsn) <= 0);
-		ASSERT((lip->li_flags & XFS_LI_IN_AIL) != 0);
-		prev_lip = lip;
-	}
-#endif /* XFS_TRANS_DEBUG */
 }

 #else /* !DEBUG */
 #define xfs_ail_check(a,l)
@@ -516,7 +516,7 @@ xfs_trans_unreserve_and_mod_dquots(
 	int			i, j;
 	xfs_dquot_t		*dqp;
 	xfs_dqtrx_t		*qtrx, *qa;
-	boolean_t		locked;
+	bool			locked;

 	if (!tp->t_dqinfo || !(tp->t_flags & XFS_TRANS_DQ_DIRTY))
 		return;

@@ -537,17 +537,17 @@ xfs_trans_unreserve_and_mod_dquots(
 			 * about the number of blocks used field, or deltas.
 			 * Also we don't bother to zero the fields.
 			 */
-			locked = B_FALSE;
+			locked = false;
 			if (qtrx->qt_blk_res) {
 				xfs_dqlock(dqp);
-				locked = B_TRUE;
+				locked = true;
 				dqp->q_res_bcount -=
 					(xfs_qcnt_t)qtrx->qt_blk_res;
 			}
 			if (qtrx->qt_ino_res) {
 				if (!locked) {
 					xfs_dqlock(dqp);
-					locked = B_TRUE;
+					locked = true;
 				}
 				dqp->q_res_icount -=
 					(xfs_qcnt_t)qtrx->qt_ino_res;

@@ -556,7 +556,7 @@ xfs_trans_unreserve_and_mod_dquots(
 			if (qtrx->qt_rtblk_res) {
 				if (!locked) {
 					xfs_dqlock(dqp);
-					locked = B_TRUE;
+					locked = true;
 				}
 				dqp->q_res_rtbcount -=
 					(xfs_qcnt_t)qtrx->qt_rtblk_res;
@@ -33,14 +33,6 @@
 #include "xfs_inode_item.h"
 #include "xfs_trace.h"

-#ifdef XFS_TRANS_DEBUG
-STATIC void
-xfs_trans_inode_broot_debug(
-	xfs_inode_t	*ip);
-#else
-#define	xfs_trans_inode_broot_debug(ip)
-#endif
-
 /*
  * Add a locked inode to the transaction.
  *

@@ -67,8 +59,6 @@ xfs_trans_ijoin(
 	 * Get a log_item_desc to point at the new item.
 	 */
 	xfs_trans_add_item(tp, &iip->ili_item);
-
-	xfs_trans_inode_broot_debug(ip);
 }

 /*

@@ -135,34 +125,3 @@ xfs_trans_log_inode(
 	flags |= ip->i_itemp->ili_last_fields;
 	ip->i_itemp->ili_fields |= flags;
 }
-
-#ifdef XFS_TRANS_DEBUG
-/*
- * Keep track of the state of the inode btree root to make sure we
- * log it properly.
- */
-STATIC void
-xfs_trans_inode_broot_debug(
-	xfs_inode_t	*ip)
-{
-	xfs_inode_log_item_t	*iip;
-
-	ASSERT(ip->i_itemp != NULL);
-	iip = ip->i_itemp;
-	if (iip->ili_root_size != 0) {
-		ASSERT(iip->ili_orig_root != NULL);
-		kmem_free(iip->ili_orig_root);
-		iip->ili_root_size = 0;
-		iip->ili_orig_root = NULL;
-	}
-	if (ip->i_d.di_format == XFS_DINODE_FMT_BTREE) {
-		ASSERT((ip->i_df.if_broot != NULL) &&
-		       (ip->i_df.if_broot_bytes > 0));
-		iip->ili_root_size = ip->i_df.if_broot_bytes;
-		iip->ili_orig_root =
-			(char*)kmem_alloc(iip->ili_root_size, KM_SLEEP);
-		memcpy(iip->ili_orig_root, (char*)(ip->i_df.if_broot),
-		       iip->ili_root_size);
-	}
-}
-#endif
@@ -32,7 +32,6 @@ typedef unsigned int		__uint32_t;
 typedef signed long long int	__int64_t;
 typedef unsigned long long int	__uint64_t;

-typedef enum { B_FALSE,B_TRUE }	boolean_t;
 typedef __uint32_t		prid_t;		/* project ID */
 typedef __uint32_t		inst_t;		/* an instruction */
@@ -725,7 +725,7 @@ xfs_create(
 	int			error;
 	xfs_bmap_free_t		free_list;
 	xfs_fsblock_t		first_block;
-	boolean_t		unlock_dp_on_error = B_FALSE;
+	bool			unlock_dp_on_error = false;
 	uint			cancel_flags;
 	int			committed;
 	prid_t			prid;

@@ -794,7 +794,7 @@ xfs_create(
 	}

 	xfs_ilock(dp, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT);
-	unlock_dp_on_error = B_TRUE;
+	unlock_dp_on_error = true;

 	xfs_bmap_init(&free_list, &first_block);

@@ -830,7 +830,7 @@ xfs_create(
 	 * error path.
 	 */
 	xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
-	unlock_dp_on_error = B_FALSE;
+	unlock_dp_on_error = false;

 	error = xfs_dir_createname(tp, dp, name, ip->i_ino,
 					&first_block, &free_list, resblks ?

@@ -1367,7 +1367,7 @@ xfs_symlink(
 	int			pathlen;
 	xfs_bmap_free_t		free_list;
 	xfs_fsblock_t		first_block;
-	boolean_t		unlock_dp_on_error = B_FALSE;
+	bool			unlock_dp_on_error = false;
 	uint			cancel_flags;
 	int			committed;
 	xfs_fileoff_t		first_fsb;

@@ -1438,7 +1438,7 @@ xfs_symlink(
 	}

 	xfs_ilock(dp, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT);
-	unlock_dp_on_error = B_TRUE;
+	unlock_dp_on_error = true;

 	/*
 	 * Check whether the directory allows new symlinks or not.

@@ -1484,7 +1484,7 @@ xfs_symlink(
 	 * error path.
 	 */
 	xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
-	unlock_dp_on_error = B_FALSE;
+	unlock_dp_on_error = false;

 	/*
 	 * Also attach the dquot(s) to it, if applicable.