Commit 540414f5 authored by Timothy Shimmin, committed by Nathan Scott

[XFS] Version 2 log fixes - remove l_stripemask and add v2 log stripe
padding to ic_roundoff to cater for pad in reservation cursor updates.

SGI Modid: xfs-linux:xfs-kern:167580a
parent 73a61e86
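
In brief: the old code kept a power-of-two l_stripemask and added any v2 stripe padding to ic_offset rather than ic_roundoff, so the pad was invisible to the reservation cursor updates; the new code rounds each write up to sb_logsunit with the new XLOG_BTOLSUNIT/XLOG_LSUNITTOB macros and records the pad in ic_roundoff (which also lets the ASSERT in xlog_space_left() be re-enabled). A minimal standalone sketch of the new rounding; BBSIZE and the values in main() are assumed for illustration:

#include <assert.h>
#include <stdio.h>

#define BBSHIFT 9                       /* basic blocks are 512 bytes */
#define BBSIZE  (1 << BBSHIFT)

/* Round a byte count up to a multiple of unit (unit >= 1). */
static unsigned roundup_bytes(unsigned bytes, unsigned unit)
{
        return ((bytes + unit - 1) / unit) * unit;
}

/*
 * Mirrors the new xlog_sync() sizing: header + payload, padded out to
 * the v2 stripe unit (or to BBSIZE when there is no stripe unit), with
 * the pad reported separately the way ic_roundoff now is.
 */
static unsigned log_write_size(unsigned iclog_hsize, unsigned ic_offset,
                               unsigned logsunit, unsigned *roundoff)
{
        unsigned count_init = iclog_hsize + ic_offset;  /* add LR header */
        unsigned count;

        if (logsunit > 1)               /* v2 stripe unit in use */
                count = roundup_bytes(count_init, logsunit);
        else                            /* v1: round to basic blocks */
                count = roundup_bytes(count_init, BBSIZE);
        *roundoff = count - count_init;
        return count;
}

int main(void)
{
        unsigned roundoff;
        /* assumed: 512-byte header, 5000 bytes of data, 32 KB sunit */
        unsigned count = log_write_size(512, 5000, 32768, &roundoff);

        printf("count=%u roundoff=%u\n", count, roundoff);
        assert(count == 32768 && roundoff == 27256);
        return 0;
}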
@@ -759,8 +759,9 @@ xfs_log_move_tail(xfs_mount_t *mp,
 	/* Also an invalid lsn.  1 implies that we aren't passing in a valid
 	 * tail_lsn.
 	 */
-	if (tail_lsn != 1)
+	if (tail_lsn != 1) {
 		log->l_tail_lsn = tail_lsn;
+	}
 
 	if ((tic = log->l_write_headq)) {
 #ifdef DEBUG
@@ -866,10 +867,11 @@ xlog_assign_tail_lsn(xfs_mount_t *mp)
 	tail_lsn = xfs_trans_tail_ail(mp);
 	s = GRANT_LOCK(log);
-	if (tail_lsn != 0)
+	if (tail_lsn != 0) {
 		log->l_tail_lsn = tail_lsn;
-	else
+	} else {
 		tail_lsn = log->l_tail_lsn = log->l_last_sync_lsn;
+	}
 	GRANT_UNLOCK(log, s);
 
 	return tail_lsn;
@@ -921,10 +923,8 @@ xlog_space_left(xlog_t *log, int cycle, int bytes)
 		 * In this case we just want to return the size of the
 		 * log as the amount of space left.
 		 */
-		/* This assert does not take into account padding from striped log writes *
 		ASSERT((tail_cycle == (cycle + 1)) ||
 		       ((bytes + log->l_roundoff) >= tail_bytes));
-		*/
 		free_bytes = log->l_logsize;
 	}
 	return free_bytes;
@@ -1183,14 +1183,6 @@ xlog_alloc_log(xfs_mount_t *mp,
 	log->l_grant_reserve_cycle = 1;
 	log->l_grant_write_cycle = 1;
 
-	if (XFS_SB_VERSION_HASLOGV2(&mp->m_sb)) {
-		if (mp->m_sb.sb_logsunit <= 1) {
-			log->l_stripemask = 1;
-		} else {
-			log->l_stripemask = 1 <<
-				xfs_highbit32(mp->m_sb.sb_logsunit >> BBSHIFT);
-		}
-	}
 	if (XFS_SB_VERSION_HASSECTOR(&mp->m_sb)) {
 		log->l_sectbb_log = mp->m_sb.sb_logsectlog - BBSHIFT;
 		ASSERT(log->l_sectbb_log <= mp->m_sectbb_log);
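
The block above is the l_stripemask setup being removed. Worth noting: 1 << xfs_highbit32(x) keeps only the highest set bit of x, so the old mask equalled the stripe unit only when the unit was a power of two of basic blocks; anything else quietly rounded to a smaller power-of-two boundary. A quick standalone check, with xfs_highbit32() reimplemented for illustration and a hypothetical 192 KB stripe unit:

#include <stdio.h>

#define BBSHIFT 9       /* bytes -> basic blocks shift */

/* Index of the highest set bit, like xfs_highbit32(). */
static int highbit32(unsigned v)
{
        int i = -1;

        while (v) {
                v >>= 1;
                i++;
        }
        return i;
}

int main(void)
{
        unsigned sunit_bytes = 196608;  /* assumed 192 KB = 384 BBs */
        unsigned stripemask = 1u << highbit32(sunit_bytes >> BBSHIFT);

        /* prints 384 vs 256: the mask undershoots non-power-of-two units */
        printf("sunit=%u BBs stripemask=%u BBs\n",
               sunit_bytes >> BBSHIFT, stripemask);
        return 0;
}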
@@ -1401,45 +1393,35 @@ xlog_sync(xlog_t *log,
 	xfs_caddr_t	dptr;		/* pointer to byte sized element */
 	xfs_buf_t	*bp;
 	int		i, ops;
-	uint		roundup;
 	uint		count;		/* byte count of bwrite */
+	uint		count_init;	/* initial count before roundup */
 	int		split = 0;	/* split write into two regions */
 	int		error;
 
 	XFS_STATS_INC(xs_log_writes);
 	ASSERT(iclog->ic_refcnt == 0);
 
-	/* Round out the log write size */
-	if (iclog->ic_offset & BBMASK) {
-		/* count of 0 is already accounted for up in
-		 * xlog_state_sync_all().  Once in this routine,
-		 * operations on the iclog are single threaded.
-		 *
-		 * Difference between rounded up size and size
-		 */
-		count = iclog->ic_offset & BBMASK;
-		iclog->ic_roundoff += BBSIZE - count;
-	}
-	if (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb)) {
-		unsigned sunit = BTOBB(log->l_mp->m_sb.sb_logsunit);
-		if (!sunit)
-			sunit = 1;
+	/* Add for LR header */
+	count_init = log->l_iclog_hsize + iclog->ic_offset;
 
-		count = BTOBB(log->l_iclog_hsize + iclog->ic_offset);
-		if (count & (sunit - 1)) {
-			roundup = sunit - (count & (sunit - 1));
-		} else {
-			roundup = 0;
-		}
-		iclog->ic_offset += BBTOB(roundup);
+	/* Round out the log write size */
+	if (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb) &&
+	    log->l_mp->m_sb.sb_logsunit > 1) {
+		/* we have a v2 stripe unit to use */
+		count = XLOG_LSUNITTOB(log, XLOG_BTOLSUNIT(log, count_init));
+	} else {
+		count = BBTOB(BTOBB(count_init));
 	}
-
+	iclog->ic_roundoff = count - count_init;
 	log->l_roundoff += iclog->ic_roundoff;
 
 	xlog_pack_data(log, iclog);      /* put cycle number in every block */
 
 	/* real byte length */
-	INT_SET(iclog->ic_header.h_len, ARCH_CONVERT, iclog->ic_offset);
+	INT_SET(iclog->ic_header.h_len,
+		ARCH_CONVERT,
+		iclog->ic_offset + iclog->ic_roundoff);
 
 	/* put ops count in correct order */
 	ops = iclog->ic_header.h_num_logops;
 	INT_SET(iclog->ic_header.h_num_logops, ARCH_CONVERT, ops);
@@ -1449,12 +1431,6 @@ xlog_sync(xlog_t *log,
 	XFS_BUF_SET_FSPRIVATE2(bp, (unsigned long)2);
 	XFS_BUF_SET_ADDR(bp, BLOCK_LSN(iclog->ic_header.h_lsn, ARCH_CONVERT));
 
-	/* Count is already rounded up to a BBSIZE above */
-	count = iclog->ic_offset + iclog->ic_roundoff;
-	ASSERT((count & BBMASK) == 0);
-
-	/* Add for LR header */
-	count += log->l_iclog_hsize;
-
 	XFS_STATS_ADD(xs_log_blocks, BTOBB(count));
 
 	/* Do we need to split this write into 2 parts? */
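
Running the sketch's assumed numbers through the new path: l_iclog_hsize = 512 and ic_offset = 5000 give count_init = 5512; with a 32 KB stripe unit, count is padded to 32768, ic_roundoff = 27256, and h_len records 5000 + 27256 = 32256 (the remaining 512 bytes being the header). Without a stripe unit, count = BBTOB(BTOBB(5512)) = 5632 and ic_roundoff = 120. Since count now leaves the sizing block already aligned and header-inclusive, the recomputation and BBMASK assert deleted here were redundant.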
@@ -2783,8 +2759,6 @@ xlog_state_switch_iclogs(xlog_t *log,
 			       xlog_in_core_t	*iclog,
 			       int		eventual_size)
 {
-	uint roundup;
-
 	ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE);
 	if (!eventual_size)
 		eventual_size = iclog->ic_offset;
@@ -2797,14 +2771,10 @@ xlog_state_switch_iclogs(xlog_t *log,
 	log->l_curr_block += BTOBB(eventual_size)+BTOBB(log->l_iclog_hsize);
 
 	/* Round up to next log-sunit */
-	if (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb)) {
-		if (log->l_curr_block & (log->l_stripemask - 1)) {
-			roundup = log->l_stripemask -
-				(log->l_curr_block & (log->l_stripemask - 1));
-		} else {
-			roundup = 0;
-		}
-		log->l_curr_block += roundup;
+	if (XFS_SB_VERSION_HASLOGV2(&log->l_mp->m_sb) &&
+	    log->l_mp->m_sb.sb_logsunit > 1) {
+		__uint32_t sunit_bb = BTOBB(log->l_mp->m_sb.sb_logsunit);
+		log->l_curr_block = roundup(log->l_curr_block, sunit_bb);
 	}
 
 	if (log->l_curr_block >= log->l_logBBsize) {
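
With the mask gone, the round-up of l_curr_block uses the generic roundup() helper, which is plain divide-and-multiply arithmetic and therefore correct for any stripe unit. A two-line sketch of the same step, with assumed values (the macro body shown matches the common kernel definition):

#include <stdio.h>

/* common kernel definition of roundup() */
#define roundup(x, y) ((((x) + ((y) - 1)) / (y)) * (y))

int main(void)
{
        int curr_block = 1000;  /* assumed current log block (in BBs) */
        int sunit_bb = 384;     /* assumed 192 KB stripe unit, in BBs */

        printf("%d\n", roundup(curr_block, sunit_bb));  /* prints 1152 */
        return 0;
}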
...
@@ -63,6 +63,9 @@ int xlog_btolrbb(int b);
 #else
 #define XLOG_BTOLRBB(b)		(((b)+XLOG_RECORD_BSIZE-1) >> XLOG_RECORD_BSHIFT)
 #endif
+#define XLOG_BTOLSUNIT(log, b)	(((b)+(log)->l_mp->m_sb.sb_logsunit-1) / \
+				 (log)->l_mp->m_sb.sb_logsunit)
+#define XLOG_LSUNITTOB(log, su)	((su) * (log)->l_mp->m_sb.sb_logsunit)
 
 #define XLOG_HEADER_SIZE	512
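
The two new macros compose into a byte round-up: XLOG_BTOLSUNIT converts bytes to whole stripe units (rounding up) and XLOG_LSUNITTOB converts back, so XLOG_LSUNITTOB(log, XLOG_BTOLSUNIT(log, b)) is b rounded up to the next stripe-unit multiple, as used in xlog_sync() above. A standalone sketch; the structs here are made-up stand-ins for the mount/superblock plumbing:

#include <assert.h>

/* hypothetical stand-ins for the xlog_t -> mount -> superblock chain */
struct stub_sb    { unsigned sb_logsunit; };
struct stub_mount { struct stub_sb m_sb; };
typedef struct    { struct stub_mount *l_mp; } xlog_t;

#define XLOG_BTOLSUNIT(log, b)	(((b)+(log)->l_mp->m_sb.sb_logsunit-1) / \
				 (log)->l_mp->m_sb.sb_logsunit)
#define XLOG_LSUNITTOB(log, su)	((su) * (log)->l_mp->m_sb.sb_logsunit)

int main(void)
{
        struct stub_mount m = { { 32768 } };    /* assumed 32 KB sunit */
        xlog_t log = { &m };

        assert(XLOG_BTOLSUNIT(&log, 5512) == 1);        /* rounds up */
        assert(XLOG_LSUNITTOB(&log, XLOG_BTOLSUNIT(&log, 5512)) == 32768);
        return 0;
}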
@@ -531,7 +534,6 @@ typedef struct log {
 	uint			l_flags;
 	uint			l_quotaoffs_flag; /* XFS_DQ_*, for QUOTAOFFs */
 	struct xfs_buf_cancel	**l_buf_cancel_table;
-	int			l_stripemask;	/* log stripe mask */
 	int			l_iclog_hsize;	/* size of iclog header */
 	int			l_iclog_heads;	/* # of iclog header sectors */
 	uint			l_sectbb_log;	/* log2 of sector size in BBs */