Commit 611c9946 authored by Dave Chinner, committed by Ben Myers

xfs: make XBF_MAPPED the default behaviour

Rather than specifying XBF_MAPPED for almost all buffers, introduce
XBF_UNMAPPED for the couple of users that use unmapped buffers.
Signed-off-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Mark Tinguely <tinguely@sgi.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Ben Myers <bpm@sgi.com>
parent d4f3512b
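
For illustration only, a minimal sketch of how call sites look after this change; the wrapper function and its arguments are hypothetical and not part of the commit:

	/*
	 * Sketch, assuming the post-commit API: mapping is now the default,
	 * so ordinary callers pass 0 for the buffer flags, and only the few
	 * unmapped users (such as inode buffers) pass XBF_UNMAPPED.
	 */
	static void example_buf_usage(struct xfs_mount *mp, xfs_daddr_t blkno,
				      size_t numblks)
	{
		struct xfs_buf	*bp;
		char		first_sector[512];

		/* Default case: the buffer comes back mapped, no XBF_MAPPED needed. */
		bp = xfs_buf_read(mp->m_ddev_targp, blkno, numblks, 0);
		if (bp) {
			/* Code that used to test XBF_MAPPED now checks b_addr. */
			if (bp->b_addr)
				memcpy(first_sector, bp->b_addr, sizeof(first_sector));
			xfs_buf_relse(bp);
		}

		/* The few unmapped users opt out of mapping explicitly. */
		bp = xfs_buf_read(mp->m_ddev_targp, blkno, numblks, XBF_UNMAPPED);
		if (bp)
			xfs_buf_relse(bp);
	}
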
@@ -65,11 +65,11 @@ xfs_buf_is_vmapped(
 	/*
 	 * Return true if the buffer is vmapped.
 	 *
-	 * The XBF_MAPPED flag is set if the buffer should be mapped, but the
-	 * code is clever enough to know it doesn't have to map a single page,
-	 * so the check has to be both for XBF_MAPPED and bp->b_page_count > 1.
+	 * b_addr is null if the buffer is not mapped, but the code is clever
+	 * enough to know it doesn't have to map a single page, so the check has
+	 * to be both for b_addr and bp->b_page_count > 1.
 	 */
-	return (bp->b_flags & XBF_MAPPED) && bp->b_page_count > 1;
+	return bp->b_addr && bp->b_page_count > 1;
 }
 
 static inline int
@@ -181,7 +181,7 @@ xfs_buf_alloc(
 	 * We don't want certain flags to appear in b_flags unless they are
 	 * specifically set by later operations on the buffer.
 	 */
-	flags &= ~(XBF_MAPPED | XBF_TRYLOCK | XBF_ASYNC | XBF_READ_AHEAD);
+	flags &= ~(XBF_UNMAPPED | XBF_TRYLOCK | XBF_ASYNC | XBF_READ_AHEAD);
 
 	atomic_set(&bp->b_hold, 1);
 	atomic_set(&bp->b_lru_ref, 1);
@@ -329,7 +329,7 @@ xfs_buf_allocate_memory(
 		bp->b_pages = bp->b_page_array;
 		bp->b_pages[0] = virt_to_page(bp->b_addr);
 		bp->b_page_count = 1;
-		bp->b_flags |= XBF_MAPPED | _XBF_KMEM;
+		bp->b_flags |= _XBF_KMEM;
 		return 0;
 	}
 
@@ -399,8 +399,9 @@ _xfs_buf_map_pages(
 	if (bp->b_page_count == 1) {
 		/* A single page buffer is always mappable */
 		bp->b_addr = page_address(bp->b_pages[0]) + bp->b_offset;
-		bp->b_flags |= XBF_MAPPED;
-	} else if (flags & XBF_MAPPED) {
+	} else if (flags & XBF_UNMAPPED) {
+		bp->b_addr = NULL;
+	} else {
 		int retried = 0;
 
 		do {
@@ -414,7 +415,6 @@ _xfs_buf_map_pages(
 		if (!bp->b_addr)
 			return -ENOMEM;
 		bp->b_addr += bp->b_offset;
-		bp->b_flags |= XBF_MAPPED;
 	}
 
 	return 0;
@@ -520,7 +520,7 @@ _xfs_buf_find(
 	 */
 	if (bp->b_flags & XBF_STALE) {
 		ASSERT((bp->b_flags & _XBF_DELWRI_Q) == 0);
-		bp->b_flags &= XBF_MAPPED | _XBF_KMEM | _XBF_PAGES;
+		bp->b_flags &= _XBF_KMEM | _XBF_PAGES;
 	}
 
 	trace_xfs_buf_find(bp, flags, _RET_IP_);
@@ -575,7 +575,7 @@ xfs_buf_get(
 	bp->b_io_length = bp->b_length;
 
 found:
-	if (!(bp->b_flags & XBF_MAPPED)) {
+	if (!bp->b_addr) {
 		error = _xfs_buf_map_pages(bp, flags);
 		if (unlikely(error)) {
 			xfs_warn(target->bt_mount,
@@ -707,7 +707,6 @@ xfs_buf_set_empty(
 	bp->b_length = numblks;
 	bp->b_io_length = numblks;
 	bp->b_bn = XFS_BUF_DADDR_NULL;
-	bp->b_flags &= ~XBF_MAPPED;
 }
 
 static inline struct page *
@@ -759,7 +758,6 @@ xfs_buf_associate_memory(
 	bp->b_io_length = BTOBB(len);
 	bp->b_length = BTOBB(buflen);
-	bp->b_flags |= XBF_MAPPED;
 
 	return 0;
 }
@@ -790,7 +788,7 @@ xfs_buf_get_uncached(
 	}
 	bp->b_flags |= _XBF_PAGES;
 
-	error = _xfs_buf_map_pages(bp, XBF_MAPPED);
+	error = _xfs_buf_map_pages(bp, 0);
 	if (unlikely(error)) {
 		xfs_warn(target->bt_mount,
 			"%s: failed to map pages\n", __func__);
@@ -1287,7 +1285,7 @@ xfs_buf_offset(
 {
 	struct page	*page;
 
-	if (bp->b_flags & XBF_MAPPED)
+	if (bp->b_addr)
 		return bp->b_addr + offset;
 
 	offset += bp->b_offset;
...
@@ -41,7 +41,6 @@ typedef enum {
 #define XBF_READ	(1 << 0) /* buffer intended for reading from device */
 #define XBF_WRITE	(1 << 1) /* buffer intended for writing to device */
 #define XBF_READ_AHEAD	(1 << 2) /* asynchronous read-ahead */
-#define XBF_MAPPED	(1 << 3) /* buffer mapped (b_addr valid) */
 #define XBF_ASYNC	(1 << 4) /* initiator will not wait for completion */
 #define XBF_DONE	(1 << 5) /* all pages in the buffer uptodate */
 #define XBF_STALE	(1 << 6) /* buffer has been staled, do not find it */
@@ -53,6 +52,7 @@ typedef enum {
 /* flags used only as arguments to access routines */
 #define XBF_TRYLOCK	(1 << 16)/* lock requested, but do not wait */
+#define XBF_UNMAPPED	(1 << 17)/* do not map the buffer */
 
 /* flags used only internally */
 #define _XBF_PAGES	(1 << 20)/* backed by refcounted pages */
@@ -65,7 +65,6 @@ typedef unsigned int xfs_buf_flags_t;
 	{ XBF_READ,		"READ" }, \
 	{ XBF_WRITE,		"WRITE" }, \
 	{ XBF_READ_AHEAD,	"READ_AHEAD" }, \
-	{ XBF_MAPPED,		"MAPPED" }, \
 	{ XBF_ASYNC,		"ASYNC" }, \
 	{ XBF_DONE,		"DONE" }, \
 	{ XBF_STALE,		"STALE" }, \
@@ -73,6 +72,7 @@ typedef unsigned int xfs_buf_flags_t;
 	{ XBF_FUA,		"FUA" }, \
 	{ XBF_FLUSH,		"FLUSH" }, \
 	{ XBF_TRYLOCK,		"TRYLOCK" },	/* should never be set */\
+	{ XBF_UNMAPPED,		"UNMAPPED" },	/* ditto */\
 	{ _XBF_PAGES,		"PAGES" }, \
 	{ _XBF_KMEM,		"KMEM" }, \
 	{ _XBF_DELWRI_Q,	"DELWRI_Q" }
...
@@ -190,7 +190,7 @@ xfs_growfs_data_private(
 		 */
 		bp = xfs_buf_get(mp->m_ddev_targp,
 				 XFS_AG_DADDR(mp, agno, XFS_AGF_DADDR(mp)),
-				 XFS_FSS_TO_BB(mp, 1), XBF_MAPPED);
+				 XFS_FSS_TO_BB(mp, 1), 0);
 		if (!bp) {
 			error = ENOMEM;
 			goto error0;
@@ -227,7 +227,7 @@ xfs_growfs_data_private(
 		 */
 		bp = xfs_buf_get(mp->m_ddev_targp,
 				 XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp)),
-				 XFS_FSS_TO_BB(mp, 1), XBF_MAPPED);
+				 XFS_FSS_TO_BB(mp, 1), 0);
 		if (!bp) {
 			error = ENOMEM;
 			goto error0;
@@ -256,7 +256,7 @@ xfs_growfs_data_private(
 		 */
 		bp = xfs_buf_get(mp->m_ddev_targp,
 				 XFS_AGB_TO_DADDR(mp, agno, XFS_BNO_BLOCK(mp)),
-				 BTOBB(mp->m_sb.sb_blocksize), XBF_MAPPED);
+				 BTOBB(mp->m_sb.sb_blocksize), 0);
 		if (!bp) {
 			error = ENOMEM;
 			goto error0;
@@ -282,7 +282,7 @@ xfs_growfs_data_private(
 		 */
 		bp = xfs_buf_get(mp->m_ddev_targp,
 				 XFS_AGB_TO_DADDR(mp, agno, XFS_CNT_BLOCK(mp)),
-				 BTOBB(mp->m_sb.sb_blocksize), XBF_MAPPED);
+				 BTOBB(mp->m_sb.sb_blocksize), 0);
 		if (!bp) {
 			error = ENOMEM;
 			goto error0;
@@ -309,7 +309,7 @@ xfs_growfs_data_private(
 		 */
 		bp = xfs_buf_get(mp->m_ddev_targp,
 				 XFS_AGB_TO_DADDR(mp, agno, XFS_IBT_BLOCK(mp)),
-				 BTOBB(mp->m_sb.sb_blocksize), XBF_MAPPED);
+				 BTOBB(mp->m_sb.sb_blocksize), 0);
 		if (!bp) {
 			error = ENOMEM;
 			goto error0;
...
@@ -150,6 +150,7 @@ xfs_imap_to_bp(
 	int		ni;
 	xfs_buf_t	*bp;
 
+	buf_flags |= XBF_UNMAPPED;
 	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, imap->im_blkno,
 				   (int)imap->im_len, buf_flags, &bp);
 	if (error) {
...
@@ -2131,8 +2131,8 @@ xlog_recover_buffer_pass2(
 	trace_xfs_log_recover_buf_recover(log, buf_f);
 
 	buf_flags = 0;
-	if (!(buf_f->blf_flags & XFS_BLF_INODE_BUF))
-		buf_flags |= XBF_MAPPED;
+	if (buf_f->blf_flags & XFS_BLF_INODE_BUF)
+		buf_flags |= XBF_UNMAPPED;
 
 	bp = xfs_buf_read(mp->m_ddev_targp, buf_f->blf_blkno, buf_f->blf_len,
 			  buf_flags);
...
@@ -138,9 +138,6 @@ xfs_trans_get_buf(xfs_trans_t	*tp,
 	xfs_buf_t		*bp;
 	xfs_buf_log_item_t	*bip;
 
-	if (flags == 0)
-		flags = XBF_MAPPED;
-
 	/*
 	 * Default to a normal get_buf() call if the tp is NULL.
 	 */
@@ -264,9 +261,6 @@ xfs_trans_read_buf(
 	*bpp = NULL;
 
-	if (flags == 0)
-		flags = XBF_MAPPED;
-
 	/*
 	 * Default to a normal get_buf() call if the tp is NULL.
 	 */
...
@@ -79,8 +79,7 @@ xfs_readlink_bmap(
 		d = XFS_FSB_TO_DADDR(mp, mval[n].br_startblock);
 		byte_cnt = XFS_FSB_TO_B(mp, mval[n].br_blockcount);
 
-		bp = xfs_buf_read(mp->m_ddev_targp, d, BTOBB(byte_cnt),
-				  XBF_MAPPED);
+		bp = xfs_buf_read(mp->m_ddev_targp, d, BTOBB(byte_cnt), 0);
 		if (!bp)
 			return XFS_ERROR(ENOMEM);
 		error = bp->b_error;
...