Commit 0564501f authored by Christoph Hellwig, committed by Darrick J. Wong

xfs: remove unused buffer cache APIs

Now that the log code uses bios directly, we can drop various special
cases in the buffer cache code.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Dave Chinner <dchinner@redhat.com>
Reviewed-by: Darrick J. Wong <darrick.wong@oracle.com>
Signed-off-by: Darrick J. Wong <darrick.wong@oracle.com>
parent 6e9b3dd8
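For context, the "uses bios directly" pattern referenced above looks roughly like the sketch below. This is illustrative only, not code from this series: the real implementation is the log I/O path reworked in the parent commits (xlog_write_iclog() in fs/xfs/xfs_log.c), and every example_-prefixed name here is hypothetical. Note how the block-layer REQ_* flags take the place of the XBF_SYNCIO/XBF_FUA/XBF_FLUSH buffer flags that this commit removes:

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/completion.h>

/* hypothetical completion handler; runs in bio end_io context */
static void example_write_end_io(struct bio *bio)
{
	complete(bio->bi_private);
}

/* submit one synchronous, cache-flushing write straight to the bdev */
static void example_submit_log_write(struct block_device *bdev,
		struct bio *bio, struct bio_vec *bvecs, unsigned short nvecs,
		struct page *page, unsigned int len, sector_t sector,
		struct completion *done)
{
	bio_init(bio, bvecs, nvecs);
	bio_set_dev(bio, bdev);
	bio->bi_iter.bi_sector = sector;
	bio->bi_end_io = example_write_end_io;
	bio->bi_private = done;
	/* REQ_SYNC/REQ_FUA/REQ_PREFLUSH replace XBF_SYNCIO/XBF_FUA/XBF_FLUSH */
	bio->bi_opf = REQ_OP_WRITE | REQ_SYNC | REQ_FUA | REQ_PREFLUSH;

	/* a single page always fits in a freshly initialized bio */
	bio_add_page(bio, page, len, 0);
	submit_bio(bio);
}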
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -214,7 +214,7 @@ xfs_buf_free_maps(
 	}
 }
 
-struct xfs_buf *
+static struct xfs_buf *
 _xfs_buf_alloc(
 	struct xfs_buftarg	*target,
 	struct xfs_buf_map	*map,
@@ -910,72 +910,6 @@ xfs_buf_read_uncached(
 	return 0;
 }
 
-/*
- * Return a buffer allocated as an empty buffer and associated to external
- * memory via xfs_buf_associate_memory() back to it's empty state.
- */
-void
-xfs_buf_set_empty(
-	struct xfs_buf		*bp,
-	size_t			numblks)
-{
-	if (bp->b_pages)
-		_xfs_buf_free_pages(bp);
-
-	bp->b_pages = NULL;
-	bp->b_page_count = 0;
-	bp->b_addr = NULL;
-	bp->b_length = numblks;
-	bp->b_io_length = numblks;
-
-	ASSERT(bp->b_map_count == 1);
-	bp->b_bn = XFS_BUF_DADDR_NULL;
-	bp->b_maps[0].bm_bn = XFS_BUF_DADDR_NULL;
-	bp->b_maps[0].bm_len = bp->b_length;
-}
-
-int
-xfs_buf_associate_memory(
-	xfs_buf_t		*bp,
-	void			*mem,
-	size_t			len)
-{
-	int			rval;
-	int			i = 0;
-	unsigned long		pageaddr;
-	unsigned long		offset;
-	size_t			buflen;
-	int			page_count;
-
-	pageaddr = (unsigned long)mem & PAGE_MASK;
-	offset = (unsigned long)mem - pageaddr;
-	buflen = PAGE_ALIGN(len + offset);
-	page_count = buflen >> PAGE_SHIFT;
-
-	/* Free any previous set of page pointers */
-	if (bp->b_pages)
-		_xfs_buf_free_pages(bp);
-
-	bp->b_pages = NULL;
-	bp->b_addr = mem;
-
-	rval = _xfs_buf_get_pages(bp, page_count);
-	if (rval)
-		return rval;
-
-	bp->b_offset = offset;
-
-	for (i = 0; i < bp->b_page_count; i++) {
-		bp->b_pages[i] = kmem_to_page((void *)pageaddr);
-		pageaddr += PAGE_SIZE;
-	}
-
-	bp->b_io_length = BTOBB(len);
-	bp->b_length = BTOBB(buflen);
-
-	return 0;
-}
-
 xfs_buf_t *
 xfs_buf_get_uncached(
 	struct xfs_buftarg	*target,
@@ -1259,7 +1193,7 @@ xfs_buf_ioend_async(
 	struct xfs_buf	*bp)
 {
 	INIT_WORK(&bp->b_ioend_work, xfs_buf_ioend_work);
-	queue_work(bp->b_ioend_wq, &bp->b_ioend_work);
+	queue_work(bp->b_target->bt_mount->m_buf_workqueue, &bp->b_ioend_work);
 }
 
 void
@@ -1426,21 +1360,8 @@ _xfs_buf_ioapply(
 	 */
 	bp->b_error = 0;
 
-	/*
-	 * Initialize the I/O completion workqueue if we haven't yet or the
-	 * submitter has not opted to specify a custom one.
-	 */
-	if (!bp->b_ioend_wq)
-		bp->b_ioend_wq = bp->b_target->bt_mount->m_buf_workqueue;
-
 	if (bp->b_flags & XBF_WRITE) {
 		op = REQ_OP_WRITE;
-		if (bp->b_flags & XBF_SYNCIO)
-			op_flags = REQ_SYNC;
-		if (bp->b_flags & XBF_FUA)
-			op_flags |= REQ_FUA;
-		if (bp->b_flags & XBF_FLUSH)
-			op_flags |= REQ_PREFLUSH;
 
 		/*
 		 * Run the write verifier callback function if it exists. If
--- a/fs/xfs/xfs_buf.h
+++ b/fs/xfs/xfs_buf.h
@@ -30,11 +30,6 @@
 #define XBF_STALE	 (1 << 6) /* buffer has been staled, do not find it */
 #define XBF_WRITE_FAIL	 (1 << 7) /* async writes have failed on this buffer */
 
-/* I/O hints for the BIO layer */
-#define XBF_SYNCIO	 (1 << 10)/* treat this buffer as synchronous I/O */
-#define XBF_FUA		 (1 << 11)/* force cache write through mode */
-#define XBF_FLUSH	 (1 << 12)/* flush the disk cache before a write */
-
 /* flags used only as arguments to access routines */
 #define XBF_TRYLOCK	 (1 << 16)/* lock requested, but do not wait */
 #define XBF_UNMAPPED	 (1 << 17)/* do not map the buffer */
@@ -55,9 +50,6 @@ typedef unsigned int xfs_buf_flags_t;
 	{ XBF_DONE,		"DONE" }, \
 	{ XBF_STALE,		"STALE" }, \
 	{ XBF_WRITE_FAIL,	"WRITE_FAIL" }, \
-	{ XBF_SYNCIO,		"SYNCIO" }, \
-	{ XBF_FUA,		"FUA" }, \
-	{ XBF_FLUSH,		"FLUSH" }, \
 	{ XBF_TRYLOCK,		"TRYLOCK" },	/* should never be set */\
 	{ XBF_UNMAPPED,		"UNMAPPED" },	/* ditto */\
 	{ _XBF_PAGES,		"PAGES" }, \
@@ -156,7 +148,6 @@ typedef struct xfs_buf {
 	xfs_buftarg_t		*b_target;	/* buffer target (device) */
 	void			*b_addr;	/* virtual address of buffer */
 	struct work_struct	b_ioend_work;
-	struct workqueue_struct	*b_ioend_wq;	/* I/O completion wq */
 	xfs_buf_iodone_t	b_iodone;	/* I/O completion function */
 	struct completion	b_iowait;	/* queue for I/O waiters */
 	void			*b_log_item;
@@ -201,21 +192,6 @@ struct xfs_buf *xfs_buf_incore(struct xfs_buftarg *target,
 			xfs_daddr_t blkno, size_t numblks,
 			xfs_buf_flags_t flags);
 
-struct xfs_buf *_xfs_buf_alloc(struct xfs_buftarg *target,
-			struct xfs_buf_map *map, int nmaps,
-			xfs_buf_flags_t flags);
-
-static inline struct xfs_buf *
-xfs_buf_alloc(
-	struct xfs_buftarg	*target,
-	xfs_daddr_t		blkno,
-	size_t			numblks,
-	xfs_buf_flags_t		flags)
-{
-	DEFINE_SINGLE_BUF_MAP(map, blkno, numblks);
-	return _xfs_buf_alloc(target, &map, 1, flags);
-}
-
 struct xfs_buf *xfs_buf_get_map(struct xfs_buftarg *target,
 			struct xfs_buf_map *map, int nmaps,
 			xfs_buf_flags_t flags);
@@ -260,9 +236,6 @@ xfs_buf_readahead(
 	return xfs_buf_readahead_map(target, &map, 1, ops);
 }
 
-void xfs_buf_set_empty(struct xfs_buf *bp, size_t numblks);
-int xfs_buf_associate_memory(struct xfs_buf *bp, void *mem, size_t length);
-
 struct xfs_buf *xfs_buf_get_uncached(struct xfs_buftarg *target, size_t numblks,
 				     int flags);
 int xfs_buf_read_uncached(struct xfs_buftarg *target, xfs_daddr_t daddr,
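With xfs_buf_set_empty() and xfs_buf_associate_memory() gone, xfs_buf_get_uncached() is the one remaining way to obtain a buffer outside the cache, and such a buffer always owns its own pages; external memory can no longer be attached. A rough caller sketch, using only fields visible in this diff; example_get_uncached() and its caller-supplied daddr are hypothetical:

/* hypothetical helper: allocate an uncached buffer and point it at daddr */
static struct xfs_buf *
example_get_uncached(struct xfs_buftarg *target, xfs_daddr_t daddr,
		size_t numblks)
{
	struct xfs_buf	*bp;

	bp = xfs_buf_get_uncached(target, numblks, 0);
	if (!bp)
		return NULL;

	ASSERT(bp->b_map_count == 1);
	bp->b_bn = daddr;		/* give the buffer a disk address */
	bp->b_maps[0].bm_bn = daddr;
	return bp;
}

This roughly mirrors what xfs_buf_read_uncached() does internally before issuing the read.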