Commit 1b4623d6 authored by Andrew Morton, committed by Linus Torvalds

[PATCH] move the buffer_head IO functions into buffer.c

Patch from Christoph Hellwig.

Move the buffer_head-based IO functions out of ll_rw_blk.c and into
fs/buffer.c.  So the buffer IO functions are all in buffer.c, and
ll_rw_blk.c knows nothing about buffer_heads.

This patch has been acked by Jens.
parent 9c389aa6
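For context, a minimal caller-side sketch, not part of this patch: after the move, callers keep using the same ll_rw_block()/submit_bh() interfaces, now declared in <linux/buffer_head.h> instead of <linux/fs.h> (see the header hunks at the end of the diff). The helper name read_bh_sync below is hypothetical and only illustrates the synchronous read pattern these functions support.

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/buffer_head.h>

/* Hypothetical helper, not from the patch: read one block synchronously. */
static int read_bh_sync(struct buffer_head *bh)
{
	if (buffer_uptodate(bh))
		return 0;
	ll_rw_block(READ, 1, &bh);	/* ends up in submit_bh() -> submit_bio() */
	wait_on_buffer(bh);		/* end_buffer_io_sync() unlocks the buffer and wakes us */
	return buffer_uptodate(bh) ? 0 : -EIO;
}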
@@ -12,38 +12,21 @@
/*
* This handles all read/write requests to block devices
*/
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/kernel_stat.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blk.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/kernel_stat.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/smp_lock.h>
#include <linux/bootmem.h>
#include <linux/bootmem.h> /* for max_pfn/max_low_pfn */
#include <linux/completion.h>
#include <linux/compiler.h>
#include <linux/buffer_head.h>
#include <scsi/scsi.h>
#include <linux/backing-dev.h>
#include <asm/system.h>
#include <asm/io.h>
#include <linux/blk.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/module.h>
/*
* MAC Floppy IWM hooks
*/
#ifdef CONFIG_MAC_FLOPPY_IWM
extern int mac_floppy_init(void);
#endif
/*
* For the allocated request tables
@@ -1715,17 +1698,6 @@ void generic_make_request(struct bio *bio)
} while (ret);
}
/*
* our default bio end_io callback handler for a buffer_head mapping.
*/
static void end_bio_bh_io_sync(struct bio *bio)
{
struct buffer_head *bh = bio->bi_private;
bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
bio_put(bio);
}
/**
* submit_bio: submit a bio to the block device layer for I/O
* @rw: whether to %READ or %WRITE, or maybe to %READA (read ahead)
@@ -1759,161 +1731,6 @@ int submit_bio(int rw, struct bio *bio)
return 1;
}
/**
* submit_bh: submit a buffer_head to the block device layer for I/O
* @rw: whether to %READ or %WRITE, or maybe to %READA (read ahead)
* @bh: The &struct buffer_head which describes the I/O
*
**/
int submit_bh(int rw, struct buffer_head * bh)
{
struct bio *bio;
BUG_ON(!buffer_locked(bh));
BUG_ON(!buffer_mapped(bh));
BUG_ON(!bh->b_end_io);
if ((rw == READ || rw == READA) && buffer_uptodate(bh))
buffer_error();
if (rw == WRITE && !buffer_uptodate(bh))
buffer_error();
if (rw == READ && buffer_dirty(bh))
buffer_error();
set_buffer_req(bh);
/*
* from here on down, it's all bio -- do the initial mapping,
* submit_bio -> generic_make_request may further map this bio around
*/
bio = bio_alloc(GFP_NOIO, 1);
bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
bio->bi_bdev = bh->b_bdev;
bio->bi_io_vec[0].bv_page = bh->b_page;
bio->bi_io_vec[0].bv_len = bh->b_size;
bio->bi_io_vec[0].bv_offset = bh_offset(bh);
bio->bi_vcnt = 1;
bio->bi_idx = 0;
bio->bi_size = bh->b_size;
bio->bi_end_io = end_bio_bh_io_sync;
bio->bi_private = bh;
return submit_bio(rw, bio);
}
/**
* ll_rw_block: low-level access to block devices
* @rw: whether to %READ or %WRITE or maybe %READA (readahead)
* @nr: number of &struct buffer_heads in the array
* @bhs: array of pointers to &struct buffer_head
*
* ll_rw_block() takes an array of pointers to &struct buffer_heads,
* and requests an I/O operation on them, either a %READ or a %WRITE.
* The third %READA option is described in the documentation for
* generic_make_request() which ll_rw_block() calls.
*
* This function provides extra functionality, not present in
* generic_make_request(), that is relevant to buffers in the buffer
* cache or page cache. In particular it drops any buffer that it
* cannot get a lock on (with the BH_Lock state bit), any buffer that
* appears to be clean when doing a write request, and any buffer that
* appears to be up-to-date when doing a read request. Further it marks
* as clean buffers that are processed for writing (the buffer cache
* won't assume that they are actually clean until the buffer gets
* unlocked).
*
* ll_rw_block sets b_end_io to a simple completion handler that marks
* the buffer up-to-date (if appropriate), unlocks the buffer and wakes
* any waiters. Any client that needs a more interesting completion
* routine should call submit_bh() (or generic_make_request())
* directly.
*
* Caveat:
* All of the buffers must be for the same device, and must also be
* a multiple of the current approved size for the device.
*
**/
void ll_rw_block(int rw, int nr, struct buffer_head * bhs[])
{
unsigned int major;
int correct_size;
int i;
if (!nr)
return;
major = major(to_kdev_t(bhs[0]->b_bdev->bd_dev));
/* Determine correct block size for this device. */
correct_size = bdev_hardsect_size(bhs[0]->b_bdev);
/* Verify requested block sizes. */
for (i = 0; i < nr; i++) {
struct buffer_head *bh = bhs[i];
if (bh->b_size & (correct_size - 1)) {
printk(KERN_NOTICE "ll_rw_block: device %s: "
"only %d-char blocks implemented (%u)\n",
bdevname(bhs[0]->b_bdev),
correct_size, bh->b_size);
goto sorry;
}
}
if ((rw & WRITE) && bdev_read_only(bhs[0]->b_bdev)) {
printk(KERN_NOTICE "Can't write to read-only device %s\n",
bdevname(bhs[0]->b_bdev));
goto sorry;
}
for (i = 0; i < nr; i++) {
struct buffer_head *bh = bhs[i];
/* Only one thread can actually submit the I/O. */
if (test_set_buffer_locked(bh))
continue;
/* We have the buffer lock */
atomic_inc(&bh->b_count);
bh->b_end_io = end_buffer_io_sync;
switch(rw) {
case WRITE:
if (!test_clear_buffer_dirty(bh))
/* Hmmph! Nothing to write */
goto end_io;
break;
case READA:
case READ:
if (buffer_uptodate(bh))
/* Hmmph! Already have it */
goto end_io;
break;
default:
BUG();
end_io:
bh->b_end_io(bh, buffer_uptodate(bh));
continue;
}
submit_bh(rw, bh);
}
return;
sorry:
/* Make sure we don't get infinite dirty retries.. */
for (i = 0; i < nr; i++)
clear_buffer_dirty(bhs[i]);
}
#ifdef CONFIG_STRAM_SWAP
extern int stram_device_init (void);
#endif
inline void blk_recalc_rq_segments(struct request *rq)
{
struct bio *bio;
@@ -1962,8 +1779,8 @@ inline void blk_recalc_rq_sectors(struct request *rq, int nsect)
* @nr_sectors: number of sectors to end I/O on
*
* Description:
* Ends I/O on the first buffer attached to @req, and sets it up
* for the next buffer_head (if any) in the cluster.
* Ends I/O on a number of sectors attached to @req, and sets it up
* for the next range of segments (if any) in the cluster.
*
* Return:
* 0 - we are done with this request, call end_that_request_last()
......
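One more note on the ll_rw_blk.c hunk above: end_that_request_first() now completes a range of sectors rather than a single buffer_head. Below is a hedged driver-side sketch of the documented return convention (0 means the whole request is finished and end_that_request_last() should be called); the function name my_end_request is hypothetical, and queue locking and request dequeueing are driver-specific details omitted here.

#include <linux/blkdev.h>

/* Hypothetical completion path following the description above. */
static void my_end_request(struct request *req, int uptodate, int nr_sectors)
{
	if (!end_that_request_first(req, uptodate, nr_sectors))
		end_that_request_last(req);	/* 0 return: request fully completed */
}

The hunks that follow are the fs/buffer.c side of the move, where end_bio_bh_io_sync(), submit_bh() and ll_rw_block() now live.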
@@ -35,6 +35,7 @@
#include <linux/hash.h>
#include <linux/suspend.h>
#include <linux/buffer_head.h>
#include <linux/bio.h>
#include <asm/bitops.h>
static void invalidate_bh_lrus(void);
@@ -2412,6 +2413,151 @@ int brw_kiovec(int rw, int nr, struct kiobuf *iovec[],
return err ? err : transferred;
}
static void end_bio_bh_io_sync(struct bio *bio)
{
struct buffer_head *bh = bio->bi_private;
bh->b_end_io(bh, test_bit(BIO_UPTODATE, &bio->bi_flags));
bio_put(bio);
}
int submit_bh(int rw, struct buffer_head * bh)
{
struct bio *bio;
BUG_ON(!buffer_locked(bh));
BUG_ON(!buffer_mapped(bh));
BUG_ON(!bh->b_end_io);
if ((rw == READ || rw == READA) && buffer_uptodate(bh))
buffer_error();
if (rw == WRITE && !buffer_uptodate(bh))
buffer_error();
if (rw == READ && buffer_dirty(bh))
buffer_error();
set_buffer_req(bh);
/*
* from here on down, it's all bio -- do the initial mapping,
* submit_bio -> generic_make_request may further map this bio around
*/
bio = bio_alloc(GFP_NOIO, 1);
bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
bio->bi_bdev = bh->b_bdev;
bio->bi_io_vec[0].bv_page = bh->b_page;
bio->bi_io_vec[0].bv_len = bh->b_size;
bio->bi_io_vec[0].bv_offset = bh_offset(bh);
bio->bi_vcnt = 1;
bio->bi_idx = 0;
bio->bi_size = bh->b_size;
bio->bi_end_io = end_bio_bh_io_sync;
bio->bi_private = bh;
return submit_bio(rw, bio);
}
/**
* ll_rw_block: low-level access to block devices (DEPRECATED)
* @rw: whether to %READ or %WRITE or maybe %READA (readahead)
* @nr: number of &struct buffer_heads in the array
* @bhs: array of pointers to &struct buffer_head
*
* ll_rw_block() takes an array of pointers to &struct buffer_heads,
* and requests an I/O operation on them, either a %READ or a %WRITE.
* The third %READA option is described in the documentation for
* generic_make_request() which ll_rw_block() calls.
*
* This function drops any buffer that it cannot get a lock on (with the
* BH_Lock state bit), any buffer that appears to be clean when doing a
* write request, and any buffer that appears to be up-to-date when doing
* a read request. Further it marks as clean buffers that are processed for
* writing (the buffer cache won't assume that they are actually clean until
* the buffer gets unlocked).
*
* ll_rw_block sets b_end_io to a simple completion handler that marks
* the buffer up-to-date (if appropriate), unlocks the buffer and wakes
* any waiters.
*
* All of the buffers must be for the same device, and must also be a
* multiple of the current approved size for the device.
*/
void ll_rw_block(int rw, int nr, struct buffer_head * bhs[])
{
unsigned int major;
int correct_size;
int i;
if (!nr)
return;
major = major(to_kdev_t(bhs[0]->b_bdev->bd_dev));
/* Determine correct block size for this device. */
correct_size = bdev_hardsect_size(bhs[0]->b_bdev);
/* Verify requested block sizes. */
for (i = 0; i < nr; i++) {
struct buffer_head *bh = bhs[i];
if (bh->b_size & (correct_size - 1)) {
printk(KERN_NOTICE "ll_rw_block: device %s: "
"only %d-char blocks implemented (%u)\n",
bdevname(bhs[0]->b_bdev),
correct_size, bh->b_size);
goto sorry;
}
}
if ((rw & WRITE) && bdev_read_only(bhs[0]->b_bdev)) {
printk(KERN_NOTICE "Can't write to read-only device %s\n",
bdevname(bhs[0]->b_bdev));
goto sorry;
}
for (i = 0; i < nr; i++) {
struct buffer_head *bh = bhs[i];
/* Only one thread can actually submit the I/O. */
if (test_set_buffer_locked(bh))
continue;
/* We have the buffer lock */
atomic_inc(&bh->b_count);
bh->b_end_io = end_buffer_io_sync;
switch(rw) {
case WRITE:
if (!test_clear_buffer_dirty(bh))
/* Hmmph! Nothing to write */
goto end_io;
break;
case READA:
case READ:
if (buffer_uptodate(bh))
/* Hmmph! Already have it */
goto end_io;
break;
default:
BUG();
end_io:
bh->b_end_io(bh, buffer_uptodate(bh));
continue;
}
submit_bh(rw, bh);
}
return;
sorry:
/* Make sure we don't get infinite dirty retries.. */
for (i = 0; i < nr; i++)
clear_buffer_dirty(bhs[i]);
}
/*
* Sanity checks for try_to_free_buffers.
*/
......
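A second hypothetical caller sketch, also not part of the patch: the ll_rw_block() kernel-doc above points clients that need their own completion routine at submit_bh() directly. The preconditions mirror the BUG_ON() checks in submit_bh(): the buffer must be locked, mapped, and have b_end_io set. The names my_end_io and write_bh_async are illustrative only.

#include <linux/buffer_head.h>

/* Hypothetical handler matching the b_end_io signature used by end_bio_bh_io_sync(). */
static void my_end_io(struct buffer_head *bh, int uptodate)
{
	if (uptodate)
		set_buffer_uptodate(bh);
	else
		clear_buffer_uptodate(bh);
	unlock_buffer(bh);
	put_bh(bh);			/* drop the reference taken at submission */
}

/* Hypothetical caller: write out an up-to-date buffer with a custom end_io.
 * Clearing the dirty bit beforehand is the caller's business, as in ll_rw_block(). */
static void write_bh_async(struct buffer_head *bh)
{
	lock_buffer(bh);
	get_bh(bh);
	bh->b_end_io = my_end_io;
	submit_bh(WRITE, bh);
}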
@@ -167,6 +167,8 @@ void wakeup_bdflush(void);
struct buffer_head *alloc_buffer_head(void);
void free_buffer_head(struct buffer_head * bh);
void FASTCALL(unlock_buffer(struct buffer_head *bh));
void ll_rw_block(int, int, struct buffer_head * bh[]);
int submit_bh(int, struct buffer_head *);
extern int buffer_heads_over_limit;
/*
......
@@ -1231,8 +1231,6 @@ static inline void insert_inode_hash(struct inode *inode) {
extern struct file * get_empty_filp(void);
extern void file_move(struct file *f, struct list_head *list);
extern void ll_rw_block(int, int, struct buffer_head * bh[]);
extern int submit_bh(int, struct buffer_head *);
struct bio;
extern int submit_bio(int, struct bio *);
extern int bdev_read_only(struct block_device *);
......