Commit 39e8cdf7 authored by Andrew Morton, committed by Linus Torvalds

[PATCH] cleanup of bh->flags

Moves all buffer_head-related stuff out of linux/fs.h and into
linux/buffer_head.h.  buffer_head.h is currently included at the very
end of fs.h.  So it is possible to include buffer_head.h directly from
all .c files and remove this nested include.

Also rationalises all the set_buffer_foo() and mark_buffer_bar()
functions.  We have:

	set_buffer_foo(bh)
	clear_buffer_foo(bh)
	buffer_foo(bh)

and, in some cases, where needed:

	test_set_buffer_foo(bh)
	test_clear_buffer_foo(bh)

And that's it.
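
For example, for the dirty flag these accessors map onto the usual
atomic bitops on bh->b_state (a sketch of the pattern as implied by the
conversions in the diff below, not quoted from buffer_head.h):

	set_buffer_dirty(bh);		/* was set_bit(BH_Dirty, &bh->b_state) */
	clear_buffer_dirty(bh);		/* was clear_bit(BH_Dirty, &bh->b_state) */
	buffer_dirty(bh);		/* was test_bit(BH_Dirty, &bh->b_state) */
	test_set_buffer_dirty(bh);	/* was test_and_set_bit(...) */
	test_clear_buffer_dirty(bh);	/* was test_and_clear_bit(...) */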

The BUFFER_FNS() and TAS_BUFFER_FNS() macros generate all of the above
as real inline functions.  Normally not a big fan of cpp abuse, but in
this case it fits.  These function-generating macros are available to
filesystems to expand their own b_state functions.  JBD uses this in
one case.
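
As a rough sketch of the generators (assuming the conventional
token-pasting definition; the exact text lives in linux/buffer_head.h
and is not quoted verbatim here):

	#define BUFFER_FNS(bit, name)					\
	static inline void set_buffer_##name(struct buffer_head *bh)	\
	{								\
		set_bit(BH_##bit, &(bh)->b_state);			\
	}								\
	static inline void clear_buffer_##name(struct buffer_head *bh)	\
	{								\
		clear_bit(BH_##bit, &(bh)->b_state);			\
	}								\
	static inline int buffer_##name(struct buffer_head *bh)	\
	{								\
		return test_bit(BH_##bit, &(bh)->b_state);		\
	}

	#define TAS_BUFFER_FNS(bit, name)				\
	static inline int test_set_buffer_##name(struct buffer_head *bh) \
	{								\
		return test_and_set_bit(BH_##bit, &(bh)->b_state);	\
	}								\
	static inline int test_clear_buffer_##name(struct buffer_head *bh) \
	{								\
		return test_and_clear_bit(BH_##bit, &(bh)->b_state);	\
	}

So BUFFER_FNS(Uptodate, uptodate) expands to set_buffer_uptodate(),
clear_buffer_uptodate() and buffer_uptodate(), and a filesystem can
invoke the same macros on its own private b_state bits.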
parent 411973b4
@@ -1405,7 +1405,7 @@ int submit_bh(int rw, struct buffer_head * bh)
 {
 	struct bio *bio;
 
-	BUG_ON(!test_bit(BH_Lock, &bh->b_state));
+	BUG_ON(!buffer_locked(bh));
 	BUG_ON(!buffer_mapped(bh));
 	BUG_ON(!bh->b_end_io);
@@ -1414,7 +1414,7 @@ int submit_bh(int rw, struct buffer_head * bh)
 	if (rw == WRITE && !buffer_uptodate(bh))
 		printk("%s: write of non-uptodate buffer\n", __FUNCTION__);
 
-	set_bit(BH_Req, &bh->b_state);
+	set_buffer_req(bh);
 
 	/*
 	 * from here on down, it's all bio -- do the initial mapping,
@@ -1507,7 +1507,7 @@ void ll_rw_block(int rw, int nr, struct buffer_head * bhs[])
 		struct buffer_head *bh = bhs[i];
 
 		/* Only one thread can actually submit the I/O. */
-		if (test_and_set_bit(BH_Lock, &bh->b_state))
+		if (test_set_buffer_locked(bh))
 			continue;
 
 		/* We have the buffer lock */
@@ -1516,7 +1516,7 @@ void ll_rw_block(int rw, int nr, struct buffer_head * bhs[])
 		switch(rw) {
 		case WRITE:
-			if (!atomic_set_buffer_clean(bh))
+			if (!test_clear_buffer_dirty(bh))
 				/* Hmmph! Nothing to write */
 				goto end_io;
 			break;
@@ -1530,7 +1530,7 @@ void ll_rw_block(int rw, int nr, struct buffer_head * bhs[])
 		default:
 			BUG();
 	end_io:
-			bh->b_end_io(bh, test_bit(BH_Uptodate, &bh->b_state));
+			bh->b_end_io(bh, buffer_uptodate(bh));
 			continue;
 		}
@@ -1541,7 +1541,7 @@ void ll_rw_block(int rw, int nr, struct buffer_head * bhs[])
 sorry:
 	/* Make sure we don't get infinite dirty retries.. */
 	for (i = 0; i < nr; i++)
-		mark_buffer_clean(bhs[i]);
+		clear_buffer_dirty(bhs[i]);
 }
 
 #ifdef CONFIG_STRAM_SWAP
...
@@ -211,7 +211,7 @@ static inline void init_stripe(struct stripe_head *sh, unsigned long sector)
 				buffer_locked(sh->bh_cache[i]));
 			BUG();
 		}
-		clear_bit(BH_Uptodate, &sh->bh_cache[i]->b_state);
+		clear_buffer_uptodate(sh->bh_cache[i]);
 		raid5_build_block(sh, i);
 	}
 	insert_hash(conf, sh);
@@ -411,7 +411,7 @@ static void raid5_end_read_request (struct buffer_head * bh, int uptodate)
 			buffer = NULL;
 		spin_unlock_irqrestore(&conf->device_lock, flags);
 		if (sh->bh_page[i]==NULL)
-			set_bit(BH_Uptodate, &bh->b_state);
+			set_buffer_uptodate(bh);
 		if (buffer) {
 			if (buffer->b_page != bh->b_page)
 				memcpy(buffer->b_data, bh->b_data, bh->b_size);
@@ -419,16 +419,16 @@ static void raid5_end_read_request (struct buffer_head * bh, int uptodate)
 		}
 	} else {
 		md_error(conf->mddev, bh->b_bdev);
-		clear_bit(BH_Uptodate, &bh->b_state);
+		clear_buffer_uptodate(bh);
 	}
 	/* must restore b_page before unlocking buffer... */
 	if (sh->bh_page[i]) {
 		bh->b_page = sh->bh_page[i];
 		bh->b_data = page_address(bh->b_page);
 		sh->bh_page[i] = NULL;
-		clear_bit(BH_Uptodate, &bh->b_state);
+		clear_buffer_uptodate(bh);
 	}
-	clear_bit(BH_Lock, &bh->b_state);
+	clear_buffer_locked(bh);
 	set_bit(STRIPE_HANDLE, &sh->state);
 	release_stripe(sh);
 }
@@ -453,7 +453,7 @@ static void raid5_end_write_request (struct buffer_head *bh, int uptodate)
 	md_spin_lock_irqsave(&conf->device_lock, flags);
 	if (!uptodate)
 		md_error(conf->mddev, bh->b_bdev);
-	clear_bit(BH_Lock, &bh->b_state);
+	clear_buffer_locked(bh);
 	set_bit(STRIPE_HANDLE, &sh->state);
 	__release_stripe(conf, sh);
 	md_spin_unlock_irqrestore(&conf->device_lock, flags);
@@ -682,7 +682,7 @@ static void compute_block(struct stripe_head *sh, int dd_idx)
 	}
 	if (count != 1)
 		xor_block(count, bh_ptr);
-	set_bit(BH_Uptodate, &sh->bh_cache[dd_idx]->b_state);
+	set_buffer_uptodate(sh->bh_cache[dd_idx]);
 }
 
 static void compute_parity(struct stripe_head *sh, int method)
@@ -741,8 +741,8 @@ static void compute_parity(struct stripe_head *sh, int method)
 				memcpy(bh->b_data,
 				       bdata,sh->size);
 				bh_kunmap(chosen[i]);
-			set_bit(BH_Lock, &bh->b_state);
-			mark_buffer_uptodate(bh, 1);
+			set_buffer_locked(bh);
+			set_buffer_uptodate(bh);
 		}
 	switch(method) {
@@ -765,10 +765,10 @@ static void compute_parity(struct stripe_head *sh, int method)
 		xor_block(count, bh_ptr);
 	if (method != CHECK_PARITY) {
-		mark_buffer_uptodate(sh->bh_cache[pd_idx], 1);
-		set_bit(BH_Lock, &sh->bh_cache[pd_idx]->b_state);
+		set_buffer_uptodate(sh->bh_cache[pd_idx]);
+		set_buffer_locked(sh->bh_cache[pd_idx]);
 	} else
-		mark_buffer_uptodate(sh->bh_cache[pd_idx], 0);
+		clear_buffer_uptodate(sh->bh_cache[pd_idx]);
 }
 
 static void add_stripe_bh (struct stripe_head *sh, struct buffer_head *bh, int dd_idx, int rw)
@@ -955,7 +955,7 @@ static void handle_stripe(struct stripe_head *sh)
 				compute_block(sh, i);
 				uptodate++;
 			} else if (conf->disks[i].operational) {
-				set_bit(BH_Lock, &bh->b_state);
+				set_buffer_locked(bh);
 				action[i] = READ+1;
 				/* if I am just reading this block and we don't have
 				   a failed drive, or any pending writes then sidestep the cache */
@@ -1011,7 +1011,7 @@ static void handle_stripe(struct stripe_head *sh)
 				if (test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
 				{
 					PRINTK("Read_old block %d for r-m-w\n", i);
-					set_bit(BH_Lock, &bh->b_state);
+					set_buffer_locked(bh);
 					action[i] = READ+1;
 					locked++;
 				} else {
@@ -1030,7 +1030,7 @@ static void handle_stripe(struct stripe_head *sh)
 				if (test_bit(STRIPE_PREREAD_ACTIVE, &sh->state))
 				{
 					PRINTK("Read_old block %d for Reconstruct\n", i);
-					set_bit(BH_Lock, &bh->b_state);
+					set_buffer_locked(bh);
 					action[i] = READ+1;
 					locked++;
 				} else {
@@ -1094,7 +1094,7 @@ static void handle_stripe(struct stripe_head *sh)
 			if (uptodate != disks)
 				BUG();
 			bh = sh->bh_cache[failed_num];
-			set_bit(BH_Lock, &bh->b_state);
+			set_buffer_locked(bh);
 			action[failed_num] = WRITE+1;
 			locked++;
 			set_bit(STRIPE_INSYNC, &sh->state);
@@ -1146,7 +1146,7 @@ static void handle_stripe(struct stripe_head *sh)
 			generic_make_request(action[i]-1, bh);
 		} else {
 			PRINTK("skip op %d on disc %d for sector %ld\n", action[i]-1, i, sh->sector);
-			clear_bit(BH_Lock, &bh->b_state);
+			clear_buffer_locked(bh);
 			set_bit(STRIPE_HANDLE, &sh->state);
 		}
 	}
@@ -1223,7 +1223,7 @@ static int raid5_make_request (mddev_t *mddev, int rw, struct buffer_head * bh)
 		handle_stripe(sh);
 		release_stripe(sh);
 	} else
-		bh->b_end_io(bh, test_bit(BH_Uptodate, &bh->b_state));
+		bh->b_end_io(bh, buffer_uptodate(bh));
 	return 0;
 }
...
@@ -361,7 +361,7 @@ affs_get_block(struct inode *inode, sector_t block, struct buffer_head *bh_resul
 		u32 blocknr = affs_alloc_block(inode, ext_bh->b_blocknr);
 		if (!blocknr)
 			goto err_alloc;
-		bh_result->b_state |= (1UL << BH_New);
+		set_buffer_new(bh_result);
 		AFFS_I(inode)->mmu_private += AFFS_SB(sb)->s_data_blksize;
 		AFFS_I(inode)->i_blkcnt++;
@@ -400,7 +400,7 @@ affs_get_block(struct inode *inode, sector_t block, struct buffer_head *bh_resul
 	return PTR_ERR(ext_bh);
 err_alloc:
 	brelse(ext_bh);
-	bh_result->b_state &= ~(1UL << BH_Mapped);
+	clear_buffer_mapped(bh_result);
 	bh_result->b_bdev = NULL;
 	// unlock cache
 	affs_unlock_ext(inode);
@@ -701,7 +701,7 @@ static int affs_commit_write_ofs(struct file *file, struct page *page, unsigned
 		if (IS_ERR(bh))
 			goto out;
 		memcpy(AFFS_DATA(bh), data + from, bsize);
-		if (bh->b_state & (1UL << BH_New)) {
+		if (buffer_new(bh)) {
 			AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA);
 			AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino);
 			AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx);
@@ -730,7 +730,7 @@ static int affs_commit_write_ofs(struct file *file, struct page *page, unsigned
 			goto out;
 		tmp = min(bsize, to - from);
 		memcpy(AFFS_DATA(bh), data + from, tmp);
-		if (bh->b_state & (1UL << BH_New)) {
+		if (buffer_new(bh)) {
 			AFFS_DATA_HEAD(bh)->ptype = cpu_to_be32(T_DATA);
 			AFFS_DATA_HEAD(bh)->key = cpu_to_be32(inode->i_ino);
 			AFFS_DATA_HEAD(bh)->sequence = cpu_to_be32(bidx);
...
@@ -119,7 +119,7 @@ static int blkdev_get_block(struct inode * inode, sector_t iblock, struct buffer
 	bh->b_bdev = inode->i_bdev;
 	bh->b_blocknr = iblock;
-	bh->b_state |= 1UL << BH_Mapped;
+	set_buffer_mapped(bh);
 	return 0;
 }
...
@@ -33,8 +33,6 @@
 #include <linux/mempool.h>
 #include <asm/bitops.h>
 
-#define MAX_BUF_PER_PAGE (PAGE_CACHE_SIZE / 512)
-
 #define BH_ENTRY(list) list_entry((list), struct buffer_head, b_inode_buffers)
 
 /* This is used by some architectures to estimate available memory. */
@@ -89,7 +87,7 @@ void unlock_buffer(struct buffer_head *bh)
 	if (atomic_read(&bh->b_count) == 0 && !PageLocked(bh->b_page))
 		buffer_error();
-	clear_bit(BH_Lock, &bh->b_state);
+	clear_buffer_locked(bh);
 	smp_mb__after_clear_bit();
 	if (waitqueue_active(&bh->b_wait))
 		wake_up(&bh->b_wait);
@@ -155,7 +153,10 @@ void end_buffer_io_sync(struct buffer_head *bh, int uptodate)
 {
 	if (!uptodate)
 		printk("%s: I/O error\n", __FUNCTION__);
-	mark_buffer_uptodate(bh, uptodate);
+	if (uptodate)
+		set_buffer_uptodate(bh);
+	else
+		clear_buffer_uptodate(bh);
 	unlock_buffer(bh);
 	put_bh(bh);
 }
@@ -404,6 +405,21 @@ __get_hash_table(struct block_device *bdev, sector_t block, int unused)
 	return ret;
 }
 
+struct buffer_head *get_hash_table(kdev_t dev, sector_t block, int size)
+{
+	struct block_device *bdev;
+	struct buffer_head *bh;
+	bdev = bdget(kdev_t_to_nr(dev));
+	if (!bdev) {
+		printk("No block device for %s\n", __bdevname(dev));
+		BUG();
+	}
+	bh = __get_hash_table(bdev, block, size);
+	atomic_dec(&bdev->bd_count);
+	return bh;
+}
+EXPORT_SYMBOL(get_hash_table);
+
 void buffer_insert_list(spinlock_t *lock,
 		struct buffer_head *bh, struct list_head *list)
 {
@@ -513,7 +529,10 @@ static void end_buffer_io_async(struct buffer_head *bh, int uptodate)
 	if (!uptodate)
 		printk("%s: I/O error\n", __FUNCTION__);
-	mark_buffer_uptodate(bh, uptodate);
+	if (uptodate)
+		set_buffer_uptodate(bh);
+	else
+		clear_buffer_uptodate(bh);
 	page = bh->b_page;
 	if (!uptodate)
 		SetPageError(page);
@@ -524,7 +543,7 @@ static void end_buffer_io_async(struct buffer_head *bh, int uptodate)
 	 * decide that the page is now completely done.
 	 */
 	spin_lock_irqsave(&page_uptodate_lock, flags);
-	mark_buffer_async(bh, 0);
+	clear_buffer_async(bh);
 	unlock_buffer(bh);
 	tmp = bh;
 	do {
@@ -572,7 +591,7 @@ static void end_buffer_io_async(struct buffer_head *bh, int uptodate)
 inline void set_buffer_async_io(struct buffer_head *bh)
 {
 	bh->b_end_io = end_buffer_io_async;
-	mark_buffer_async(bh, 1);
+	set_buffer_async(bh);
 }
 
 /*
@@ -928,6 +947,21 @@ __getblk(struct block_device *bdev, sector_t block, int size)
 	}
 }
 
+struct buffer_head *getblk(kdev_t dev, sector_t block, int size)
+{
+	struct block_device *bdev;
+	struct buffer_head *bh;
+	bdev = bdget(kdev_t_to_nr(dev));
+	if (!bdev) {
+		printk("No block device for %s\n", __bdevname(dev));
+		BUG();
+	}
+	bh = __getblk(bdev, block, size);
+	atomic_dec(&bdev->bd_count);
+	return bh;
+}
+EXPORT_SYMBOL(getblk);
+
 /*
  * The relationship between dirty buffers and dirty pages:
  *
@@ -964,7 +998,7 @@ __getblk(struct block_device *bdev, sector_t block, int size)
  */
 void mark_buffer_dirty(struct buffer_head *bh)
 {
-	if (!atomic_set_buffer_dirty(bh))
+	if (!test_set_buffer_dirty(bh))
 		__set_page_dirty_nobuffers(bh->b_page);
 }
@@ -991,7 +1025,7 @@ void __brelse(struct buffer_head * buf)
  */
 void __bforget(struct buffer_head * buf)
 {
-	mark_buffer_clean(buf);
+	clear_buffer_dirty(buf);
 	__brelse(buf);
 }
@@ -1027,6 +1061,21 @@ struct buffer_head * __bread(struct block_device *bdev, int block, int size)
 	return NULL;
 }
 
+struct buffer_head *bread(kdev_t dev, int block, int size)
+{
+	struct block_device *bdev;
+	struct buffer_head *bh;
+	bdev = bdget(kdev_t_to_nr(dev));
+	if (!bdev) {
+		printk("No block device for %s\n", __bdevname(dev));
+		BUG();
+	}
+	bh = __bread(bdev, block, size);
+	atomic_dec(&bdev->bd_count);
+	return bh;
+}
+EXPORT_SYMBOL(bread);
+
 void set_bh_page(struct buffer_head *bh,
 		struct page *page, unsigned long offset)
 {
@@ -1049,12 +1098,12 @@ EXPORT_SYMBOL(set_bh_page);
 static void discard_buffer(struct buffer_head * bh)
 {
 	if (buffer_mapped(bh)) {
-		mark_buffer_clean(bh);
+		clear_buffer_dirty(bh);
 		lock_buffer(bh);
 		bh->b_bdev = NULL;
-		clear_bit(BH_Mapped, &bh->b_state);
-		clear_bit(BH_Req, &bh->b_state);
-		clear_bit(BH_New, &bh->b_state);
+		clear_buffer_mapped(bh);
+		clear_buffer_req(bh);
+		clear_buffer_new(bh);
 		unlock_buffer(bh);
 	}
 }
@@ -1164,7 +1213,7 @@ void create_empty_buffers(struct page *page,
 	if (PageDirty(page)) {
 		bh = head;
 		do {
-			set_bit(BH_Dirty, &bh->b_state);
+			set_buffer_dirty(bh);
 			bh = bh->b_this_page;
 		} while (bh != head);
 	}
@@ -1194,9 +1243,9 @@ static void unmap_underlying_metadata(struct buffer_head *bh)
 		if (buffer_dirty(old_bh))
 			buffer_error();
 #endif
-		mark_buffer_clean(old_bh);
+		clear_buffer_dirty(old_bh);
 		wait_on_buffer(old_bh);
-		clear_bit(BH_Req, &old_bh->b_state);
+		clear_buffer_req(old_bh);
 		__brelse(old_bh);
 	}
 }
@@ -1263,7 +1312,7 @@ static int __block_write_full_page(struct inode *inode,
 			 * zeroed it out. That seems unnecessary and may go
 			 * away.
 			 */
-			mark_buffer_uptodate(bh, 1);
+			set_buffer_uptodate(bh);
 		} else if (!buffer_mapped(bh) && buffer_dirty(bh)) {
 			if (buffer_new(bh))
 				buffer_error();
@@ -1272,7 +1321,7 @@ static int __block_write_full_page(struct inode *inode,
 				goto recover;
 			if (buffer_new(bh)) {
 				/* blockdev mappings never come here */
-				clear_bit(BH_New, &bh->b_state);
+				clear_buffer_new(bh);
 				unmap_underlying_metadata(bh);
 			}
 		}
@@ -1304,7 +1353,7 @@ static int __block_write_full_page(struct inode *inode,
 	do {
 		struct buffer_head *next = bh->b_this_page;
 		if (buffer_async(bh)) {
-			mark_buffer_clean(bh);
+			clear_buffer_dirty(bh);
 			submit_bh(WRITE, bh);
 			nr_underway++;
 		}
@@ -1351,15 +1400,15 @@ static int __block_write_full_page(struct inode *inode,
 			 * The buffer may have been set dirty during
 			 * attachment to a dirty page.
 			 */
-			mark_buffer_clean(bh);
+			clear_buffer_dirty(bh);
 		}
 		bh = bh->b_this_page;
 	} while (bh != head);
 	do {
 		struct buffer_head *next = bh->b_this_page;
 		if (buffer_mapped(bh)) {
-			mark_buffer_uptodate(bh, 1);
-			mark_buffer_clean(bh);
+			set_buffer_uptodate(bh);
+			clear_buffer_dirty(bh);
 			submit_bh(WRITE, bh);
 			nr_underway++;
 		}
@@ -1396,21 +1445,21 @@ static int __block_prepare_write(struct inode *inode, struct page *page,
 		block_end = block_start + blocksize;
 		if (block_end <= from || block_start >= to) {
 			if (PageUptodate(page))
-				mark_buffer_uptodate(bh, 1);
+				set_buffer_uptodate(bh);
 			continue;
 		}
-		clear_bit(BH_New, &bh->b_state);
+		clear_buffer_new(bh);
 		if (!buffer_mapped(bh)) {
 			err = get_block(inode, block, bh, 1);
 			if (err)
 				goto out;
 			if (buffer_new(bh)) {
-				clear_bit(BH_New, &bh->b_state);
+				clear_buffer_new(bh);
 				unmap_underlying_metadata(bh);
 				if (PageUptodate(page)) {
 					if (!buffer_mapped(bh))
 						buffer_error();
-					mark_buffer_uptodate(bh, 1);
+					set_buffer_uptodate(bh);
 					continue;
 				}
 				if (block_end > to)
@@ -1424,7 +1473,7 @@ static int __block_prepare_write(struct inode *inode, struct page *page,
 			}
 		}
 		if (PageUptodate(page)) {
-			mark_buffer_uptodate(bh, 1);
+			set_buffer_uptodate(bh);
 			continue;
 		}
 		if (!buffer_uptodate(bh) &&
@@ -1457,11 +1506,11 @@ static int __block_prepare_write(struct inode *inode, struct page *page,
 		if (block_start >= to)
 			break;
 		if (buffer_new(bh)) {
-			clear_bit(BH_New, &bh->b_state);
+			clear_buffer_new(bh);
 			if (buffer_uptodate(bh))
 				buffer_error();
 			memset(kaddr+block_start, 0, bh->b_size);
-			mark_buffer_uptodate(bh, 1);
+			set_buffer_uptodate(bh);
 			mark_buffer_dirty(bh);
 		}
 next_bh:
@@ -1489,7 +1538,7 @@ static int __block_commit_write(struct inode *inode, struct page *page,
 			if (!buffer_uptodate(bh))
 				partial = 1;
 		} else {
-			mark_buffer_uptodate(bh, 1);
+			set_buffer_uptodate(bh);
 			mark_buffer_dirty(bh);
 		}
 	}
@@ -1509,7 +1558,7 @@ static int __block_commit_write(struct inode *inode, struct page *page,
  * Generic "read page" function for block devices that have the normal
  * get_block functionality. This is most of the block device filesystems.
  * Reads the page asynchronously --- the unlock_buffer() and
- * mark_buffer_uptodate() functions propagate buffer state into the
+ * set/clear_buffer_uptodate() functions propagate buffer state into the
  * page struct once IO has completed.
  */
 int block_read_full_page(struct page *page, get_block_t *get_block)
@@ -1549,7 +1598,7 @@ int block_read_full_page(struct page *page, get_block_t *get_block)
 			memset(kmap(page) + i*blocksize, 0, blocksize);
 			flush_dcache_page(page);
 			kunmap(page);
-			mark_buffer_uptodate(bh, 1);
+			set_buffer_uptodate(bh);
 			continue;
 		}
 		/*
@@ -1818,7 +1867,7 @@ int block_truncate_page(struct address_space *mapping,
 	/* Ok, it's mapped. Make sure it's up-to-date */
 	if (PageUptodate(page))
-		mark_buffer_uptodate(bh, 1);
+		set_buffer_uptodate(bh);
 
 	if (!buffer_uptodate(bh)) {
 		err = -EIO;
@@ -2011,9 +2060,9 @@ int brw_page(int rw, struct page *page,
 		lock_buffer(bh);
 		bh->b_blocknr = *(b++);
 		bh->b_bdev = bdev;
-		set_bit(BH_Mapped, &bh->b_state);
+		set_buffer_mapped(bh);
 		if (rw == WRITE)	/* To support submit_bh debug tests */
-			mark_buffer_uptodate(bh, 1);
+			set_buffer_uptodate(bh);
 		set_buffer_async_io(bh);
 		bh = bh->b_this_page;
 	} while (bh != head);
...
@@ -403,7 +403,7 @@ static int ext2_alloc_branch(struct inode *inode,
 		branch[n].bh = bh;
 		branch[n].p = (u32*) bh->b_data + offsets[n];
 		*branch[n].p = branch[n].key;
-		mark_buffer_uptodate(bh, 1);
+		set_buffer_uptodate(bh);
 		unlock_buffer(bh);
 		mark_buffer_dirty_inode(bh, inode);
 		/* We used to sync bh here if IS_SYNC(inode).
@@ -552,7 +552,7 @@ static int ext2_get_block(struct inode *inode, sector_t iblock, struct buffer_he
 	if (ext2_splice_branch(inode, iblock, chain, partial, left) < 0)
 		goto changed;
 
-	bh_result->b_state |= (1UL << BH_New);
+	set_buffer_new(bh_result);
 	goto got_it;
 
 changed:
...
@@ -575,7 +575,7 @@ static int ext3_alloc_branch(handle_t *handle, struct inode *inode,
 		branch[n].p = (u32*) bh->b_data + offsets[n];
 		*branch[n].p = branch[n].key;
 		BUFFER_TRACE(bh, "marking uptodate");
-		mark_buffer_uptodate(bh, 1);
+		set_buffer_uptodate(bh);
 		unlock_buffer(bh);
 		BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
@@ -746,7 +746,7 @@ static int ext3_get_block_handle(handle_t *handle, struct inode *inode,
 	/* Simplest case - block found, no allocation needed */
 	if (!partial) {
-		bh_result->b_state &= ~(1UL << BH_New);
+		clear_buffer_new(bh_result);
 got_it:
 		map_bh(bh_result, inode->i_sb, le32_to_cpu(chain[depth-1].key));
 		/* Clean up and exit */
@@ -812,7 +812,7 @@ static int ext3_get_block_handle(handle_t *handle, struct inode *inode,
 	if (new_size > ei->i_disksize)
 		ei->i_disksize = new_size;
 
-	bh_result->b_state |= (1UL << BH_New);
+	set_buffer_new(bh_result);
 	goto got_it;
 
 changed:
@@ -874,7 +874,7 @@ struct buffer_head *ext3_getblk(handle_t *handle, struct inode * inode,
 			if (!fatal) {
 				memset(bh->b_data, 0,
 				       inode->i_sb->s_blocksize);
-				mark_buffer_uptodate(bh, 1);
+				set_buffer_uptodate(bh);
 			}
 			unlock_buffer(bh);
 			BUFFER_TRACE(bh, "call ext3_journal_dirty_metadata");
@@ -1070,7 +1070,7 @@ static int journal_dirty_async_data(handle_t *handle, struct buffer_head *bh)
 /* For commit_write() in data=journal mode */
 static int commit_write_fn(handle_t *handle, struct buffer_head *bh)
 {
-	set_bit(BH_Uptodate, &bh->b_state);
+	set_buffer_uptodate(bh);
 	return ext3_journal_dirty_metadata(handle, bh);
 }
@@ -1423,7 +1423,7 @@ static int ext3_block_truncate_page(handle_t *handle,
 	/* Ok, it's mapped. Make sure it's up-to-date */
 	if (PageUptodate(page))
-		set_bit(BH_Uptodate, &bh->b_state);
+		set_buffer_uptodate(bh);
 
 	if (!buffer_uptodate(bh)) {
 		err = -EIO;
...
@@ -75,7 +75,10 @@ void default_fat_set_uptodate (
 				struct buffer_head *bh,
 				int val)
 {
-	mark_buffer_uptodate(bh, val);
+	if (val)
+		set_buffer_uptodate(bh);
+	else
+		clear_buffer_uptodate(bh);
 }
 
 int default_fat_is_uptodate (struct super_block *sb, struct buffer_head *bh)
...
@@ -71,7 +71,7 @@ int fat_get_block(struct inode *inode, sector_t iblock, struct buffer_head *bh_r
 		return phys;
 	if (!phys)
 		BUG();
-	bh_result->b_state |= (1UL << BH_New);
+	set_buffer_new(bh_result);
 	map_bh(bh_result, sb, phys);
 	return 0;
 }
...
@@ -114,7 +114,7 @@ int hfs_get_block(struct inode *inode, sector_t iblock, struct buffer_head *bh_r
 	phys = hfs_extent_map(HFS_I(inode)->fork, iblock, create);
 	if (phys) {
 		if (create)
-			bh_result->b_state |= (1UL << BH_New);
+			set_buffer_new(bh_result);
 		map_bh(bh_result, inode->i_sb, phys);
 		return 0;
 	}
@@ -479,7 +479,7 @@ hfs_s32 hfs_do_write(struct inode *inode, struct hfs_fork * fork, hfs_u32 pos,
 			pos += c;
 			written += c;
 			buf += c;
-			mark_buffer_uptodate(bh, 1);
+			set_buffer_uptodate(bh);
 			mark_buffer_dirty(bh);
 			brelse(bh);
 		}
...
@@ -45,7 +45,7 @@ hfs_buffer hfs_buffer_get(hfs_sysmdb sys_mdb, int block, int read) {
 	} else {
 		tmp = sb_getblk(sys_mdb, block);
 		if (tmp) {
-			mark_buffer_uptodate(tmp, 1);
+			set_buffer_uptodate(tmp);
 		}
 	}
 	if (!tmp) {
...
@@ -180,7 +180,7 @@ void *hpfs_get_sector(struct super_block *s, unsigned secno, struct buffer_head
 	if ((*bhp = bh = sb_getblk(s, secno)) != NULL) {
 		if (!buffer_uptodate(bh)) wait_on_buffer(bh);
-		mark_buffer_uptodate(bh, 1);
+		set_buffer_uptodate(bh);
 		return bh->b_data;
 	} else {
 		printk("HPFS: hpfs_get_sector: getblk failed\n");
...
@@ -91,7 +91,7 @@ int hpfs_get_block(struct inode *inode, sector_t iblock, struct buffer_head *bh_
 	}
 	inode->i_blocks++;
 	hpfs_i(inode)->mmu_private += 512;
-	bh_result->b_state |= 1UL << BH_New;
+	set_buffer_new(bh_result);
 	map_bh(bh_result, inode->i_sb, s);
 	return 0;
 }
...
@@ -29,7 +29,10 @@ extern spinlock_t journal_datalist_lock;
 static void journal_end_buffer_io_sync(struct buffer_head *bh, int uptodate)
 {
 	BUFFER_TRACE(bh, "");
-	mark_buffer_uptodate(bh, uptodate);
+	if (uptodate)
+		set_buffer_uptodate(bh);
+	else
+		clear_buffer_uptodate(bh);
 	unlock_buffer(bh);
 }
@@ -447,9 +450,9 @@ void journal_commit_transaction(journal_t *journal)
 		unlock_journal(journal);
 		for (i=0; i<bufs; i++) {
 			struct buffer_head *bh = wbuf[i];
-			set_bit(BH_Lock, &bh->b_state);
-			clear_bit(BH_Dirty, &bh->b_state);
-			mark_buffer_uptodate(bh, 1);
+			set_buffer_locked(bh);
+			clear_buffer_dirty(bh);
+			set_buffer_uptodate(bh);
 			bh->b_end_io = journal_end_buffer_io_sync;
 			submit_bh(WRITE, bh);
 		}
@@ -588,7 +591,7 @@ void journal_commit_transaction(journal_t *journal)
 		JBUFFER_TRACE(descriptor, "write commit block");
 		{
 			struct buffer_head *bh = jh2bh(descriptor);
-			mark_buffer_uptodate(bh, 1);
+			set_buffer_uptodate(bh);
 			ll_rw_block(WRITE, 1, &bh);
 			wait_on_buffer(bh);
 			__brelse(bh);		/* One for getblk() */
...
@@ -474,7 +474,8 @@ int journal_write_metadata_buffer(transaction_t *transaction,
 	new_bh->b_size = jh2bh(jh_in)->b_size;
 	new_bh->b_bdev = transaction->t_journal->j_dev;
 	new_bh->b_blocknr = blocknr;
-	new_bh->b_state |= (1 << BH_Mapped) | (1 << BH_Dirty);
+	set_buffer_mapped(new_bh);
+	set_buffer_dirty(new_bh);
 
 	*jh_out = new_jh;
@@ -886,7 +887,7 @@ int journal_create (journal_t *journal)
 		BUFFER_TRACE(bh, "marking dirty");
 		mark_buffer_dirty(bh);
 		BUFFER_TRACE(bh, "marking uptodate");
-		mark_buffer_uptodate(bh, 1);
+		set_buffer_uptodate(bh);
 		unlock_buffer(bh);
 		__brelse(bh);
 	}
...
@@ -484,7 +484,7 @@ static int do_one_pass(journal_t *journal,
 				BUFFER_TRACE(nbh, "marking dirty");
 				mark_buffer_dirty(nbh);
 				BUFFER_TRACE(nbh, "marking uptodate");
-				mark_buffer_uptodate(nbh, 1);
+				set_buffer_uptodate(nbh);
 				++info->nr_replays;
 				/* ll_rw_block(WRITE, 1, &nbh); */
 				unlock_buffer(nbh);
...
@@ -541,7 +541,7 @@ static void flush_descriptor(journal_t *journal,
 	{
 		struct buffer_head *bh = jh2bh(descriptor);
 		BUFFER_TRACE(bh, "write");
-		mark_buffer_uptodate(bh, 1);
+		set_buffer_uptodate(bh);
 		ll_rw_block (WRITE, 1, &bh);
 	}
 }
...
@@ -1202,7 +1202,7 @@ void journal_forget (handle_t *handle, struct buffer_head *bh)
 		/* If we are forgetting a buffer which is already part
 		 * of this transaction, then we can just drop it from
 		 * the transaction immediately. */
-		clear_bit(BH_Dirty, &bh->b_state);
+		clear_buffer_dirty(bh);
 		clear_bit(BH_JBDDirty, &bh->b_state);
 
 		JBUFFER_TRACE(jh, "belongs to current transaction: unfile");
@@ -1547,9 +1547,8 @@ void __journal_unfile_buffer(struct journal_head *jh)
 	__blist_del_buffer(list, jh);
 	jh->b_jlist = BJ_None;
-	if (test_and_clear_bit(BH_JBDDirty, &jh2bh(jh)->b_state)) {
-		set_bit(BH_Dirty, &jh2bh(jh)->b_state);
-	}
+	if (test_and_clear_bit(BH_JBDDirty, &jh2bh(jh)->b_state))
+		set_buffer_dirty(jh2bh(jh));
 }
 
 void journal_unfile_buffer(struct journal_head *jh)
@@ -1856,12 +1855,11 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh)
 zap_buffer:
 	if (buffer_dirty(bh))
-		mark_buffer_clean(bh);
+		clear_buffer_dirty(bh);
 	J_ASSERT_BH(bh, !buffer_jdirty(bh));
-	// clear_bit(BH_Uptodate, &bh->b_state);
-	clear_bit(BH_Mapped, &bh->b_state);
-	clear_bit(BH_Req, &bh->b_state);
-	clear_bit(BH_New, &bh->b_state);
+	clear_buffer_mapped(bh);
+	clear_buffer_req(bh);
+	clear_buffer_new(bh);
 	bh->b_bdev = NULL;
 	return may_free;
 }
@@ -1976,7 +1974,7 @@ void __journal_file_buffer(struct journal_head *jh,
 	if (jlist == BJ_Metadata || jlist == BJ_Reserved ||
 	    jlist == BJ_Shadow || jlist == BJ_Forget) {
-		if (atomic_set_buffer_clean(jh2bh(jh))) {
+		if (test_clear_buffer_dirty(jh2bh(jh))) {
 			set_bit(BH_JBDDirty, &jh2bh(jh)->b_state);
 		}
 	}
...
@@ -229,7 +229,7 @@ static int jfs_get_block(struct inode *ip, sector_t lblock,
 		rc = extRecord(ip, &xad);
 		if (rc)
 			goto unlock;
-		bh_result->b_state |= (1UL << BH_New);
+		set_buffer_new(bh_result);
 	}
 	map_bh(bh_result, ip->i_sb, xaddr);
@@ -249,7 +249,7 @@ static int jfs_get_block(struct inode *ip, sector_t lblock,
 		if (rc)
 			goto unlock;
-		bh_result->b_state |= (1UL << BH_New);
+		set_buffer_new(bh_result);
 		map_bh(bh_result, ip->i_sb, addressXAD(&xad));
 #else				/* _JFS_4K */
...
@@ -267,7 +267,7 @@ static int direct_get_block(struct inode *ip, sector_t lblock,
 			    struct buffer_head *bh_result, int create)
 {
 	if (create)
-		bh_result->b_state |= (1UL << BH_New);
+		set_buffer_new(bh_result);
 	map_bh(bh_result, ip->i_sb, lblock);
...
@@ -969,7 +969,7 @@ int jfs_symlink(struct inode *dip, struct dentry *dentry, const char *name)
 			memcpy(mp->data, name, copy_size);
 			flush_metapage(mp);
 #if 0
-			mark_buffer_uptodate(bp, 1);
+			set_buffer_uptodate(bp);
 			mark_buffer_dirty(bp, 1);
 			if (IS_SYNC(dip)) {
 				ll_rw_block(WRITE, 1, &bp);
...
@@ -89,7 +89,7 @@ static int alloc_branch(struct inode *inode,
 		branch[n].bh = bh;
 		branch[n].p = (block_t*) bh->b_data + offsets[n];
 		*branch[n].p = branch[n].key;
-		mark_buffer_uptodate(bh, 1);
+		set_buffer_uptodate(bh);
 		unlock_buffer(bh);
 		mark_buffer_dirty_inode(bh, inode);
 		parent = nr;
@@ -194,7 +194,7 @@ static inline int get_block(struct inode * inode, sector_t block,
 	if (splice_branch(inode, chain, partial, left) < 0)
 		goto changed;
 
-	bh->b_state |= (1UL << BH_New);
+	set_buffer_new(bh);
 	goto got_it;
 
 changed:
...
@@ -48,7 +48,10 @@ static void end_buffer_read_file_async(struct buffer_head *bh, int uptodate)
 	struct buffer_head *tmp;
 	struct page *page;
 
-	mark_buffer_uptodate(bh, uptodate);
+	if (uptodate)
+		set_buffer_uptodate(bh);
+	else
+		clear_buffer_uptodate(bh);
 
 	page = bh->b_page;
@@ -73,7 +76,7 @@ static void end_buffer_read_file_async(struct buffer_head *bh, int uptodate)
 		SetPageError(page);
 	spin_lock_irqsave(&page_uptodate_lock, flags);
-	mark_buffer_async(bh, 0);
+	clear_buffer_async(bh);
 	unlock_buffer(bh);
 	tmp = bh->b_this_page;
@@ -167,7 +170,7 @@ static int ntfs_file_read_block(struct page *page)
 				/* Setup buffer head to correct block. */
 				bh->b_blocknr = ((lcn << vol->cluster_size_bits)
 						+ vcn_ofs) >> blocksize_bits;
-				bh->b_state |= (1UL << BH_Mapped);
+				set_buffer_mapped(bh);
 				/* Only read initialized data blocks. */
 				if (iblock < zblock) {
 					arr[nr++] = bh;
@@ -200,12 +203,12 @@ static int ntfs_file_read_block(struct page *page)
 		 */
 handle_hole:
 		bh->b_blocknr = -1UL;
-		bh->b_state &= ~(1UL << BH_Mapped);
+		clear_buffer_mapped(bh);
 handle_zblock:
 		memset(kmap(page) + i * blocksize, 0, blocksize);
 		flush_dcache_page(page);
 		kunmap(page);
-		set_bit(BH_Uptodate, &bh->b_state);
+		set_buffer_uptodate(bh);
 	} while (i++, iblock++, (bh = bh->b_this_page) != head);
 
 	/* Check we have at least one buffer ready for i/o. */
@@ -215,7 +218,7 @@ static int ntfs_file_read_block(struct page *page)
 			struct buffer_head *tbh = arr[i];
 			lock_buffer(tbh);
 			tbh->b_end_io = end_buffer_read_file_async;
-			mark_buffer_async(tbh, 1);
+			set_buffer_async(tbh);
 		}
 		/* Finally, start i/o on the buffers. */
 		for (i = 0; i < nr; i++)
@@ -346,7 +349,10 @@ static void end_buffer_read_mftbmp_async(struct buffer_head *bh, int uptodate)
 	struct buffer_head *tmp;
 	struct page *page;
 
-	mark_buffer_uptodate(bh, uptodate);
+	if (uptodate)
+		set_buffer_uptodate(bh);
+	else
+		clear_buffer_uptodate(bh);
 
 	page = bh->b_page;
@@ -372,7 +378,7 @@ static void end_buffer_read_mftbmp_async(struct buffer_head *bh, int uptodate)
 		SetPageError(page);
 	spin_lock_irqsave(&page_uptodate_lock, flags);
-	mark_buffer_async(bh, 0);
+	clear_buffer_async(bh);
 	unlock_buffer(bh);
 	tmp = bh->b_this_page;
@@ -454,7 +460,7 @@ static int ntfs_mftbmp_readpage(ntfs_volume *vol, struct page *page)
 			/* Setup buffer head to correct block. */
 			bh->b_blocknr = ((lcn << vol->cluster_size_bits)
 					+ vcn_ofs) >> blocksize_bits;
-			bh->b_state |= (1UL << BH_Mapped);
+			set_buffer_mapped(bh);
 			/* Only read initialized data blocks. */
 			if (iblock < zblock) {
 				arr[nr++] = bh;
@@ -480,12 +486,12 @@ static int ntfs_mftbmp_readpage(ntfs_volume *vol, struct page *page)
 		 * the buffer uptodate.
 		 */
 		bh->b_blocknr = -1UL;
-		bh->b_state &= ~(1UL << BH_Mapped);
+		clear_buffer_mapped(bh);
handle_zblock:
 		memset(kmap(page) + i * blocksize, 0, blocksize);
 		flush_dcache_page(page);
 		kunmap(page);
-		set_bit(BH_Uptodate, &bh->b_state);
+		set_buffer_uptodate(bh);
 	} while (i++, iblock++, (bh = bh->b_this_page) != head);
 
 	/* Check we have at least one buffer ready for i/o. */
@@ -495,7 +501,7 @@ static int ntfs_mftbmp_readpage(ntfs_volume *vol, struct page *page)
 			struct buffer_head *tbh = arr[i];
 			lock_buffer(tbh);
 			tbh->b_end_io = end_buffer_read_mftbmp_async;
-			mark_buffer_async(tbh, 1);
+			set_buffer_async(tbh);
 		}
 		/* Finally, start i/o on the buffers. */
 		for (i = 0; i < nr; i++)
@@ -539,7 +545,10 @@ static void end_buffer_read_mst_async(struct buffer_head *bh, int uptodate)
 	struct page *page;
 	ntfs_inode *ni;
 
-	mark_buffer_uptodate(bh, uptodate);
+	if (uptodate)
+		set_buffer_uptodate(bh);
+	else
+		clear_buffer_uptodate(bh);
 
 	page = bh->b_page;
@@ -565,7 +574,7 @@ static void end_buffer_read_mst_async(struct buffer_head *bh, int uptodate)
 		SetPageError(page);
 	spin_lock_irqsave(&page_uptodate_lock, flags);
-	mark_buffer_async(bh, 0);
+	clear_buffer_async(bh);
 	unlock_buffer(bh);
 	tmp = bh->b_this_page;
@@ -701,7 +710,7 @@ int ntfs_mst_readpage(struct file *dir, struct page *page)
 				/* Setup buffer head to correct block. */
 				bh->b_blocknr = ((lcn << vol->cluster_size_bits)
 						+ vcn_ofs) >> blocksize_bits;
-				bh->b_state |= (1UL << BH_Mapped);
+				set_buffer_mapped(bh);
 				/* Only read initialized data blocks. */
 				if (iblock < zblock) {
 					arr[nr++] = bh;
@@ -734,12 +743,12 @@ int ntfs_mst_readpage(struct file *dir, struct page *page)
 		 */
handle_hole:
 		bh->b_blocknr = -1UL;
-		bh->b_state &= ~(1UL << BH_Mapped);
+		clear_buffer_mapped(bh);
handle_zblock:
 		memset(kmap(page) + i * blocksize, 0, blocksize);
 		flush_dcache_page(page);
 		kunmap(page);
-		set_bit(BH_Uptodate, &bh->b_state);
+		set_buffer_uptodate(bh);
 	} while (i++, iblock++, (bh = bh->b_this_page) != head);
 
 	/* Check we have at least one buffer ready for i/o. */
@@ -749,7 +758,7 @@ int ntfs_mst_readpage(struct file *dir, struct page *page)
 			struct buffer_head *tbh = arr[i];
 			lock_buffer(tbh);
 			tbh->b_end_io = end_buffer_read_mst_async;
-			mark_buffer_async(tbh, 1);
+			set_buffer_async(tbh);
 		}
 		/* Finally, start i/o on the buffers. */
 		for (i = 0; i < nr; i++)
...
@@ -48,7 +48,7 @@ inline void do_balance_mark_leaf_dirty (struct tree_balance * tb,
 					struct buffer_head * bh, int flag)
 {
 	if (reiserfs_dont_log(tb->tb_sb)) {
-		if (!test_and_set_bit(BH_Dirty, &bh->b_state)) {
+		if (!test_set_buffer_dirty(bh)) {
 			// __mark_buffer_dirty(bh) ;
 			tb->need_balance_dirty = 1;
 		}
@@ -1225,7 +1225,7 @@ struct buffer_head * get_FEB (struct tree_balance * tb)
 	bi.bi_parent = 0;
 	bi.bi_position = 0;
 	make_empty_node (&bi);
-	set_bit(BH_Uptodate, &first_b->b_state);
+	set_buffer_uptodate(first_b);
 	tb->FEB[i] = 0;
 	tb->used[i] = first_b;
@@ -1272,7 +1272,7 @@ void reiserfs_invalidate_buffer (struct tree_balance * tb, struct buffer_head *
 	set_blkh_level( blkh, FREE_LEVEL );
 	set_blkh_nr_item( blkh, 0 );
-	mark_buffer_clean (bh);
+	clear_buffer_dirty(bh);
 	/* reiserfs_free_block is no longer schedule safe
 	reiserfs_free_block (tb->transaction_handle, tb->tb_sb, bh->b_blocknr);
 	*/
...
@@ -329,7 +329,7 @@ static int _get_block_create_0 (struct inode * inode, long block,
     ** and jump to the end
     */
     if (PageUptodate(bh_result->b_page)) {
-	mark_buffer_uptodate(bh_result, 1);
+	set_buffer_uptodate(bh_result);
 	goto finished ;
     }
@@ -398,7 +398,7 @@ static int _get_block_create_0 (struct inode * inode, long block,
     pathrelse (&path);
     /* I _really_ doubt that you want it. Chris? */
     map_bh(bh_result, inode->i_sb, 0);
-    mark_buffer_uptodate (bh_result, 1);
+    set_buffer_uptodate (bh_result);
     return 0;
 }
@@ -653,7 +653,7 @@ int reiserfs_get_block (struct inode * inode, sector_t block,
 	    reiserfs_restore_prepared_buffer(inode->i_sb, bh) ;
 	    goto research;
 	}
-	bh_result->b_state |= (1UL << BH_New);
+	set_buffer_new(bh_result);
 	put_block_num(item, pos_in_item, allocated_block_nr) ;
 	unfm_ptr = allocated_block_nr;
 	journal_mark_dirty (&th, inode->i_sb, bh);
@@ -705,7 +705,7 @@ int reiserfs_get_block (struct inode * inode, sector_t block,
 	       allocated block for that */
 	    unp = cpu_to_le32 (allocated_block_nr);
 	    set_block_dev_mapped (bh_result, allocated_block_nr, inode);
-	    bh_result->b_state |= (1UL << BH_New);
+	    set_buffer_new(bh_result);
 	    done = 1;
 	}
 	tmp_key = key; // ;)
@@ -761,7 +761,7 @@ int reiserfs_get_block (struct inode * inode, sector_t block,
 		reiserfs_free_block (&th, allocated_block_nr);
 		goto failure;
 	    }
-	    /* it is important the mark_buffer_uptodate is done after
+	    /* it is important the set_buffer_uptodate is done after
 	    ** the direct2indirect. The buffer might contain valid
 	    ** data newer than the data on disk (read by readpage, changed,
 	    ** and then sent here by writepage). direct2indirect needs
@@ -769,7 +769,7 @@ int reiserfs_get_block (struct inode * inode, sector_t block,
 	    ** if the data in unbh needs to be replaced with data from
 	    ** the disk
 	    */
-	    mark_buffer_uptodate (unbh, 1);
+	    set_buffer_uptodate (unbh);
 
 	    /* we've converted the tail, so we must
 	    ** flush unbh before the transaction commits
@@ -809,7 +809,7 @@ int reiserfs_get_block (struct inode * inode, sector_t block,
 		   block for that */
 		un.unfm_nodenum = cpu_to_le32 (allocated_block_nr);
 		set_block_dev_mapped (bh_result, allocated_block_nr, inode);
-		bh_result->b_state |= (1UL << BH_New);
+		set_buffer_new(bh_result);
 		done = 1;
 	    } else {
 		/* paste hole to the indirect item */
@@ -1851,7 +1851,7 @@ static int map_block_for_writepage(struct inode *inode,
 		goto out ;
 	    }
 	    set_block_dev_mapped(bh_result, get_block_num(item,pos_in_item),inode);
-	    mark_buffer_uptodate(bh_result, 1);
+	    set_buffer_uptodate(bh_result);
 	} else if (is_direct_le_ih(ih)) {
 	    char *p ;
 	    p = page_address(bh_result->b_page) ;
@@ -1871,7 +1871,7 @@ static int map_block_for_writepage(struct inode *inode,
 	    journal_mark_dirty(&th, inode->i_sb, bh) ;
 	    bytes_copied += copy_size ;
 	    set_block_dev_mapped(bh_result, 0, inode);
-	    mark_buffer_uptodate(bh_result, 1);
+	    set_buffer_uptodate(bh_result);
 	    /* are there still bytes left? */
 	    if (bytes_copied < bh_result->b_size &&
@@ -1921,8 +1921,8 @@ static inline void submit_bh_for_writepage(struct buffer_head **bhp, int nr) {
 	** later on in the call chain will be cleaning it. So, we
 	** clean the buffer here, it still gets written either way.
*/ */
clear_bit(BH_Dirty, &bh->b_state) ; clear_buffer_dirty(bh) ;
set_bit(BH_Uptodate, &bh->b_state) ; set_buffer_uptodate(bh) ;
submit_bh(WRITE, bh) ; submit_bh(WRITE, bh) ;
} }
} }
......
...@@ -124,7 +124,7 @@ static void init_journal_hash(struct super_block *p_s_sb) { ...@@ -124,7 +124,7 @@ static void init_journal_hash(struct super_block *p_s_sb) {
*/ */
static int reiserfs_clean_and_file_buffer(struct buffer_head *bh) { static int reiserfs_clean_and_file_buffer(struct buffer_head *bh) {
if (bh) if (bh)
mark_buffer_clean(bh); clear_buffer_dirty(bh);
return 0 ; return 0 ;
} }
...@@ -847,7 +847,7 @@ static int _update_journal_header_block(struct super_block *p_s_sb, unsigned lon ...@@ -847,7 +847,7 @@ static int _update_journal_header_block(struct super_block *p_s_sb, unsigned lon
jh->j_last_flush_trans_id = cpu_to_le32(trans_id) ; jh->j_last_flush_trans_id = cpu_to_le32(trans_id) ;
jh->j_first_unflushed_offset = cpu_to_le32(offset) ; jh->j_first_unflushed_offset = cpu_to_le32(offset) ;
jh->j_mount_id = cpu_to_le32(SB_JOURNAL(p_s_sb)->j_mount_id) ; jh->j_mount_id = cpu_to_le32(SB_JOURNAL(p_s_sb)->j_mount_id) ;
set_bit(BH_Dirty, &(SB_JOURNAL(p_s_sb)->j_header_bh->b_state)) ; set_buffer_dirty(SB_JOURNAL(p_s_sb)->j_header_bh) ;
ll_rw_block(WRITE, 1, &(SB_JOURNAL(p_s_sb)->j_header_bh)) ; ll_rw_block(WRITE, 1, &(SB_JOURNAL(p_s_sb)->j_header_bh)) ;
wait_on_buffer((SB_JOURNAL(p_s_sb)->j_header_bh)) ; wait_on_buffer((SB_JOURNAL(p_s_sb)->j_header_bh)) ;
if (!buffer_uptodate(SB_JOURNAL(p_s_sb)->j_header_bh)) { if (!buffer_uptodate(SB_JOURNAL(p_s_sb)->j_header_bh)) {
...@@ -893,7 +893,10 @@ static void reiserfs_end_buffer_io_sync(struct buffer_head *bh, int uptodate) { ...@@ -893,7 +893,10 @@ static void reiserfs_end_buffer_io_sync(struct buffer_head *bh, int uptodate) {
reiserfs_warning("clm-2084: pinned buffer %lu:%s sent to disk\n", reiserfs_warning("clm-2084: pinned buffer %lu:%s sent to disk\n",
bh->b_blocknr, bdevname(bh->b_bdev)) ; bh->b_blocknr, bdevname(bh->b_bdev)) ;
} }
mark_buffer_uptodate(bh, uptodate) ; if (uptodate)
set_buffer_uptodate(bh) ;
else
clear_buffer_uptodate(bh) ;
unlock_buffer(bh) ; unlock_buffer(bh) ;
put_bh(bh) ; put_bh(bh) ;
} }
...@@ -902,7 +905,7 @@ static void submit_logged_buffer(struct buffer_head *bh) { ...@@ -902,7 +905,7 @@ static void submit_logged_buffer(struct buffer_head *bh) {
get_bh(bh) ; get_bh(bh) ;
bh->b_end_io = reiserfs_end_buffer_io_sync ; bh->b_end_io = reiserfs_end_buffer_io_sync ;
mark_buffer_notjournal_new(bh) ; mark_buffer_notjournal_new(bh) ;
clear_bit(BH_Dirty, &bh->b_state) ; clear_buffer_dirty(bh) ;
submit_bh(WRITE, bh) ; submit_bh(WRITE, bh) ;
} }
...@@ -1565,12 +1568,12 @@ static int journal_read_transaction(struct super_block *p_s_sb, unsigned long cu ...@@ -1565,12 +1568,12 @@ static int journal_read_transaction(struct super_block *p_s_sb, unsigned long cu
return -1 ; return -1 ;
} }
memcpy(real_blocks[i]->b_data, log_blocks[i]->b_data, real_blocks[i]->b_size) ; memcpy(real_blocks[i]->b_data, log_blocks[i]->b_data, real_blocks[i]->b_size) ;
mark_buffer_uptodate(real_blocks[i], 1) ; set_buffer_uptodate(real_blocks[i]) ;
brelse(log_blocks[i]) ; brelse(log_blocks[i]) ;
} }
/* flush out the real blocks */ /* flush out the real blocks */
for (i = 0 ; i < le32_to_cpu(desc->j_len) ; i++) { for (i = 0 ; i < le32_to_cpu(desc->j_len) ; i++) {
set_bit(BH_Dirty, &(real_blocks[i]->b_state)) ; set_buffer_dirty(real_blocks[i]) ;
ll_rw_block(WRITE, 1, real_blocks + i) ; ll_rw_block(WRITE, 1, real_blocks + i) ;
} }
for (i = 0 ; i < le32_to_cpu(desc->j_len) ; i++) { for (i = 0 ; i < le32_to_cpu(desc->j_len) ; i++) {
...@@ -2389,7 +2392,7 @@ int journal_mark_dirty(struct reiserfs_transaction_handle *th, struct super_bloc ...@@ -2389,7 +2392,7 @@ int journal_mark_dirty(struct reiserfs_transaction_handle *th, struct super_bloc
} }
if (buffer_dirty(bh)) { if (buffer_dirty(bh)) {
clear_bit(BH_Dirty, &bh->b_state) ; clear_buffer_dirty(bh) ;
} }
if (buffer_journaled(bh)) { /* must double check after getting lock */ if (buffer_journaled(bh)) { /* must double check after getting lock */
...@@ -2979,7 +2982,7 @@ static int do_journal_end(struct reiserfs_transaction_handle *th, struct super_b ...@@ -2979,7 +2982,7 @@ static int do_journal_end(struct reiserfs_transaction_handle *th, struct super_b
rs = SB_DISK_SUPER_BLOCK(p_s_sb) ; rs = SB_DISK_SUPER_BLOCK(p_s_sb) ;
/* setup description block */ /* setup description block */
d_bh = journ_getblk(p_s_sb, SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + SB_JOURNAL(p_s_sb)->j_start) ; d_bh = journ_getblk(p_s_sb, SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + SB_JOURNAL(p_s_sb)->j_start) ;
mark_buffer_uptodate(d_bh, 1) ; set_buffer_uptodate(d_bh) ;
desc = (struct reiserfs_journal_desc *)(d_bh)->b_data ; desc = (struct reiserfs_journal_desc *)(d_bh)->b_data ;
memset(desc, 0, sizeof(struct reiserfs_journal_desc)) ; memset(desc, 0, sizeof(struct reiserfs_journal_desc)) ;
memcpy(desc->j_magic, JOURNAL_DESC_MAGIC, 8) ; memcpy(desc->j_magic, JOURNAL_DESC_MAGIC, 8) ;
...@@ -2991,7 +2994,7 @@ static int do_journal_end(struct reiserfs_transaction_handle *th, struct super_b ...@@ -2991,7 +2994,7 @@ static int do_journal_end(struct reiserfs_transaction_handle *th, struct super_b
commit = (struct reiserfs_journal_commit *)c_bh->b_data ; commit = (struct reiserfs_journal_commit *)c_bh->b_data ;
memset(commit, 0, sizeof(struct reiserfs_journal_commit)) ; memset(commit, 0, sizeof(struct reiserfs_journal_commit)) ;
commit->j_trans_id = cpu_to_le32(SB_JOURNAL(p_s_sb)->j_trans_id) ; commit->j_trans_id = cpu_to_le32(SB_JOURNAL(p_s_sb)->j_trans_id) ;
mark_buffer_uptodate(c_bh, 1) ; set_buffer_uptodate(c_bh) ;
/* init this journal list */ /* init this journal list */
atomic_set(&(SB_JOURNAL_LIST(p_s_sb)[SB_JOURNAL_LIST_INDEX(p_s_sb)].j_older_commits_done), 0) ; atomic_set(&(SB_JOURNAL_LIST(p_s_sb)[SB_JOURNAL_LIST_INDEX(p_s_sb)].j_older_commits_done), 0) ;
...@@ -3079,7 +3082,7 @@ printk("journal-2020: do_journal_end: BAD desc->j_len is ZERO\n") ; ...@@ -3079,7 +3082,7 @@ printk("journal-2020: do_journal_end: BAD desc->j_len is ZERO\n") ;
struct buffer_head *tmp_bh ; struct buffer_head *tmp_bh ;
tmp_bh = journ_getblk(p_s_sb, SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) + tmp_bh = journ_getblk(p_s_sb, SB_ONDISK_JOURNAL_1st_BLOCK(p_s_sb) +
((cur_write_start + jindex) % SB_ONDISK_JOURNAL_SIZE(p_s_sb))) ; ((cur_write_start + jindex) % SB_ONDISK_JOURNAL_SIZE(p_s_sb))) ;
mark_buffer_uptodate(tmp_bh, 1) ; set_buffer_uptodate(tmp_bh) ;
memcpy(tmp_bh->b_data, cn->bh->b_data, cn->bh->b_size) ; memcpy(tmp_bh->b_data, cn->bh->b_data, cn->bh->b_size) ;
jindex++ ; jindex++ ;
} else { } else {
......
...@@ -116,7 +116,7 @@ int reiserfs_resize (struct super_block * s, unsigned long block_count_new) ...@@ -116,7 +116,7 @@ int reiserfs_resize (struct super_block * s, unsigned long block_count_new)
reiserfs_test_and_set_le_bit(0, bitmap[i]->b_data); reiserfs_test_and_set_le_bit(0, bitmap[i]->b_data);
mark_buffer_dirty(bitmap[i]) ; mark_buffer_dirty(bitmap[i]) ;
mark_buffer_uptodate(bitmap[i], 1); set_buffer_uptodate(bitmap[i]);
ll_rw_block(WRITE, 1, bitmap + i); ll_rw_block(WRITE, 1, bitmap + i);
wait_on_buffer(bitmap[i]); wait_on_buffer(bitmap[i]);
} }
......
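Several of the hunks above (the journal-header update and the resize path) repeat the same synchronous-write idiom; with the renamed helpers it reads as follows. A minimal sketch, assuming bh holds a block that has already been read and modified:

	/* dirty the buffer, queue the write, and wait for completion */
	static int example_sync_write(struct buffer_head *bh)
	{
		set_buffer_dirty(bh);		/* was: set_bit(BH_Dirty, ...) */
		ll_rw_block(WRITE, 1, &bh);
		wait_on_buffer(bh);
		return buffer_uptodate(bh) ? 0 : -EIO;
	}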
...@@ -141,11 +141,11 @@ void reiserfs_unmap_buffer(struct buffer_head *bh) { ...@@ -141,11 +141,11 @@ void reiserfs_unmap_buffer(struct buffer_head *bh) {
if (buffer_journaled(bh) || buffer_journal_dirty(bh)) { if (buffer_journaled(bh) || buffer_journal_dirty(bh)) {
BUG() ; BUG() ;
} }
mark_buffer_clean(bh) ; clear_buffer_dirty(bh) ;
lock_buffer(bh) ; lock_buffer(bh) ;
clear_bit(BH_Mapped, &bh->b_state) ; clear_buffer_mapped(bh) ;
clear_bit(BH_Req, &bh->b_state) ; clear_buffer_req(bh) ;
clear_bit(BH_New, &bh->b_state) ; clear_buffer_new(bh);
bh->b_bdev = NULL; bh->b_bdev = NULL;
unlock_buffer(bh) ; unlock_buffer(bh) ;
} }
......
...@@ -83,7 +83,7 @@ void sysv_free_block(struct super_block * sb, u32 nr) ...@@ -83,7 +83,7 @@ void sysv_free_block(struct super_block * sb, u32 nr)
*(u16*)bh->b_data = cpu_to_fs16(sb, count); *(u16*)bh->b_data = cpu_to_fs16(sb, count);
memcpy(get_chunk(sb,bh), blocks, count * sizeof(sysv_zone_t)); memcpy(get_chunk(sb,bh), blocks, count * sizeof(sysv_zone_t));
mark_buffer_dirty(bh); mark_buffer_dirty(bh);
mark_buffer_uptodate(bh, 1); set_buffer_uptodate(bh);
brelse(bh); brelse(bh);
count = 0; count = 0;
} }
......
...@@ -145,7 +145,7 @@ static int alloc_branch(struct inode *inode, ...@@ -145,7 +145,7 @@ static int alloc_branch(struct inode *inode,
branch[n].bh = bh; branch[n].bh = bh;
branch[n].p = (u32*) bh->b_data + offsets[n]; branch[n].p = (u32*) bh->b_data + offsets[n];
*branch[n].p = branch[n].key; *branch[n].p = branch[n].key;
mark_buffer_uptodate(bh, 1); set_buffer_uptodate(bh);
unlock_buffer(bh); unlock_buffer(bh);
dirty_indirect(bh, inode); dirty_indirect(bh, inode);
} }
...@@ -246,7 +246,7 @@ static int get_block(struct inode *inode, sector_t iblock, struct buffer_head *b ...@@ -246,7 +246,7 @@ static int get_block(struct inode *inode, sector_t iblock, struct buffer_head *b
if (splice_branch(inode, chain, partial, left) < 0) if (splice_branch(inode, chain, partial, left) < 0)
goto changed; goto changed;
bh_result->b_state |= (1UL << BH_New); set_buffer_new(bh_result);
goto got_it; goto got_it;
changed: changed:
......
...@@ -254,7 +254,7 @@ struct buffer_head * udf_expand_dir_adinicb(struct inode *inode, int *block, int ...@@ -254,7 +254,7 @@ struct buffer_head * udf_expand_dir_adinicb(struct inode *inode, int *block, int
return NULL; return NULL;
lock_buffer(dbh); lock_buffer(dbh);
memset(dbh->b_data, 0x00, inode->i_sb->s_blocksize); memset(dbh->b_data, 0x00, inode->i_sb->s_blocksize);
mark_buffer_uptodate(dbh, 1); set_buffer_uptodate(dbh);
unlock_buffer(dbh); unlock_buffer(dbh);
mark_buffer_dirty_inode(dbh, inode); mark_buffer_dirty_inode(dbh, inode);
...@@ -348,7 +348,7 @@ static int udf_get_block(struct inode *inode, sector_t block, struct buffer_head ...@@ -348,7 +348,7 @@ static int udf_get_block(struct inode *inode, sector_t block, struct buffer_head
BUG(); BUG();
if (new) if (new)
bh_result->b_state |= (1UL << BH_New); set_buffer_new(bh_result);
map_bh(bh_result, inode->i_sb, phys); map_bh(bh_result, inode->i_sb, phys);
abort: abort:
unlock_kernel(); unlock_kernel();
...@@ -375,7 +375,7 @@ struct buffer_head * udf_getblk(struct inode * inode, long block, ...@@ -375,7 +375,7 @@ struct buffer_head * udf_getblk(struct inode * inode, long block,
{ {
lock_buffer(bh); lock_buffer(bh);
memset(bh->b_data, 0x00, inode->i_sb->s_blocksize); memset(bh->b_data, 0x00, inode->i_sb->s_blocksize);
mark_buffer_uptodate(bh, 1); set_buffer_uptodate(bh);
unlock_buffer(bh); unlock_buffer(bh);
mark_buffer_dirty_inode(bh, inode); mark_buffer_dirty_inode(bh, inode);
} }
...@@ -1656,7 +1656,7 @@ int8_t udf_add_aext(struct inode *inode, lb_addr *bloc, int *extoffset, ...@@ -1656,7 +1656,7 @@ int8_t udf_add_aext(struct inode *inode, lb_addr *bloc, int *extoffset,
} }
lock_buffer(nbh); lock_buffer(nbh);
memset(nbh->b_data, 0x00, inode->i_sb->s_blocksize); memset(nbh->b_data, 0x00, inode->i_sb->s_blocksize);
mark_buffer_uptodate(nbh, 1); set_buffer_uptodate(nbh);
unlock_buffer(nbh); unlock_buffer(nbh);
mark_buffer_dirty_inode(nbh, inode); mark_buffer_dirty_inode(nbh, inode);
......
...@@ -984,7 +984,7 @@ static int udf_symlink(struct inode * dir, struct dentry * dentry, const char * ...@@ -984,7 +984,7 @@ static int udf_symlink(struct inode * dir, struct dentry * dentry, const char *
bh = udf_tread(inode->i_sb, block); bh = udf_tread(inode->i_sb, block);
lock_buffer(bh); lock_buffer(bh);
memset(bh->b_data, 0x00, inode->i_sb->s_blocksize); memset(bh->b_data, 0x00, inode->i_sb->s_blocksize);
mark_buffer_uptodate(bh, 1); set_buffer_uptodate(bh);
unlock_buffer(bh); unlock_buffer(bh);
mark_buffer_dirty_inode(bh, inode); mark_buffer_dirty_inode(bh, inode);
} }
......
...@@ -225,7 +225,7 @@ void ufs_free_blocks (struct inode * inode, unsigned fragment, unsigned count) { ...@@ -225,7 +225,7 @@ void ufs_free_blocks (struct inode * inode, unsigned fragment, unsigned count) {
for (i = oldcount; i < newcount; i++) { \ for (i = oldcount; i < newcount; i++) { \
bh = sb_getblk(sb, result + i); \ bh = sb_getblk(sb, result + i); \
memset (bh->b_data, 0, sb->s_blocksize); \ memset (bh->b_data, 0, sb->s_blocksize); \
mark_buffer_uptodate(bh, 1); \ set_buffer_uptodate(bh); \
mark_buffer_dirty (bh); \ mark_buffer_dirty (bh); \
if (IS_SYNC(inode)) { \ if (IS_SYNC(inode)) { \
ll_rw_block (WRITE, 1, &bh); \ ll_rw_block (WRITE, 1, &bh); \
...@@ -360,7 +360,7 @@ unsigned ufs_new_fragments (struct inode * inode, u32 * p, unsigned fragment, ...@@ -360,7 +360,7 @@ unsigned ufs_new_fragments (struct inode * inode, u32 * p, unsigned fragment,
bh = sb_bread(sb, tmp + i); bh = sb_bread(sb, tmp + i);
if(bh) if(bh)
{ {
mark_buffer_clean (bh); clear_buffer_dirty(bh);
bh->b_blocknr = result + i; bh->b_blocknr = result + i;
mark_buffer_dirty (bh); mark_buffer_dirty (bh);
if (IS_SYNC(inode)) { if (IS_SYNC(inode)) {
......
...@@ -389,7 +389,7 @@ static int ufs_getfrag_block (struct inode *inode, sector_t fragment, struct buf ...@@ -389,7 +389,7 @@ static int ufs_getfrag_block (struct inode *inode, sector_t fragment, struct buf
if (err) if (err)
goto abort; goto abort;
if (new) if (new)
bh_result->b_state |= (1UL << BH_New); set_buffer_new(bh_result);
map_bh(bh_result, sb, phys); map_bh(bh_result, sb, phys);
abort: abort:
unlock_kernel(); unlock_kernel();
...@@ -419,7 +419,7 @@ struct buffer_head *ufs_getfrag(struct inode *inode, unsigned int fragment, ...@@ -419,7 +419,7 @@ struct buffer_head *ufs_getfrag(struct inode *inode, unsigned int fragment,
bh = sb_getblk(inode->i_sb, dummy.b_blocknr); bh = sb_getblk(inode->i_sb, dummy.b_blocknr);
if (buffer_new(&dummy)) { if (buffer_new(&dummy)) {
memset(bh->b_data, 0, inode->i_sb->s_blocksize); memset(bh->b_data, 0, inode->i_sb->s_blocksize);
mark_buffer_uptodate(bh, 1); set_buffer_uptodate(bh);
mark_buffer_dirty(bh); mark_buffer_dirty(bh);
} }
return bh; return bh;
......
...@@ -109,8 +109,13 @@ void ubh_mark_buffer_uptodate (struct ufs_buffer_head * ubh, int flag) ...@@ -109,8 +109,13 @@ void ubh_mark_buffer_uptodate (struct ufs_buffer_head * ubh, int flag)
unsigned i; unsigned i;
if (!ubh) if (!ubh)
return; return;
for ( i = 0; i < ubh->count; i++ ) if (flag) {
mark_buffer_uptodate (ubh->bh[i], flag); for ( i = 0; i < ubh->count; i++ )
set_buffer_uptodate (ubh->bh[i]);
} else {
for ( i = 0; i < ubh->count; i++ )
clear_buffer_uptodate (ubh->bh[i]);
}
} }
void ubh_ll_rw_block (int rw, unsigned nr, struct ufs_buffer_head * ubh[]) void ubh_ll_rw_block (int rw, unsigned nr, struct ufs_buffer_head * ubh[])
......
...@@ -50,7 +50,7 @@ affs_getzeroblk(struct super_block *sb, int block) ...@@ -50,7 +50,7 @@ affs_getzeroblk(struct super_block *sb, int block)
bh = sb_getblk(sb, block); bh = sb_getblk(sb, block);
lock_buffer(bh); lock_buffer(bh);
memset(bh->b_data, 0 , sb->s_blocksize); memset(bh->b_data, 0 , sb->s_blocksize);
mark_buffer_uptodate(bh, 1); set_buffer_uptodate(bh);
unlock_buffer(bh); unlock_buffer(bh);
return bh; return bh;
} }
...@@ -64,7 +64,7 @@ affs_getemptyblk(struct super_block *sb, int block) ...@@ -64,7 +64,7 @@ affs_getemptyblk(struct super_block *sb, int block)
if (block >= AFFS_SB(sb)->s_reserved && block < AFFS_SB(sb)->s_partition_size) { if (block >= AFFS_SB(sb)->s_reserved && block < AFFS_SB(sb)->s_partition_size) {
bh = sb_getblk(sb, block); bh = sb_getblk(sb, block);
wait_on_buffer(bh); wait_on_buffer(bh);
mark_buffer_uptodate(bh, 1); set_buffer_uptodate(bh);
return bh; return bh;
} }
return NULL; return NULL;
......
/*
* include/linux/buffer_head.h
*
* Everything to do with buffer_head.b_state.
*/
#ifndef BUFFER_FLAGS_H
#define BUFFER_FLAGS_H
/* bh state bits */
enum bh_state_bits {
BH_Uptodate, /* 1 if the buffer contains valid data */
BH_Dirty, /* 1 if the buffer is dirty */
BH_Lock, /* 1 if the buffer is locked */
BH_Req, /* 0 if the buffer has been invalidated */
BH_Mapped, /* 1 if the buffer has a disk mapping */
BH_New, /* 1 if the buffer is new and not yet written out */
BH_Async, /* 1 if the buffer is under end_buffer_io_async I/O */
BH_JBD, /* 1 if it has an attached journal_head */
BH_PrivateStart,/* not a state bit, but the first bit available
* for private allocation by other entities
*/
};
#define MAX_BUF_PER_PAGE (PAGE_CACHE_SIZE / 512)
struct page;
struct kiobuf;
struct buffer_head;
typedef void (bh_end_io_t)(struct buffer_head *bh, int uptodate);
/*
* Try to keep the most commonly used fields in single cache lines (16
* bytes) to improve performance. This ordering should be
* particularly beneficial on 32-bit processors.
*
* We use the first 16 bytes for the data which is used in searches
* over the block hash lists (ie. getblk() and friends).
*
* The second 16 bytes we use for lru buffer scans, as used by
* sync_buffers() and refill_freelist(). -- sct
*/
struct buffer_head {
/* First cache line: */
sector_t b_blocknr; /* block number */
unsigned short b_size; /* block size */
struct block_device *b_bdev;
atomic_t b_count; /* users using this block */
unsigned long b_state; /* buffer state bitmap (see above) */
struct buffer_head *b_this_page;/* circular list of page's buffers */
struct page *b_page; /* the page this bh is mapped to */
char * b_data; /* pointer to data block */
bh_end_io_t *b_end_io; /* I/O completion */
void *b_private; /* reserved for b_end_io */
wait_queue_head_t b_wait;
struct list_head b_inode_buffers; /* list of inode dirty buffers */
};
/*
* macro tricks to expand the set_buffer_foo(), clear_buffer_foo()
* and buffer_foo() functions.
*/
#define BUFFER_FNS(bit, name) \
static inline void set_buffer_##name(struct buffer_head *bh) \
{ \
set_bit(BH_##bit, &(bh)->b_state); \
} \
static inline void clear_buffer_##name(struct buffer_head *bh) \
{ \
clear_bit(BH_##bit, &(bh)->b_state); \
} \
static inline int buffer_##name(struct buffer_head *bh) \
{ \
return test_bit(BH_##bit, &(bh)->b_state); \
}
/*
* test_set_buffer_foo() and test_clear_buffer_foo()
*/
#define TAS_BUFFER_FNS(bit, name) \
static inline int test_set_buffer_##name(struct buffer_head *bh) \
{ \
return test_and_set_bit(BH_##bit, &(bh)->b_state); \
} \
static inline int test_clear_buffer_##name(struct buffer_head *bh) \
{ \
return test_and_clear_bit(BH_##bit, &(bh)->b_state); \
}
BUFFER_FNS(Uptodate, uptodate)
BUFFER_FNS(Dirty, dirty)
TAS_BUFFER_FNS(Dirty, dirty)
BUFFER_FNS(Lock, locked)
TAS_BUFFER_FNS(Lock, locked)
BUFFER_FNS(Req, req)
BUFFER_FNS(Mapped, mapped)
BUFFER_FNS(New, new)
BUFFER_FNS(Async, async)
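/*
 * Shown for illustration only, not part of the header: expanding
 * BUFFER_FNS(Dirty, dirty) and TAS_BUFFER_FNS(Dirty, dirty) by hand
 * yields the five inline functions used throughout this patch:
 *
 *	static inline void set_buffer_dirty(struct buffer_head *bh)
 *	{
 *		set_bit(BH_Dirty, &(bh)->b_state);
 *	}
 *	static inline void clear_buffer_dirty(struct buffer_head *bh)
 *	{
 *		clear_bit(BH_Dirty, &(bh)->b_state);
 *	}
 *	static inline int buffer_dirty(struct buffer_head *bh)
 *	{
 *		return test_bit(BH_Dirty, &(bh)->b_state);
 *	}
 *	static inline int test_set_buffer_dirty(struct buffer_head *bh)
 *	{
 *		return test_and_set_bit(BH_Dirty, &(bh)->b_state);
 *	}
 *	static inline int test_clear_buffer_dirty(struct buffer_head *bh)
 *	{
 *		return test_and_clear_bit(BH_Dirty, &(bh)->b_state);
 *	}
 */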
/*
* Utility macros
*/
/*
* FIXME: this is used only by bh_kmap, which is used only by RAID5.
* Clean this up with blockdev-in-highmem infrastructure.
*/
#define bh_offset(bh) ((unsigned long)(bh)->b_data & ~PAGE_MASK)
#define touch_buffer(bh) mark_page_accessed(bh->b_page)
/* If we *know* page->private refers to buffer_heads */
#define page_buffers(page) \
({ \
if (!PagePrivate(page)) \
BUG(); \
((struct buffer_head *)(page)->private); \
})
#define page_has_buffers(page) PagePrivate(page)
#define set_page_buffers(page, buffers) \
do { \
SetPagePrivate(page); \
page->private = (unsigned long)buffers; \
} while (0)
#define clear_page_buffers(page) \
do { \
ClearPagePrivate(page); \
page->private = 0; \
} while (0)
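/*
 * Illustrative sketch, not part of the header: the usual traversal of
 * a page's buffer ring via b_this_page (the list is circular, see the
 * struct comment above).
 */
static inline void example_for_each_buffer(struct page *page)
{
	if (page_has_buffers(page)) {
		struct buffer_head *bh, *head;

		bh = head = page_buffers(page);
		do {
			/* inspect or update bh here, e.g. buffer_dirty(bh) */
			bh = bh->b_this_page;
		} while (bh != head);
	}
}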
#define invalidate_buffers(dev) __invalidate_buffers((dev), 0)
#define destroy_buffers(dev) __invalidate_buffers((dev), 1)
/*
* Declarations
*/
void FASTCALL(mark_buffer_dirty(struct buffer_head *bh));
void buffer_init(void);
void init_buffer(struct buffer_head *, bh_end_io_t *, void *);
void set_bh_page(struct buffer_head *bh,
struct page *page, unsigned long offset);
int try_to_free_buffers(struct page *);
void create_empty_buffers(struct page *, unsigned long,
unsigned long b_state);
void end_buffer_io_sync(struct buffer_head *bh, int uptodate);
void buffer_insert_list(spinlock_t *lock,
struct buffer_head *, struct list_head *);
struct buffer_head *get_hash_table(kdev_t dev, sector_t block, int size);
struct buffer_head *getblk(kdev_t dev, sector_t block, int size);
struct buffer_head *bread(kdev_t dev, int block, int size);
/* reiserfs_writepage needs this */
void set_buffer_async_io(struct buffer_head *bh) ;
void invalidate_inode_buffers(struct inode *);
void invalidate_bdev(struct block_device *, int);
void __invalidate_buffers(kdev_t dev, int);
int sync_buffers(struct block_device *, int);
void __wait_on_buffer(struct buffer_head *);
int fsync_dev(kdev_t);
int fsync_bdev(struct block_device *);
int fsync_super(struct super_block *);
int fsync_no_super(struct block_device *);
int fsync_buffers_list(spinlock_t *lock, struct list_head *);
int inode_has_buffers(struct inode *);
struct buffer_head *__get_hash_table(struct block_device *, sector_t, int);
struct buffer_head * __getblk(struct block_device *, sector_t, int);
void __brelse(struct buffer_head *);
void __bforget(struct buffer_head *);
struct buffer_head * __bread(struct block_device *, int, int);
void wakeup_bdflush(void);
struct buffer_head *alloc_buffer_head(int async);
void free_buffer_head(struct buffer_head * bh);
int brw_page(int, struct page *, struct block_device *, sector_t [], int);
void FASTCALL(unlock_buffer(struct buffer_head *bh));
/*
* Generic address_space_operations implementations for buffer_head-backed
* address_spaces.
*/
int try_to_release_page(struct page * page, int gfp_mask);
int block_flushpage(struct page *page, unsigned long offset);
int block_symlink(struct inode *, const char *, int);
int block_write_full_page(struct page*, get_block_t*);
int block_read_full_page(struct page*, get_block_t*);
int block_prepare_write(struct page*, unsigned, unsigned, get_block_t*);
int cont_prepare_write(struct page*, unsigned, unsigned, get_block_t*,
unsigned long *);
int generic_cont_expand(struct inode *inode, loff_t size) ;
int block_commit_write(struct page *page, unsigned from, unsigned to);
int block_sync_page(struct page *);
sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *);
int generic_commit_write(struct file *, struct page *, unsigned, unsigned);
int block_truncate_page(struct address_space *, loff_t, get_block_t *);
int generic_direct_IO(int, struct inode *, struct kiobuf *,
unsigned long, int, get_block_t *);
int file_fsync(struct file *, struct dentry *, int);
#define OSYNC_METADATA (1<<0)
#define OSYNC_DATA (1<<1)
#define OSYNC_INODE (1<<2)
int generic_osync_inode(struct inode *, int);
/*
* inline definitions
*/
static inline void get_bh(struct buffer_head * bh)
{
atomic_inc(&(bh)->b_count);
}
static inline void put_bh(struct buffer_head *bh)
{
smp_mb__before_atomic_dec();
atomic_dec(&bh->b_count);
}
static inline void
mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
{
mark_buffer_dirty(bh);
buffer_insert_list(&inode->i_bufferlist_lock,
bh, &inode->i_dirty_buffers);
}
/*
* If an error happens during the make_request, this function
* has to be called. It marks the buffer as clean and not
* uptodate, and it notifies the upper layer about the end
* of the I/O.
*/
static inline void buffer_IO_error(struct buffer_head * bh)
{
clear_buffer_dirty(bh);
/*
* b_end_io has to clear the BH_Uptodate bitflag in the read error
* case, however buffer contents are not necessarily bad if a
* write fails
*/
bh->b_end_io(bh, buffer_uptodate(bh));
}
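/*
 * Illustrative sketch, not part of the header: a submission path that
 * fails while building a request backs out through buffer_IO_error().
 * Assumes bh is locked with b_end_io already set, as submit_bh()
 * requires; the `ok' flag stands in for a real failure check.
 */
static inline void example_submit_or_fail(struct buffer_head *bh, int ok)
{
	if (!ok) {
		buffer_IO_error(bh);	/* clean the buffer, signal completion */
		return;
	}
	submit_bh(WRITE, bh);
}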
static inline int fsync_inode_buffers(struct inode *inode)
{
return fsync_buffers_list(&inode->i_bufferlist_lock,
&inode->i_dirty_buffers);
}
static inline void brelse(struct buffer_head *buf)
{
if (buf)
__brelse(buf);
}
static inline void bforget(struct buffer_head *buf)
{
if (buf)
__bforget(buf);
}
static inline struct buffer_head * sb_bread(struct super_block *sb, int block)
{
return __bread(sb->s_bdev, block, sb->s_blocksize);
}
static inline struct buffer_head * sb_getblk(struct super_block *sb, int block)
{
return __getblk(sb->s_bdev, block, sb->s_blocksize);
}
static inline struct buffer_head *
sb_get_hash_table(struct super_block *sb, int block)
{
return __get_hash_table(sb->s_bdev, block, sb->s_blocksize);
}
static inline void
map_bh(struct buffer_head *bh, struct super_block *sb, int block)
{
set_buffer_mapped(bh);
bh->b_bdev = sb->s_bdev;
bh->b_blocknr = block;
}
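/*
 * Illustrative sketch, not part of the header: a typical get_block_t
 * implementation maps the result buffer and flags freshly allocated
 * blocks, as the udf/ufs hunks above now do with set_buffer_new().
 * The 1:1 mapping and the `new' flag are hypothetical placeholders.
 */
static inline int example_get_block(struct inode *inode, sector_t iblock,
				    struct buffer_head *bh_result, int create)
{
	int phys = iblock;	/* hypothetical lookup/allocation result */
	int new = 0;		/* nonzero when a block was just allocated */

	if (new)
		set_buffer_new(bh_result);	/* was: b_state |= 1UL << BH_New */
	map_bh(bh_result, inode->i_sb, phys);
	return 0;
}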
static inline void wait_on_buffer(struct buffer_head * bh)
{
if (buffer_locked(bh))
__wait_on_buffer(bh);
}
static inline void lock_buffer(struct buffer_head * bh)
{
while (test_set_buffer_locked(bh))
__wait_on_buffer(bh);
}
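/*
 * Illustrative sketch, not part of the header: the lock/fill/unlock
 * idiom used by the affs and udf hunks above, written with these
 * helpers.
 */
static inline void example_zero_block(struct super_block *sb,
				      struct buffer_head *bh)
{
	lock_buffer(bh);
	memset(bh->b_data, 0, sb->s_blocksize);
	set_buffer_uptodate(bh);
	unlock_buffer(bh);
	mark_buffer_dirty(bh);
}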
/*
* Debug
*/
void __buffer_error(char *file, int line);
#define buffer_error() __buffer_error(__FILE__, __LINE__)
#endif /* BUFFER_FLAGS_H */
...@@ -206,96 +206,12 @@ extern int leases_enable, dir_notify_enable, lease_break_time; ...@@ -206,96 +206,12 @@ extern int leases_enable, dir_notify_enable, lease_break_time;
extern void update_atime (struct inode *); extern void update_atime (struct inode *);
#define UPDATE_ATIME(inode) update_atime (inode) #define UPDATE_ATIME(inode) update_atime (inode)
extern void buffer_init(void);
extern void inode_init(unsigned long); extern void inode_init(unsigned long);
extern void mnt_init(unsigned long); extern void mnt_init(unsigned long);
extern void files_init(unsigned long); extern void files_init(unsigned long);
/* bh state bits */ struct buffer_head;
enum bh_state_bits { typedef int (get_block_t)(struct inode*,sector_t,struct buffer_head*,int);
BH_Uptodate, /* 1 if the buffer contains valid data */
BH_Dirty, /* 1 if the buffer is dirty */
BH_Lock, /* 1 if the buffer is locked */
BH_Req, /* 0 if the buffer has been invalidated */
BH_Mapped, /* 1 if the buffer has a disk mapping */
BH_New, /* 1 if the buffer is new and not yet written out */
BH_Async, /* 1 if the buffer is under end_buffer_io_async I/O */
BH_JBD, /* 1 if it has an attached journal_head */
BH_PrivateStart,/* not a state bit, but the first bit available
* for private allocation by other entities
*/
};
/*
* Try to keep the most commonly used fields in single cache lines (16
* bytes) to improve performance. This ordering should be
* particularly beneficial on 32-bit processors.
*
* We use the first 16 bytes for the data which is used in searches
* over the block hash lists (ie. getblk() and friends).
*
* The second 16 bytes we use for lru buffer scans, as used by
* sync_buffers() and refill_freelist(). -- sct
*/
struct buffer_head {
/* First cache line: */
sector_t b_blocknr; /* block number */
unsigned short b_size; /* block size */
struct block_device *b_bdev;
atomic_t b_count; /* users using this block */
unsigned long b_state; /* buffer state bitmap (see above) */
struct buffer_head *b_this_page;/* circular list of buffers in one page */
struct page *b_page; /* the page this bh is mapped to */
char * b_data; /* pointer to data block */
void (*b_end_io)(struct buffer_head *bh, int uptodate); /* I/O completion */
void *b_private; /* reserved for b_end_io */
wait_queue_head_t b_wait;
struct list_head b_inode_buffers; /* doubly linked list of inode dirty buffers */
};
typedef void (bh_end_io_t)(struct buffer_head *bh, int uptodate);
void init_buffer(struct buffer_head *, bh_end_io_t *, void *);
#define __buffer_state(bh, state) (((bh)->b_state & (1UL << BH_##state)) != 0)
#define buffer_uptodate(bh) __buffer_state(bh,Uptodate)
#define buffer_dirty(bh) __buffer_state(bh,Dirty)
#define buffer_locked(bh) __buffer_state(bh,Lock)
#define buffer_req(bh) __buffer_state(bh,Req)
#define buffer_mapped(bh) __buffer_state(bh,Mapped)
#define buffer_new(bh) __buffer_state(bh,New)
#define buffer_async(bh) __buffer_state(bh,Async)
#define bh_offset(bh) ((unsigned long)(bh)->b_data & ~PAGE_MASK)
extern void set_bh_page(struct buffer_head *bh, struct page *page, unsigned long offset);
#define touch_buffer(bh) mark_page_accessed(bh->b_page)
/* If we *know* page->private refers to buffer_heads */
#define page_buffers(page) \
({ \
if (!PagePrivate(page)) \
BUG(); \
((struct buffer_head *)(page)->private); \
})
#define page_has_buffers(page) PagePrivate(page)
#define set_page_buffers(page, buffers) \
do { \
SetPagePrivate(page); \
page->private = (unsigned long)buffers; \
} while (0)
#define clear_page_buffers(page) \
do { \
ClearPagePrivate(page); \
page->private = 0; \
} while (0)
#include <linux/pipe_fs_i.h> #include <linux/pipe_fs_i.h>
/* #include <linux/umsdos_fs_i.h> */ /* #include <linux/umsdos_fs_i.h> */
...@@ -1217,82 +1133,6 @@ extern struct file_operations rdwr_pipe_fops; ...@@ -1217,82 +1133,6 @@ extern struct file_operations rdwr_pipe_fops;
extern int fs_may_remount_ro(struct super_block *); extern int fs_may_remount_ro(struct super_block *);
extern int try_to_free_buffers(struct page *);
extern void create_empty_buffers(struct page *, unsigned long,
unsigned long b_state);
extern void end_buffer_io_sync(struct buffer_head *bh, int uptodate);
/* reiserfs_writepage needs this */
extern void set_buffer_async_io(struct buffer_head *bh) ;
static inline void get_bh(struct buffer_head * bh)
{
atomic_inc(&(bh)->b_count);
}
static inline void put_bh(struct buffer_head *bh)
{
smp_mb__before_atomic_dec();
atomic_dec(&bh->b_count);
}
/*
* This is called by bh->b_end_io() handlers when I/O has completed.
*/
static inline void mark_buffer_uptodate(struct buffer_head * bh, int on)
{
if (on)
set_bit(BH_Uptodate, &bh->b_state);
else
clear_bit(BH_Uptodate, &bh->b_state);
}
#define atomic_set_buffer_clean(bh) test_and_clear_bit(BH_Dirty, &(bh)->b_state)
static inline void mark_buffer_clean(struct buffer_head * bh)
{
clear_bit(BH_Dirty, &(bh)->b_state);
}
extern void FASTCALL(mark_buffer_dirty(struct buffer_head *bh));
extern void buffer_insert_list(spinlock_t *lock,
struct buffer_head *, struct list_head *);
static inline void
buffer_insert_inode_queue(struct buffer_head *bh, struct inode *inode)
{
buffer_insert_list(&inode->i_bufferlist_lock,
bh, &inode->i_dirty_buffers);
}
#define atomic_set_buffer_dirty(bh) test_and_set_bit(BH_Dirty, &(bh)->b_state)
static inline void mark_buffer_async(struct buffer_head * bh, int on)
{
if (on)
set_bit(BH_Async, &bh->b_state);
else
clear_bit(BH_Async, &bh->b_state);
}
/*
* If an error happens during the make_request, this function
* has to be called. It marks the buffer as clean and not
* uptodate, and it notifies the upper layer about the end
* of the I/O.
*/
static inline void buffer_IO_error(struct buffer_head * bh)
{
mark_buffer_clean(bh);
/*
* b_end_io has to clear the BH_Uptodate bitflag in the read error
* case, however buffer contents are not necessarily bad if a
* write fails
*/
bh->b_end_io(bh, test_bit(BH_Uptodate, &bh->b_state));
}
/* /*
* return READ, READA, or WRITE * return READ, READA, or WRITE
*/ */
...@@ -1303,37 +1143,13 @@ static inline void buffer_IO_error(struct buffer_head * bh) ...@@ -1303,37 +1143,13 @@ static inline void buffer_IO_error(struct buffer_head * bh)
*/ */
#define bio_data_dir(bio) ((bio)->bi_rw & 1) #define bio_data_dir(bio) ((bio)->bi_rw & 1)
extern void buffer_insert_inode_queue(struct buffer_head *, struct inode *);
static inline void mark_buffer_dirty_inode(struct buffer_head *bh, struct inode *inode)
{
mark_buffer_dirty(bh);
buffer_insert_inode_queue(bh, inode);
}
extern int check_disk_change(kdev_t); extern int check_disk_change(kdev_t);
extern int invalidate_inodes(struct super_block *); extern int invalidate_inodes(struct super_block *);
extern int invalidate_device(kdev_t, int); extern int invalidate_device(kdev_t, int);
extern void invalidate_inode_pages(struct inode *); extern void invalidate_inode_pages(struct inode *);
extern void invalidate_inode_pages2(struct address_space *); extern void invalidate_inode_pages2(struct address_space *);
extern void invalidate_inode_buffers(struct inode *);
#define invalidate_buffers(dev) __invalidate_buffers((dev), 0)
#define destroy_buffers(dev) __invalidate_buffers((dev), 1)
extern void invalidate_bdev(struct block_device *, int);
extern void __invalidate_buffers(kdev_t dev, int);
extern void write_inode_now(struct inode *, int); extern void write_inode_now(struct inode *, int);
extern int sync_buffers(struct block_device *, int);
extern int fsync_dev(kdev_t);
extern int fsync_bdev(struct block_device *);
extern int fsync_super(struct super_block *);
extern int fsync_no_super(struct block_device *);
extern void sync_inodes_sb(struct super_block *); extern void sync_inodes_sb(struct super_block *);
extern int fsync_buffers_list(spinlock_t *lock, struct list_head *);
static inline int fsync_inode_buffers(struct inode *inode)
{
return fsync_buffers_list(&inode->i_bufferlist_lock,
&inode->i_dirty_buffers);
}
extern int inode_has_buffers(struct inode *);
extern int filemap_fdatasync(struct address_space *); extern int filemap_fdatasync(struct address_space *);
extern int filemap_fdatawait(struct address_space *); extern int filemap_fdatawait(struct address_space *);
extern void sync_supers(void); extern void sync_supers(void);
...@@ -1440,112 +1256,14 @@ extern void insert_inode_hash(struct inode *); ...@@ -1440,112 +1256,14 @@ extern void insert_inode_hash(struct inode *);
extern void remove_inode_hash(struct inode *); extern void remove_inode_hash(struct inode *);
extern struct file * get_empty_filp(void); extern struct file * get_empty_filp(void);
extern void file_move(struct file *f, struct list_head *list); extern void file_move(struct file *f, struct list_head *list);
extern struct buffer_head * __get_hash_table(struct block_device *, sector_t, int);
static inline struct buffer_head * get_hash_table(kdev_t dev, sector_t block, int size)
{
struct block_device *bdev;
struct buffer_head *bh;
bdev = bdget(kdev_t_to_nr(dev));
if (!bdev) {
printk("No block device for %s\n", __bdevname(dev));
BUG();
}
bh = __get_hash_table(bdev, block, size);
atomic_dec(&bdev->bd_count);
return bh;
}
extern struct buffer_head * __getblk(struct block_device *, sector_t, int);
static inline struct buffer_head * getblk(kdev_t dev, sector_t block, int size)
{
struct block_device *bdev;
struct buffer_head *bh;
bdev = bdget(kdev_t_to_nr(dev));
if (!bdev) {
printk("No block device for %s\n", __bdevname(dev));
BUG();
}
bh = __getblk(bdev, block, size);
atomic_dec(&bdev->bd_count);
return bh;
}
extern void ll_rw_block(int, int, struct buffer_head * bh[]); extern void ll_rw_block(int, int, struct buffer_head * bh[]);
extern int submit_bh(int, struct buffer_head *); extern int submit_bh(int, struct buffer_head *);
struct bio; struct bio;
extern int submit_bio(int, struct bio *); extern int submit_bio(int, struct bio *);
extern int is_read_only(kdev_t); extern int is_read_only(kdev_t);
extern void __brelse(struct buffer_head *);
static inline void brelse(struct buffer_head *buf)
{
if (buf)
__brelse(buf);
}
extern void __bforget(struct buffer_head *);
static inline void bforget(struct buffer_head *buf)
{
if (buf)
__bforget(buf);
}
extern int set_blocksize(kdev_t, int); extern int set_blocksize(kdev_t, int);
extern int sb_set_blocksize(struct super_block *, int); extern int sb_set_blocksize(struct super_block *, int);
extern int sb_min_blocksize(struct super_block *, int); extern int sb_min_blocksize(struct super_block *, int);
extern struct buffer_head * __bread(struct block_device *, int, int);
static inline struct buffer_head * bread(kdev_t dev, int block, int size)
{
struct block_device *bdev;
struct buffer_head *bh;
bdev = bdget(kdev_t_to_nr(dev));
if (!bdev) {
printk("No block device for %s\n", __bdevname(dev));
BUG();
}
bh = __bread(bdev, block, size);
atomic_dec(&bdev->bd_count);
return bh;
}
static inline struct buffer_head * sb_bread(struct super_block *sb, int block)
{
return __bread(sb->s_bdev, block, sb->s_blocksize);
}
static inline struct buffer_head * sb_getblk(struct super_block *sb, int block)
{
return __getblk(sb->s_bdev, block, sb->s_blocksize);
}
static inline struct buffer_head * sb_get_hash_table(struct super_block *sb, int block)
{
return __get_hash_table(sb->s_bdev, block, sb->s_blocksize);
}
static inline void map_bh(struct buffer_head *bh, struct super_block *sb, int block)
{
bh->b_state |= 1 << BH_Mapped;
bh->b_bdev = sb->s_bdev;
bh->b_blocknr = block;
}
extern void wakeup_bdflush(void);
extern struct buffer_head *alloc_buffer_head(int async);
extern void free_buffer_head(struct buffer_head * bh);
extern int brw_page(int, struct page *, struct block_device *, sector_t [], int);
typedef int (get_block_t)(struct inode*,sector_t,struct buffer_head*,int);
/* Generic buffer handling for block filesystems.. */
extern int try_to_release_page(struct page * page, int gfp_mask);
extern int block_flushpage(struct page *page, unsigned long offset);
extern int block_symlink(struct inode *, const char *, int);
extern int block_write_full_page(struct page*, get_block_t*);
extern int block_read_full_page(struct page*, get_block_t*);
extern int block_prepare_write(struct page*, unsigned, unsigned, get_block_t*);
extern int cont_prepare_write(struct page*, unsigned, unsigned, get_block_t*,
unsigned long *);
extern int generic_cont_expand(struct inode *inode, loff_t size) ;
extern int block_commit_write(struct page *page, unsigned from, unsigned to);
extern int block_sync_page(struct page *);
sector_t generic_block_bmap(struct address_space *, sector_t, get_block_t *);
int generic_commit_write(struct file *, struct page *, unsigned, unsigned);
int block_truncate_page(struct address_space *, loff_t, get_block_t *);
extern int generic_direct_IO(int, struct inode *, struct kiobuf *, unsigned long, int, get_block_t *);
extern int generic_file_mmap(struct file *, struct vm_area_struct *); extern int generic_file_mmap(struct file *, struct vm_area_struct *);
extern int file_read_actor(read_descriptor_t * desc, struct page *page, unsigned long offset, unsigned long size); extern int file_read_actor(read_descriptor_t * desc, struct page *page, unsigned long offset, unsigned long size);
...@@ -1595,12 +1313,6 @@ extern ssize_t block_read(struct file *, char *, size_t, loff_t *); ...@@ -1595,12 +1313,6 @@ extern ssize_t block_read(struct file *, char *, size_t, loff_t *);
extern ssize_t char_write(struct file *, const char *, size_t, loff_t *); extern ssize_t char_write(struct file *, const char *, size_t, loff_t *);
extern ssize_t block_write(struct file *, const char *, size_t, loff_t *); extern ssize_t block_write(struct file *, const char *, size_t, loff_t *);
extern int file_fsync(struct file *, struct dentry *, int);
extern int generic_osync_inode(struct inode *, int);
#define OSYNC_METADATA (1<<0)
#define OSYNC_DATA (1<<1)
#define OSYNC_INODE (1<<2)
extern int inode_change_ok(struct inode *, struct iattr *); extern int inode_change_ok(struct inode *, struct iattr *);
extern int inode_setattr(struct inode *, struct iattr *); extern int inode_setattr(struct inode *, struct iattr *);
...@@ -1613,8 +1325,7 @@ static inline ino_t parent_ino(struct dentry *dentry) ...@@ -1613,8 +1325,7 @@ static inline ino_t parent_ino(struct dentry *dentry)
return res; return res;
} }
void __buffer_error(char *file, int line); #include <linux/buffer_head.h>
#define buffer_error() __buffer_error(__FILE__, __LINE__)
#endif /* __KERNEL__ */ #endif /* __KERNEL__ */
......
...@@ -233,11 +233,8 @@ enum jbd_state_bits { ...@@ -233,11 +233,8 @@ enum jbd_state_bits {
BH_JBDDirty, /* 1 if buffer is dirty but journaled */ BH_JBDDirty, /* 1 if buffer is dirty but journaled */
}; };
/* Return true if the buffer is one which JBD is managing */ BUFFER_FNS(JBD, jbd)
static inline int buffer_jbd(struct buffer_head *bh) BUFFER_FNS(JBDDirty, jbddirty)
{
return __buffer_state(bh, JBD);
}
static inline struct buffer_head *jh2bh(struct journal_head *jh) static inline struct buffer_head *jh2bh(struct journal_head *jh)
{ {
...@@ -838,7 +835,7 @@ static inline int buffer_jlist_eq(struct buffer_head *bh, int list) ...@@ -838,7 +835,7 @@ static inline int buffer_jlist_eq(struct buffer_head *bh, int list)
/* Return true if this buffer is dirty wrt the journal */ /* Return true if this buffer is dirty wrt the journal */
static inline int buffer_jdirty(struct buffer_head *bh) static inline int buffer_jdirty(struct buffer_head *bh)
{ {
return buffer_jbd(bh) && __buffer_state(bh, JBDDirty); return buffer_jbd(bh) && buffer_jbddirty(bh);
} }
/* Return true if it's a data buffer which journalling is managing */ /* Return true if it's a data buffer which journalling is managing */
......
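The JBD hunk above is the in-tree user of the function-generating macros outside buffer_head.h itself: a filesystem declares its private state bits starting at BH_PrivateStart, then expands accessors for them. A minimal sketch with a hypothetical bit name:

	enum example_state_bits {
		BH_Example = BH_PrivateStart,	/* first bit free for private use */
	};

	BUFFER_FNS(Example, example)
	/* generates set_buffer_example(), clear_buffer_example()
	 * and buffer_example() */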
...@@ -8,26 +8,6 @@ ...@@ -8,26 +8,6 @@
#include <linux/pagemap.h> #include <linux/pagemap.h>
#endif #endif
/*
* Buffer cache locking - note that interrupts may only unlock, not
* lock buffers.
*/
extern void __wait_on_buffer(struct buffer_head *);
static inline void wait_on_buffer(struct buffer_head * bh)
{
if (test_bit(BH_Lock, &bh->b_state))
__wait_on_buffer(bh);
}
static inline void lock_buffer(struct buffer_head * bh)
{
while (test_and_set_bit(BH_Lock, &bh->b_state))
__wait_on_buffer(bh);
}
extern void FASTCALL(unlock_buffer(struct buffer_head *bh));
/* /*
* super-block locking. Again, interrupts may only unlock * super-block locking. Again, interrupts may only unlock
* a super-block (although even this isn't done right now. * a super-block (although even this isn't done right now.
......
...@@ -463,7 +463,7 @@ int __set_page_dirty_buffers(struct page *page) ...@@ -463,7 +463,7 @@ int __set_page_dirty_buffers(struct page *page)
do { do {
if (buffer_uptodate(bh)) if (buffer_uptodate(bh))
set_bit(BH_Dirty, &bh->b_state); set_buffer_dirty(bh);
bh = bh->b_this_page; bh = bh->b_this_page;
} while (bh != head); } while (bh != head);
} }
......