Commit 8d49bf3f authored by Andrew Morton, committed by Kai Germaschewski

[PATCH] Fix synchronous writers to wait properly for the result

Mikulas Patocka <mikulas@artax.karlin.mff.cuni.cz> points out a bug in
ll_rw_block() usage.

Typical usage is:

	mark_buffer_dirty(bh);
	ll_rw_block(WRITE, 1, &bh);
	wait_on_buffer(bh);

The problem is that if the buffer was locked on entry to this code sequence
(due to in-progress I/O), ll_rw_block() will neither wait for that I/O nor
start new I/O.  So this code ends up waiting on the _old_ I/O, and then
continues execution, leaving the buffer dirty.
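
Concretely (an illustrative annotated trace; ll_rw_block() simply skips
buffers it cannot lock):

	mark_buffer_dirty(bh);
	ll_rw_block(WRITE, 1, &bh);	/* bh still locked by the old I/O:
					 * the trylock fails, bh is skipped,
					 * and no new I/O is submitted */
	wait_on_buffer(bh);		/* returns when the _old_ I/O
					 * completes; bh is still dirty */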

It turns out that all callers were only writing one buffer, and they were all
waiting on that writeout.  So I added a new sync_dirty_buffer() function:

	void sync_dirty_buffer(struct buffer_head *bh)
	{
		lock_buffer(bh);
		if (test_clear_buffer_dirty(bh)) {
			get_bh(bh);
			bh->b_end_io = end_buffer_io_sync;
			submit_bh(WRITE, bh);
			wait_on_buffer(bh);
		} else {
			unlock_buffer(bh);
		}
	}

which allowed a fair amount of code to be removed, while adding the desired
data-integrity guarantees.
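
A typical call-site conversion (this pattern repeats throughout the diff
below) is:

	/* before */
	mark_buffer_dirty(bh);
	if (do_sync) {
		ll_rw_block(WRITE, 1, &bh);
		wait_on_buffer(bh);
	}

	/* after */
	mark_buffer_dirty(bh);
	if (do_sync)
		sync_dirty_buffer(bh);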

UFS has its own wrappers around ll_rw_block() which got in the way, so this
operation was open-coded in that case.
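
In the UFS case the data-integrity sequence (wait for any in-progress I/O,
start the new write, then wait for it) therefore appears literally at each
call site:

	if (sb->s_flags & MS_SYNCHRONOUS) {
		ubh_wait_on_buffer (UCPI_UBH);	/* wait on in-progress I/O */
		ubh_ll_rw_block (WRITE, 1, (struct ufs_buffer_head **)&ucpi);
		ubh_wait_on_buffer (UCPI_UBH);	/* wait on the new writeout */
	}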
Parent: 3d9d6859
@@ -2618,6 +2618,24 @@ void ll_rw_block(int rw, int nr, struct buffer_head *bhs[])
 	}
 }
 
+/*
+ * For a data-integrity writeout, we need to wait upon any in-progress I/O
+ * and then start new I/O and then wait upon it.
+ */
+void sync_dirty_buffer(struct buffer_head *bh)
+{
+	WARN_ON(atomic_read(&bh->b_count) < 1);
+	lock_buffer(bh);
+	if (test_clear_buffer_dirty(bh)) {
+		get_bh(bh);
+		bh->b_end_io = end_buffer_io_sync;
+		submit_bh(WRITE, bh);
+		wait_on_buffer(bh);
+	} else {
+		unlock_buffer(bh);
+	}
+}
+
 /*
  * Sanity checks for try_to_free_buffers.
  */
@@ -233,10 +233,8 @@ void ext2_free_blocks (struct inode * inode, unsigned long block,
 	}
 	mark_buffer_dirty(bitmap_bh);
-	if (sb->s_flags & MS_SYNCHRONOUS) {
-		ll_rw_block(WRITE, 1, &bitmap_bh);
-		wait_on_buffer(bitmap_bh);
-	}
+	if (sb->s_flags & MS_SYNCHRONOUS)
+		sync_dirty_buffer(bitmap_bh);
 	group_release_blocks(desc, bh2, group_freed);
 	freed += group_freed;
@@ -466,10 +464,8 @@ int ext2_new_block (struct inode * inode, unsigned long goal,
 	write_unlock(&EXT2_I(inode)->i_meta_lock);
 	mark_buffer_dirty(bitmap_bh);
-	if (sb->s_flags & MS_SYNCHRONOUS) {
-		ll_rw_block(WRITE, 1, &bitmap_bh);
-		wait_on_buffer(bitmap_bh);
-	}
+	if (sb->s_flags & MS_SYNCHRONOUS)
+		sync_dirty_buffer(bitmap_bh);
 	ext2_debug ("allocating block %d. ", block);
@@ -146,10 +146,8 @@ void ext2_free_inode (struct inode * inode)
 		mark_buffer_dirty(EXT2_SB(sb)->s_sbh);
 	}
 	mark_buffer_dirty(bitmap_bh);
-	if (sb->s_flags & MS_SYNCHRONOUS) {
-		ll_rw_block(WRITE, 1, &bitmap_bh);
-		wait_on_buffer(bitmap_bh);
-	}
+	if (sb->s_flags & MS_SYNCHRONOUS)
+		sync_dirty_buffer(bitmap_bh);
 	sb->s_dirt = 1;
 error_return:
 	brelse(bitmap_bh);
@@ -485,10 +483,8 @@ struct inode * ext2_new_inode(struct inode * dir, int mode)
 	ext2_set_bit(i, bitmap_bh->b_data);
 	mark_buffer_dirty(bitmap_bh);
-	if (sb->s_flags & MS_SYNCHRONOUS) {
-		ll_rw_block(WRITE, 1, &bitmap_bh);
-		wait_on_buffer(bitmap_bh);
-	}
+	if (sb->s_flags & MS_SYNCHRONOUS)
+		sync_dirty_buffer(bitmap_bh);
 	brelse(bitmap_bh);
 	ino = group * EXT2_INODES_PER_GROUP(sb) + i + 1;
@@ -443,10 +443,8 @@ static int ext2_alloc_branch(struct inode *inode,
 		 * But we now rely upon generic_osync_inode()
 		 * and b_inode_buffers.  But not for directories.
 		 */
-		if (S_ISDIR(inode->i_mode) && IS_DIRSYNC(inode)) {
-			ll_rw_block(WRITE, 1, &bh);
-			wait_on_buffer(bh);
-		}
+		if (S_ISDIR(inode->i_mode) && IS_DIRSYNC(inode))
+			sync_dirty_buffer(bh);
 		parent = nr;
 	}
 	if (n == num)
@@ -1208,8 +1206,7 @@ static int ext2_update_inode(struct inode * inode, int do_sync)
 		raw_inode->i_block[n] = ei->i_data[n];
 	mark_buffer_dirty(bh);
 	if (do_sync) {
-		ll_rw_block (WRITE, 1, &bh);
-		wait_on_buffer (bh);
+		sync_dirty_buffer(bh);
 		if (buffer_req(bh) && !buffer_uptodate(bh)) {
 			printk ("IO error syncing ext2 inode [%s:%08lx]\n",
 				sb->s_id, (unsigned long) ino);
@@ -842,8 +842,7 @@ static void ext2_sync_super(struct super_block *sb, struct ext2_super_block *es)
 {
 	es->s_wtime = cpu_to_le32(get_seconds());
 	mark_buffer_dirty(EXT2_SB(sb)->s_sbh);
-	ll_rw_block(WRITE, 1, &EXT2_SB(sb)->s_sbh);
-	wait_on_buffer(EXT2_SB(sb)->s_sbh);
+	sync_dirty_buffer(EXT2_SB(sb)->s_sbh);
 	sb->s_dirt = 0;
 }
@@ -774,8 +774,7 @@ ext2_xattr_set2(struct inode *inode, struct buffer_head *old_bh,
 		}
 		mark_buffer_dirty(new_bh);
 		if (IS_SYNC(inode)) {
-			ll_rw_block(WRITE, 1, &new_bh);
-			wait_on_buffer(new_bh);
+			sync_dirty_buffer(new_bh);
 			error = -EIO;
 			if (buffer_req(new_bh) && !buffer_uptodate(new_bh))
 				goto cleanup;
@@ -865,10 +864,8 @@ ext2_xattr_delete_inode(struct inode *inode)
 		HDR(bh)->h_refcount = cpu_to_le32(
 			le32_to_cpu(HDR(bh)->h_refcount) - 1);
 		mark_buffer_dirty(bh);
-		if (IS_SYNC(inode)) {
-			ll_rw_block(WRITE, 1, &bh);
-			wait_on_buffer(bh);
-		}
+		if (IS_SYNC(inode))
+			sync_dirty_buffer(bh);
 		DQUOT_FREE_BLOCK(inode, 1);
 	}
 	EXT2_I(inode)->i_file_acl = 0;
@@ -1627,10 +1627,8 @@ static void ext3_commit_super (struct super_block * sb,
 	es->s_wtime = cpu_to_le32(get_seconds());
 	BUFFER_TRACE(EXT3_SB(sb)->s_sbh, "marking dirty");
 	mark_buffer_dirty(EXT3_SB(sb)->s_sbh);
-	if (sync) {
-		ll_rw_block(WRITE, 1, &EXT3_SB(sb)->s_sbh);
-		wait_on_buffer(EXT3_SB(sb)->s_sbh);
-	}
+	if (sync)
+		sync_dirty_buffer(EXT3_SB(sb)->s_sbh);
 }
@@ -562,8 +562,7 @@ void journal_commit_transaction(journal_t *journal)
 	{
 		struct buffer_head *bh = jh2bh(descriptor);
 		set_buffer_uptodate(bh);
-		ll_rw_block(WRITE, 1, &bh);
-		wait_on_buffer(bh);
+		sync_dirty_buffer(bh);
 		__brelse(bh);		/* One for getblk() */
 		journal_unlock_journal_head(descriptor);
 	}
@@ -960,9 +960,10 @@ void journal_update_superblock(journal_t *journal, int wait)
 	BUFFER_TRACE(bh, "marking dirty");
 	mark_buffer_dirty(bh);
-	ll_rw_block(WRITE, 1, &bh);
 	if (wait)
-		wait_on_buffer(bh);
+		sync_dirty_buffer(bh);
+	else
+		ll_rw_block(WRITE, 1, &bh);
 	/* If we have just flushed the log (by marking s_start==0), then
 	 * any future commit will have to be careful to update the
@@ -1296,8 +1297,7 @@ static int journal_convert_superblock_v1(journal_t *journal,
 	bh = journal->j_sb_buffer;
 	BUFFER_TRACE(bh, "marking dirty");
 	mark_buffer_dirty(bh);
-	ll_rw_block(WRITE, 1, &bh);
-	wait_on_buffer(bh);
+	sync_dirty_buffer(bh);
 	return 0;
 }
@@ -1079,8 +1079,7 @@ int journal_dirty_data (handle_t *handle, struct buffer_head *bh)
 			atomic_inc(&bh->b_count);
 			spin_unlock(&journal_datalist_lock);
 			need_brelse = 1;
-			ll_rw_block(WRITE, 1, &bh);
-			wait_on_buffer(bh);
+			sync_dirty_buffer(bh);
 			spin_lock(&journal_datalist_lock);
 			/* The buffer may become locked again at any
 			   time if it is redirtied */
@@ -1361,8 +1360,7 @@ void journal_sync_buffer(struct buffer_head *bh)
 		}
 		atomic_inc(&bh->b_count);
 		spin_unlock(&journal_datalist_lock);
-		ll_rw_block (WRITE, 1, &bh);
-		wait_on_buffer(bh);
+		sync_dirty_buffer(bh);
 		__brelse(bh);
 		goto out;
 	}
@@ -2980,8 +2980,7 @@ static void duplicateIXtree(struct super_block *sb, s64 blkno,
 	j_sb->s_flag |= JFS_BAD_SAIT;
 	mark_buffer_dirty(bh);
-	ll_rw_block(WRITE, 1, &bh);
-	wait_on_buffer(bh);
+	sync_dirty_buffer(bh);
 	brelse(bh);
 	return;
 }
@@ -449,8 +449,7 @@ int updateSuper(struct super_block *sb, uint state)
 	}
 	mark_buffer_dirty(bh);
-	ll_rw_block(WRITE, 1, &bh);
-	wait_on_buffer(bh);
+	sync_dirty_buffer(bh);
 	brelse(bh);
 	return 0;
@@ -972,10 +972,8 @@ int jfs_symlink(struct inode *dip, struct dentry *dentry, const char *name)
 #if 0
 			set_buffer_uptodate(bp);
 			mark_buffer_dirty(bp, 1);
-			if (IS_SYNC(dip)) {
-				ll_rw_block(WRITE, 1, &bp);
-				wait_on_buffer(bp);
-			}
+			if (IS_SYNC(dip))
+				sync_dirty_buffer(bp);
 			brelse(bp);
 #endif	/* 0 */
 			ssize -= copy_size;
@@ -243,8 +243,7 @@ int jfs_extendfs(struct super_block *sb, s64 newLVSize, int newLogSize)
 	/* synchronously update superblock */
 	mark_buffer_dirty(bh);
-	ll_rw_block(WRITE, 1, &bh);
-	wait_on_buffer(bh);
+	sync_dirty_buffer(bh);
 	brelse(bh);
 	/*
@@ -512,15 +511,13 @@ int jfs_extendfs(struct super_block *sb, s64 newLVSize, int newLogSize)
 		memcpy(j_sb2, j_sb, sizeof (struct jfs_superblock));
 		mark_buffer_dirty(bh);
-		ll_rw_block(WRITE, 1, &bh2);
-		wait_on_buffer(bh2);
+		sync_dirty_buffer(bh2);
 		brelse(bh2);
 	}
 	/* write primary superblock */
 	mark_buffer_dirty(bh);
-	ll_rw_block(WRITE, 1, &bh);
-	wait_on_buffer(bh);
+	sync_dirty_buffer(bh);
 	brelse(bh);
 	goto resume;
@@ -517,8 +517,7 @@ int minix_sync_inode(struct inode * inode)
 	bh = minix_update_inode(inode);
 	if (bh && buffer_dirty(bh))
 	{
-		ll_rw_block(WRITE, 1, &bh);
-		wait_on_buffer(bh);
+		sync_dirty_buffer(bh);
 		if (buffer_req(bh) && !buffer_uptodate(bh))
 		{
 			printk ("IO error syncing minix inode [%s:%08lx]\n",
@@ -505,8 +505,7 @@ static struct buffer_head *read_ntfs_boot_sector(struct super_block *sb,
 			memcpy(bh_primary->b_data, bh_backup->b_data,
 					sb->s_blocksize);
 			mark_buffer_dirty(bh_primary);
-			ll_rw_block(WRITE, 1, &bh_primary);
-			wait_on_buffer(bh_primary);
+			sync_dirty_buffer(bh_primary);
 			if (buffer_uptodate(bh_primary)) {
 				brelse(bh_backup);
 				return bh_primary;
@@ -44,8 +44,7 @@ int qnx4_sync_inode(struct inode *inode)
 	bh = qnx4_update_inode(inode);
 	if (bh && buffer_dirty(bh))
 	{
-		ll_rw_block(WRITE, 1, &bh);
-		wait_on_buffer(bh);
+		sync_dirty_buffer(bh);
 		if (buffer_req(bh) && !buffer_uptodate(bh))
 		{
 			printk ("IO error syncing qnx4 inode [%s:%08lx]\n",
@@ -735,8 +735,7 @@ reiserfs_panic(s, "journal-539: flush_commit_list: BAD count(%d) > orig_commit_l
 	}
 	mark_buffer_dirty(jl->j_commit_bh) ;
-	ll_rw_block(WRITE, 1, &(jl->j_commit_bh)) ;
-	wait_on_buffer(jl->j_commit_bh) ;
+	sync_dirty_buffer(jl->j_commit_bh) ;
 	if (!buffer_uptodate(jl->j_commit_bh)) {
 		reiserfs_panic(s, "journal-615: buffer write failed\n") ;
 	}
@@ -828,8 +827,7 @@ static int _update_journal_header_block(struct super_block *p_s_sb, unsigned lon
 	jh->j_first_unflushed_offset = cpu_to_le32(offset) ;
 	jh->j_mount_id = cpu_to_le32(SB_JOURNAL(p_s_sb)->j_mount_id) ;
 	set_buffer_dirty(SB_JOURNAL(p_s_sb)->j_header_bh) ;
-	ll_rw_block(WRITE, 1, &(SB_JOURNAL(p_s_sb)->j_header_bh)) ;
-	wait_on_buffer((SB_JOURNAL(p_s_sb)->j_header_bh)) ;
+	sync_dirty_buffer(SB_JOURNAL(p_s_sb)->j_header_bh) ;
 	if (!buffer_uptodate(SB_JOURNAL(p_s_sb)->j_header_bh)) {
 		printk( "reiserfs: journal-837: IO error during journal replay\n" );
 		return -EIO ;
@@ -120,8 +120,7 @@ int reiserfs_resize (struct super_block * s, unsigned long block_count_new)
 		mark_buffer_dirty(bitmap[i].bh) ;
 		set_buffer_uptodate(bitmap[i].bh);
-		ll_rw_block(WRITE, 1, &bitmap[i].bh);
-		wait_on_buffer(bitmap[i].bh);
+		sync_dirty_buffer(bitmap[i].bh);
 		// update bitmap_info stuff
 		bitmap[i].first_zero_hint=1;
 		bitmap[i].free_count = sb_blocksize(sb) * 8 - 1;
@@ -265,8 +265,7 @@ int sysv_sync_inode(struct inode * inode)
 	bh = sysv_update_inode(inode);
 	if (bh && buffer_dirty(bh)) {
-		ll_rw_block(WRITE, 1, &bh);
-		wait_on_buffer(bh);
+		sync_dirty_buffer(bh);
 		if (buffer_req(bh) && !buffer_uptodate(bh)) {
 			printk ("IO error syncing sysv inode [%s:%08lx]\n",
 				inode->i_sb->s_id, inode->i_ino);
@@ -15,10 +15,8 @@ enum {DIRECT = 10, DEPTH = 4};	/* Have triple indirect */
 static inline void dirty_indirect(struct buffer_head *bh, struct inode *inode)
 {
 	mark_buffer_dirty_inode(bh, inode);
-	if (IS_SYNC(inode)) {
-		ll_rw_block (WRITE, 1, &bh);
-		wait_on_buffer (bh);
-	}
+	if (IS_SYNC(inode))
+		sync_dirty_buffer(bh);
 }
 static int block_to_path(struct inode *inode, long block, int offsets[DEPTH])
@@ -1520,8 +1520,7 @@ udf_update_inode(struct inode *inode, int do_sync)
 	mark_buffer_dirty(bh);
 	if (do_sync)
 	{
-		ll_rw_block(WRITE, 1, &bh);
-		wait_on_buffer(bh);
+		sync_dirty_buffer(bh);
 		if (buffer_req(bh) && !buffer_uptodate(bh))
 		{
 			printk("IO error syncing udf inode [%s:%08lx]\n",
@@ -114,6 +114,7 @@ void ufs_free_fragments (struct inode * inode, unsigned fragment, unsigned count
 	ubh_mark_buffer_dirty (USPI_UBH);
 	ubh_mark_buffer_dirty (UCPI_UBH);
 	if (sb->s_flags & MS_SYNCHRONOUS) {
+		ubh_wait_on_buffer (UCPI_UBH);
 		ubh_ll_rw_block (WRITE, 1, (struct ufs_buffer_head **)&ucpi);
 		ubh_wait_on_buffer (UCPI_UBH);
 	}
@@ -199,6 +200,7 @@ void ufs_free_blocks (struct inode * inode, unsigned fragment, unsigned count) {
 	ubh_mark_buffer_dirty (USPI_UBH);
 	ubh_mark_buffer_dirty (UCPI_UBH);
 	if (sb->s_flags & MS_SYNCHRONOUS) {
+		ubh_wait_on_buffer (UCPI_UBH);
 		ubh_ll_rw_block (WRITE, 1, (struct ufs_buffer_head **)&ucpi);
 		ubh_wait_on_buffer (UCPI_UBH);
 	}
@@ -228,10 +230,8 @@ void ufs_free_blocks (struct inode * inode, unsigned fragment, unsigned count) {
 	memset (bh->b_data, 0, sb->s_blocksize); \
 	set_buffer_uptodate(bh); \
 	mark_buffer_dirty (bh); \
-	if (IS_SYNC(inode)) { \
-		ll_rw_block (WRITE, 1, &bh); \
-		wait_on_buffer (bh); \
-	} \
+	if (IS_SYNC(inode)) \
+		sync_dirty_buffer(bh); \
 	brelse (bh); \
 }
@@ -364,10 +364,8 @@ unsigned ufs_new_fragments (struct inode * inode, u32 * p, unsigned fragment,
 			clear_buffer_dirty(bh);
 			bh->b_blocknr = result + i;
 			mark_buffer_dirty (bh);
-			if (IS_SYNC(inode)) {
-				ll_rw_block (WRITE, 1, &bh);
-				wait_on_buffer (bh);
-			}
+			if (IS_SYNC(inode))
+				sync_dirty_buffer(bh);
 			brelse (bh);
 		}
 		else
@@ -459,6 +457,7 @@ unsigned ufs_add_fragments (struct inode * inode, unsigned fragment,
 	ubh_mark_buffer_dirty (USPI_UBH);
 	ubh_mark_buffer_dirty (UCPI_UBH);
 	if (sb->s_flags & MS_SYNCHRONOUS) {
+		ubh_wait_on_buffer (UCPI_UBH);
 		ubh_ll_rw_block (WRITE, 1, (struct ufs_buffer_head **)&ucpi);
 		ubh_wait_on_buffer (UCPI_UBH);
 	}
@@ -584,6 +583,7 @@ unsigned ufs_alloc_fragments (struct inode * inode, unsigned cgno,
 	ubh_mark_buffer_dirty (USPI_UBH);
 	ubh_mark_buffer_dirty (UCPI_UBH);
 	if (sb->s_flags & MS_SYNCHRONOUS) {
+		ubh_wait_on_buffer (UCPI_UBH);
 		ubh_ll_rw_block (WRITE, 1, (struct ufs_buffer_head **)&ucpi);
 		ubh_wait_on_buffer (UCPI_UBH);
 	}
@@ -356,10 +356,8 @@ void ufs_set_link(struct inode *dir, struct ufs_dir_entry *de,
 	dir->i_version++;
 	de->d_ino = cpu_to_fs32(dir->i_sb, inode->i_ino);
 	mark_buffer_dirty(bh);
-	if (IS_DIRSYNC(dir)) {
-		ll_rw_block (WRITE, 1, &bh);
-		wait_on_buffer(bh);
-	}
+	if (IS_DIRSYNC(dir))
+		sync_dirty_buffer(bh);
 	brelse (bh);
 }
@@ -457,10 +455,8 @@ int ufs_add_link(struct dentry *dentry, struct inode *inode)
 	de->d_ino = cpu_to_fs32(sb, inode->i_ino);
 	ufs_set_de_type(sb, de, inode->i_mode);
 	mark_buffer_dirty(bh);
-	if (IS_DIRSYNC(dir)) {
-		ll_rw_block (WRITE, 1, &bh);
-		wait_on_buffer (bh);
-	}
+	if (IS_DIRSYNC(dir))
+		sync_dirty_buffer(bh);
 	brelse (bh);
 	dir->i_mtime = dir->i_ctime = CURRENT_TIME;
 	dir->i_version++;
@@ -508,10 +504,8 @@ int ufs_delete_entry (struct inode * inode, struct ufs_dir_entry * dir,
 	inode->i_ctime = inode->i_mtime = CURRENT_TIME;
 	mark_inode_dirty(inode);
 	mark_buffer_dirty(bh);
-	if (IS_DIRSYNC(inode)) {
-		ll_rw_block(WRITE, 1, &bh);
-		wait_on_buffer(bh);
-	}
+	if (IS_DIRSYNC(inode))
+		sync_dirty_buffer(bh);
 	brelse(bh);
 	UFSD(("EXIT\n"))
 	return 0;
@@ -124,6 +124,7 @@ void ufs_free_inode (struct inode * inode)
 	ubh_mark_buffer_dirty (USPI_UBH);
 	ubh_mark_buffer_dirty (UCPI_UBH);
 	if (sb->s_flags & MS_SYNCHRONOUS) {
+		ubh_wait_on_buffer (UCPI_UBH);
 		ubh_ll_rw_block (WRITE, 1, (struct ufs_buffer_head **) &ucpi);
 		ubh_wait_on_buffer (UCPI_UBH);
 	}
@@ -248,6 +249,7 @@ struct inode * ufs_new_inode(struct inode * dir, int mode)
 	ubh_mark_buffer_dirty (USPI_UBH);
 	ubh_mark_buffer_dirty (UCPI_UBH);
 	if (sb->s_flags & MS_SYNCHRONOUS) {
+		ubh_wait_on_buffer (UCPI_UBH);
 		ubh_ll_rw_block (WRITE, 1, (struct ufs_buffer_head **) &ucpi);
 		ubh_wait_on_buffer (UCPI_UBH);
 	}
@@ -298,10 +298,8 @@ static struct buffer_head * ufs_block_getfrag (struct inode *inode,
 	}
 	mark_buffer_dirty(bh);
-	if (IS_SYNC(inode)) {
-		ll_rw_block (WRITE, 1, &bh);
-		wait_on_buffer (bh);
-	}
+	if (IS_SYNC(inode))
+		sync_dirty_buffer(bh);
 	inode->i_ctime = CURRENT_TIME;
 	mark_inode_dirty(inode);
 out:
@@ -635,10 +633,8 @@ static int ufs_update_inode(struct inode * inode, int do_sync)
 		memset (ufs_inode, 0, sizeof(struct ufs_inode));
 	mark_buffer_dirty(bh);
-	if (do_sync) {
-		ll_rw_block (WRITE, 1, &bh);
-		wait_on_buffer (bh);
-	}
+	if (do_sync)
+		sync_dirty_buffer(bh);
 	brelse (bh);
 	UFSD(("EXIT\n"))
@@ -284,6 +284,7 @@ next:;
 		}
 	}
 	if (IS_SYNC(inode) && ind_ubh && ubh_buffer_dirty(ind_ubh)) {
+		ubh_wait_on_buffer (ind_ubh);
 		ubh_ll_rw_block (WRITE, 1, &ind_ubh);
 		ubh_wait_on_buffer (ind_ubh);
 	}
@@ -351,6 +352,7 @@ static int ufs_trunc_dindirect (struct inode * inode, unsigned offset, u32 * p)
 		}
 	}
 	if (IS_SYNC(inode) && dind_bh && ubh_buffer_dirty(dind_bh)) {
+		ubh_wait_on_buffer (dind_bh);
 		ubh_ll_rw_block (WRITE, 1, &dind_bh);
 		ubh_wait_on_buffer (dind_bh);
 	}
@@ -415,6 +417,7 @@ static int ufs_trunc_tindirect (struct inode * inode)
 		}
 	}
 	if (IS_SYNC(inode) && tind_bh && ubh_buffer_dirty(tind_bh)) {
+		ubh_wait_on_buffer (tind_bh);
 		ubh_ll_rw_block (WRITE, 1, &tind_bh);
 		ubh_wait_on_buffer (tind_bh);
 	}
@@ -169,6 +169,7 @@ struct buffer_head *alloc_buffer_head(void);
 void free_buffer_head(struct buffer_head * bh);
 void FASTCALL(unlock_buffer(struct buffer_head *bh));
 void ll_rw_block(int, int, struct buffer_head * bh[]);
+void sync_dirty_buffer(struct buffer_head *bh);
 int submit_bh(int, struct buffer_head *);
 void write_boundary_block(struct block_device *bdev,
 			sector_t bblock, unsigned blocksize);
@@ -155,13 +155,8 @@ static inline void hfs_buffer_dirty(hfs_buffer buffer) {
 }
 static inline void hfs_buffer_sync(hfs_buffer buffer) {
-	while (buffer_locked(buffer)) {
-		wait_on_buffer(buffer);
-	}
-	if (buffer_dirty(buffer)) {
-		ll_rw_block(WRITE, 1, &buffer);
-		wait_on_buffer(buffer);
-	}
+	if (buffer_dirty(buffer))
+		sync_dirty_buffer(buffer);
 }
 static inline void *hfs_buffer_data(const hfs_buffer buffer) {
@@ -208,6 +208,7 @@ EXPORT_SYMBOL(close_bdev_excl);
 EXPORT_SYMBOL(__brelse);
 EXPORT_SYMBOL(__bforget);
 EXPORT_SYMBOL(ll_rw_block);
+EXPORT_SYMBOL(sync_dirty_buffer);
 EXPORT_SYMBOL(submit_bh);
 EXPORT_SYMBOL(unlock_buffer);
 EXPORT_SYMBOL(__wait_on_buffer);