Commit 1832e934 authored by Christoph Hellwig, committed by Linus Torvalds

[PATCH] remove global_bufferlist_lock

This patch addresses Andrew's FIXME in buffer.c: it adds a spinlock to the
reiserfs journal that is passed to the buffer list manipulation functions.
This obsoletes the checks for the lock being NULL in buffer.c, as well as
the global_bufferlist_lock itself.
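
Condensed from the hunks below, the new scheme has three parts: the journal
structure gains the lock, journal_init() initializes it, and every
buffer-list call passes it explicitly:

	spinlock_t j_dirty_buffers_lock ;	/* new field in struct reiserfs_journal */

	spin_lock_init(&SB_JOURNAL(p_s_sb)->j_dirty_buffers_lock) ;	/* in journal_init() */

	buffer_insert_list(&j->j_dirty_buffers_lock, bh, &j->j_dirty_buffers) ;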

In addition, osync_buffers_list() is changed to use list_for_each_prev(),
as in 2.4, instead of a hand-crafted loop.
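
Reduced to its skeleton, the loop change looks like this:

	/* before: hand-crafted backwards walk */
	for (p = list->prev;
			bh = BH_ENTRY(p), p != list;
			p = bh->b_inode_buffers.prev) {
		...
	}

	/* after: the standard helper from <linux/list.h> */
	list_for_each_prev(p, list) {
		bh = BH_ENTRY(p);
		...
	}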

A little comment to the reiserfs folks: your code would be _sooo_ much
easier to understand if you used temporary local variables for
often-referenced fields.
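
For instance (a hypothetical rewrite, not part of this patch), the
fsync_buffers_list() call in do_journal_end() below could shed its repeated
SB_JOURNAL() chains with a single temporary:

	struct reiserfs_journal *j = SB_JOURNAL(p_s_sb) ;

	fsync_buffers_list(&j->j_dirty_buffers_lock, &j->j_dirty_buffers) ;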
parent a0f9f9c7
fs/buffer.c
@@ -48,15 +48,6 @@ static struct bh_wait_queue_head {
 	wait_queue_head_t wqh;
 } ____cacheline_aligned_in_smp bh_wait_queue_heads[1<<BH_WAIT_TABLE_ORDER];
 
-/*
- * Several of these buffer list functions are exported to filesystems,
- * so we do funny things with the spinlocking to support those
- * filesystems while still using inode->i_bufferlist_lock for
- * most applications.
- * FIXME: put a spinlock in the reiserfs journal and kill this lock.
- */
-static spinlock_t global_bufferlist_lock = SPIN_LOCK_UNLOCKED;
-
 /*
  * Debug/devel support stuff
  */
@@ -448,8 +439,6 @@ __get_hash_table(struct block_device *bdev, sector_t block, int unused)
 void buffer_insert_list(spinlock_t *lock,
 		struct buffer_head *bh, struct list_head *list)
 {
-	if (lock == NULL)
-		lock = &global_bufferlist_lock;
 	spin_lock(lock);
 	list_del(&bh->b_inode_buffers);
 	list_add(&bh->b_inode_buffers, list);
@@ -701,14 +690,10 @@ static int osync_buffers_list(spinlock_t *lock, struct list_head *list)
 	struct list_head *p;
 	int err = 0;
 
-	if (lock == NULL)
-		lock = &global_bufferlist_lock;
-
 	spin_lock(lock);
 repeat:
-	for (p = list->prev;
-			bh = BH_ENTRY(p), p != list;
-			p = bh->b_inode_buffers.prev) {
+	list_for_each_prev(p, list) {
+		bh = BH_ENTRY(p);
 		if (buffer_locked(bh)) {
 			get_bh(bh);
 			spin_unlock(lock);
@@ -749,9 +734,6 @@ int fsync_buffers_list(spinlock_t *lock, struct list_head *list)
 	struct list_head tmp;
 	int err = 0, err2;
 
-	if (lock == NULL)
-		lock = &global_bufferlist_lock;
-
 	INIT_LIST_HEAD(&tmp);
 	spin_lock(lock);
fs/reiserfs/inode.c
@@ -105,9 +105,9 @@ inline void make_le_item_head (struct item_head * ih, const struct cpu_key * key
 }
 
 static void add_to_flushlist(struct inode *inode, struct buffer_head *bh) {
-  struct list_head *list = &(SB_JOURNAL(inode->i_sb)->j_dirty_buffers) ;
-  buffer_insert_list(NULL, bh, list) ;
+  struct reiserfs_journal *j = SB_JOURNAL(inode->i_sb) ;
+  buffer_insert_list(&j->j_dirty_buffers_lock, bh, &j->j_dirty_buffers) ;
 }
 
 //
fs/reiserfs/journal.c
@@ -2131,6 +2131,7 @@ int journal_init(struct super_block *p_s_sb, const char * j_dev_name, int old_fo
   INIT_LIST_HEAD(&SB_JOURNAL(p_s_sb)->j_bitmap_nodes) ;
   INIT_LIST_HEAD(&SB_JOURNAL(p_s_sb)->j_dirty_buffers) ;
+  spin_lock_init(&SB_JOURNAL(p_s_sb)->j_dirty_buffers_lock) ;
   reiserfs_allocate_list_bitmaps(p_s_sb, SB_JOURNAL(p_s_sb)->j_list_bitmap,
                                  SB_BMAP_NR(p_s_sb)) ;
   allocate_bitmap_nodes(p_s_sb) ;
@@ -3125,7 +3126,8 @@ printk("journal-2020: do_journal_end: BAD desc->j_len is ZERO\n") ;
   SB_JOURNAL_LIST_INDEX(p_s_sb) = jindex ;
 
   /* write any buffers that must hit disk before this commit is done */
-  fsync_buffers_list(NULL, &(SB_JOURNAL(p_s_sb)->j_dirty_buffers)) ;
+  fsync_buffers_list(&(SB_JOURNAL(p_s_sb)->j_dirty_buffers_lock),
+                     &(SB_JOURNAL(p_s_sb)->j_dirty_buffers)) ;
 
   /* honor the flush and async wishes from the caller */
   if (flush) {
......
...@@ -241,6 +241,7 @@ struct reiserfs_journal { ...@@ -241,6 +241,7 @@ struct reiserfs_journal {
int j_used_bitmap_nodes ; int j_used_bitmap_nodes ;
struct list_head j_bitmap_nodes ; struct list_head j_bitmap_nodes ;
struct list_head j_dirty_buffers ; struct list_head j_dirty_buffers ;
spinlock_t j_dirty_buffers_lock ; /* protects j_dirty_buffers */
struct reiserfs_list_bitmap j_list_bitmap[JOURNAL_NUM_BITMAPS] ; /* array of bitmaps to record the deleted blocks */ struct reiserfs_list_bitmap j_list_bitmap[JOURNAL_NUM_BITMAPS] ; /* array of bitmaps to record the deleted blocks */
struct reiserfs_journal_list j_journal_list[JOURNAL_LIST_COUNT] ; /* array of all the journal lists */ struct reiserfs_journal_list j_journal_list[JOURNAL_LIST_COUNT] ; /* array of all the journal lists */
struct reiserfs_journal_cnode *j_hash_table[JOURNAL_HASH_SIZE] ; /* hash table for real buffer heads in current trans */ struct reiserfs_journal_cnode *j_hash_table[JOURNAL_HASH_SIZE] ; /* hash table for real buffer heads in current trans */