Commit 6fe2ab38 authored by Andrew Morton's avatar Andrew Morton Committed by Linus Torvalds

[PATCH] JBD: remove jh_splice_lock

This was a strange spinlock which was designed to prevent another CPU from
ripping a buffer's journal_head away while this CPU was inspecting its state.

Really, we don't need it - we can inspect that state directly from bh->b_state.

So kill it off, along with a few things which used it which are themselves
not actually used any more.
parent 13d8498a
......@@ -149,11 +149,11 @@ static int __cleanup_transaction(journal_t *journal, transaction_t *transaction)
* BUF_CLEAN.
*/
/*
* AKPM: I think the buffer_jdirty test is redundant - it
* AKPM: I think the buffer_jbddirty test is redundant - it
* shouldn't have NULL b_transaction?
*/
next_jh = jh->b_cpnext;
if (!buffer_dirty(bh) && !buffer_jdirty(bh)) {
if (!buffer_dirty(bh) && !buffer_jbddirty(bh)) {
BUFFER_TRACE(bh, "remove from checkpoint");
__journal_remove_checkpoint(jh);
__journal_remove_journal_head(bh);
......@@ -528,7 +528,7 @@ void __journal_insert_checkpoint(struct journal_head *jh,
transaction_t *transaction)
{
JBUFFER_TRACE(jh, "entry");
J_ASSERT_JH(jh, buffer_dirty(jh2bh(jh)) || buffer_jdirty(jh2bh(jh)));
J_ASSERT_JH(jh, buffer_dirty(jh2bh(jh)) || buffer_jbddirty(jh2bh(jh)));
J_ASSERT_JH(jh, jh->b_cp_transaction == NULL);
assert_spin_locked(&journal_datalist_lock);
......
......@@ -497,7 +497,7 @@ void journal_commit_transaction(journal_t *journal)
jh = commit_transaction->t_shadow_list->b_tprev;
bh = jh2bh(jh);
clear_bit(BH_JWrite, &bh->b_state);
J_ASSERT_BH(bh, buffer_jdirty(bh));
J_ASSERT_BH(bh, buffer_jbddirty(bh));
/* The metadata is now released for reuse, but we need
to remember it against this transaction so that when
......@@ -682,7 +682,7 @@ void journal_commit_transaction(journal_t *journal)
clear_buffer_jbddirty(bh);
}
if (buffer_jdirty(bh)) {
if (buffer_jbddirty(bh)) {
JBUFFER_TRACE(jh, "add to new checkpointing trans");
__journal_insert_checkpoint(jh, commit_transaction);
JBUFFER_TRACE(jh, "refile for checkpoint writeback");
......
......@@ -116,38 +116,6 @@ static int journal_convert_superblock_v1(journal_t *, journal_superblock_t *);
*/
spinlock_t journal_datalist_lock = SPIN_LOCK_UNLOCKED;
/*
* jh_splice_lock needs explanation.
*
* In a number of places we want to do things like:
*
* if (buffer_jbd(bh) && bh2jh(bh)->foo)
*
* This is racy on SMP, because another CPU could remove the journal_head
* in the middle of this expression. We need locking.
*
* But we can greatly optimise the locking cost by testing BH_JBD
* outside the lock. So, effectively:
*
* ret = 0;
* if (buffer_jbd(bh)) {
* spin_lock(&jh_splice_lock);
* if (buffer_jbd(bh)) { (* Still there? *)
* ret = bh2jh(bh)->foo;
* }
* spin_unlock(&jh_splice_lock);
* }
* return ret;
*
* Now, that protects us from races where another CPU can remove the
* journal_head. But it doesn't defend us from the situation where another
* CPU can *add* a journal_head. This is a correctness issue. But it's not
* a problem because a) the calling code was *already* racy and b) it often
* can't happen at the call site and c) the places where we add journal_heads
* tend to be under external locking.
*/
spinlock_t jh_splice_lock = SPIN_LOCK_UNLOCKED;
/*
* List of all journals in the system. Protected by the BKL.
*/
......@@ -404,7 +372,7 @@ int journal_write_metadata_buffer(transaction_t *transaction,
* also part of a shared mapping, and another thread has
* decided to launch a writepage() against this buffer.
*/
J_ASSERT_JH(jh_in, buffer_jdirty(jh2bh(jh_in)));
J_ASSERT_JH(jh_in, buffer_jbddirty(jh2bh(jh_in)));
/*
* If a new transaction has already done a buffer copy-out, then
......@@ -1761,16 +1729,10 @@ struct journal_head *journal_add_journal_head(struct buffer_head *bh)
journal_free_journal_head(jh);
jh = bh->b_private;
} else {
/*
* We actually don't need jh_splice_lock when
* adding a journal_head - only on removal.
*/
spin_lock(&jh_splice_lock);
set_bit(BH_JBD, &bh->b_state);
bh->b_private = jh;
jh->b_bh = bh;
atomic_inc(&bh->b_count);
spin_unlock(&jh_splice_lock);
BUFFER_TRACE(bh, "added journal_head");
}
}
......@@ -1808,12 +1770,10 @@ void __journal_remove_journal_head(struct buffer_head *bh)
J_ASSERT_BH(bh, buffer_jbd(bh));
J_ASSERT_BH(bh, jh2bh(jh) == bh);
BUFFER_TRACE(bh, "remove journal_head");
spin_lock(&jh_splice_lock);
bh->b_private = NULL;
jh->b_bh = NULL; /* debug, really */
clear_bit(BH_JBD, &bh->b_state);
__brelse(bh);
spin_unlock(&jh_splice_lock);
journal_free_journal_head(jh);
} else {
BUFFER_TRACE(bh, "journal_head was locked");
......
......@@ -1131,7 +1131,7 @@ void journal_release_buffer (handle_t *handle, struct buffer_head *bh)
spin_lock(&journal_datalist_lock);
if (jh->b_jlist == BJ_Reserved && jh->b_transaction == transaction &&
!buffer_jdirty(jh2bh(jh))) {
!buffer_jbddirty(jh2bh(jh))) {
JBUFFER_TRACE(jh, "unused: refiling it");
handle->h_buffer_credits++;
__journal_refile_buffer(jh);
......@@ -1850,7 +1850,7 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh)
zap_buffer:
clear_buffer_dirty(bh);
J_ASSERT_BH(bh, !buffer_jdirty(bh));
J_ASSERT_BH(bh, !buffer_jbddirty(bh));
clear_buffer_mapped(bh);
clear_buffer_req(bh);
clear_buffer_new(bh);
......
......@@ -1084,50 +1084,6 @@ extern int jbd_blocks_per_page(struct inode *inode);
#ifdef __KERNEL__
extern spinlock_t jh_splice_lock;
/*
 * Evaluate `expr1' locklessly as a cheap first pass; only if it is
 * found true, take jh_splice_lock and re-evaluate both expressions
 * under the lock.  This is the test-outside-then-retest-inside
 * pattern described in the comment above jh_splice_lock: `expr1'
 * (typically buffer_jbd()) may be cleared by another CPU between
 * the unlocked and locked tests, so only the locked retest result
 * is returned.
 */
#define SPLICE_LOCK(expr1, expr2) \
({ \
int ret = (expr1); \
if (ret) { \
spin_lock(&jh_splice_lock); \
ret = (expr1) && (expr2); \
spin_unlock(&jh_splice_lock); \
} \
ret; \
})
/*
* A number of buffer state predicates. They test for
* buffer_jbd() because they are used in core kernel code.
*
* These will be racy on SMP unless we're *sure* that the
* buffer won't be detached from the journalling system
* in parallel.
*/
/*
 * Return true if the buffer is on journal list `list'.
 * Uses SPLICE_LOCK so the journal_head is not ripped away between the
 * buffer_jbd() test and the b_jlist dereference (see the jh_splice_lock
 * commentary); still racy unless the caller prevents concurrent
 * detachment, as noted above.
 */
static inline int buffer_jlist_eq(struct buffer_head *bh, int list)
{
return SPLICE_LOCK(buffer_jbd(bh), bh2jh(bh)->b_jlist == list);
}
/*
 * Return true if this buffer is dirty wrt the journal: it is under
 * jbd control (buffer_jbd) and has its jbd-dirty state set
 * (buffer_jbddirty).  Reads bh->b_state bits only, so no
 * journal_head dereference and no SPLICE_LOCK is needed.
 */
static inline int buffer_jdirty(struct buffer_head *bh)
{
return buffer_jbd(bh) && buffer_jbddirty(bh);
}
/*
 * Return true if it's a data buffer which journalling is managing:
 * under jbd control (buffer_jbd) and on the BJ_SyncData journal list.
 * Dereferences the journal_head, hence the SPLICE_LOCK protection
 * against concurrent journal_head removal.
 */
static inline int buffer_jbd_data(struct buffer_head *bh)
{
return SPLICE_LOCK(buffer_jbd(bh),
bh2jh(bh)->b_jlist == BJ_SyncData);
}
#ifdef CONFIG_SMP
#define assert_spin_locked(lock) J_ASSERT(spin_is_locked(lock))
#else
......@@ -1155,7 +1111,6 @@ static inline int buffer_jbd_data(struct buffer_head *bh)
#define J_ASSERT(expr) do {} while (0)
#define J_ASSERT_BH(bh, expr) do {} while (0)
#define buffer_jbd(bh) 0
#define buffer_jlist_eq(bh, val) 0
#define journal_buffer_journal_lru(bh) 0
#endif /* defined(__KERNEL__) && !defined(CONFIG_JBD) */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment