Commit e9bdc258 authored by Mikael Ronstrom

Added memory barriers to the portability layer in InnoDB

Removed the dependency on pthread_t being an int
parent 7ceb6050
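
The first hunk below adds three barrier wrappers to the atomics portability layer: a load (consumer) barrier, a store (producer) barrier, and a full barrier, mapped to __sync_synchronize() under HAVE_GCC_ATOMIC_BUILTINS and to the Solaris membar_* primitives under HAVE_SOLARIS_ATOMIC. As a minimal sketch of the intended usage pattern (publishing a payload behind a ready flag), the snippet below is purely illustrative and not InnoDB code; it calls the GCC builtin directly, which is what the new wrappers expand to when HAVE_GCC_ATOMIC_BUILTINS is defined.

/* Illustrative only: a writer publishes a payload, then sets a ready flag;
   a reader spins on the flag, then reads the payload.  The barriers keep
   the two stores (and the two loads) in order on weakly ordered CPUs. */
#include <stdio.h>

static int		payload;
static volatile int	ready = 0;

static void
publish(int value)
{
	payload = value;
	/* store barrier: payload must be visible before the flag
	(os_memory_barrier_store() in the portability layer) */
	__sync_synchronize();
	ready = 1;
}

static int
consume(void)
{
	while (!ready) {
		/* spin until the writer sets the flag */
	}
	/* load barrier: do not read payload before seeing the flag
	(os_memory_barrier_load() in the portability layer) */
	__sync_synchronize();
	return(payload);
}

int
main(void)
{
	/* single-threaded smoke test; the ordering matters with real threads */
	publish(42);
	printf("%d\n", consume());
	return(0);
}
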
@@ -69,6 +69,46 @@ os_compare_and_swap(
 #endif
 }
 
+/**************************************************************
+Memory barrier for load */
+UNIV_INLINE
+void
+os_memory_barrier_load()
+{
+#ifdef HAVE_GCC_ATOMIC_BUILTINS
+	__sync_synchronize();
+#elif HAVE_SOLARIS_ATOMIC
+	membar_consumer();
+#endif
+}
+
+/**************************************************************
+Memory barrier for store */
+UNIV_INLINE
+void
+os_memory_barrier_store()
+{
+#ifdef HAVE_GCC_ATOMIC_BUILTINS
+	__sync_synchronize();
+#elif HAVE_SOLARIS_ATOMIC
+	membar_producer();
+#endif
+}
+
+/**************************************************************
+Memory barrier */
+UNIV_INLINE
+void
+os_memory_barrier()
+{
+#ifdef HAVE_GCC_ATOMIC_BUILTINS
+	__sync_synchronize();
+#elif HAVE_SOLARIS_ATOMIC
+	membar_enter();
+#endif
+}
+
 /**************************************************************
 Atomic increment for InnoDB. Currently requires GCC atomic builtins. */
 UNIV_INLINE
...
@@ -254,6 +254,7 @@ rw_lock_s_lock_low(
 	return(TRUE);	/* locking succeeded */
 }
 
+/* TODO: The "direct" functions are not used. Remove them? */
 /**********************************************************************
 Low-level function which locks an rw-lock in s-mode when we know that it
 is possible and none else is currently accessing the rw-lock structure.
@@ -279,6 +280,7 @@ rw_lock_s_lock_direct(
 #endif
 }
 
+/* TODO: The "direct" functions are not used. Remove them? */
 /**********************************************************************
 Low-level function which locks an rw-lock in x-mode when we know that it
 is not locked and none else is currently accessing the rw-lock structure.
@@ -451,6 +453,7 @@ rw_lock_s_unlock_func(
 #endif
 }
 
+/* TODO: The "direct" functions are not used. Remove them? */
 /**********************************************************************
 Releases a shared mode lock when we know there are no waiters and none
 else will access the lock during the time this function is executed. */
@@ -489,17 +492,16 @@ rw_lock_x_unlock_func(
 #endif
 	)
 {
-	os_thread_id_t	local_writer_thread;
+	uint	local_pass;
 	ut_ad((lock->lock_word % X_LOCK_DECR) == 0);
 
 	/*
-	Must reset writer_thread while we still have the lock.
+	Must reset pass while we still have the lock.
 	If we are not the last unlocker, we correct it later in the function,
 	which is harmless since we still hold the lock.
-	TODO: are there any risks of a thread id == -1 on any platform?
 	*/
-	local_writer_thread = lock->writer_thread;
-	lock->writer_thread = -1;
+	local_pass = lock->pass;
+	lock->pass = 1;
 
 #ifdef UNIV_SYNC_DEBUG
 	rw_lock_remove_debug_info(lock, pass, RW_LOCK_EX);
@@ -516,8 +518,8 @@ rw_lock_x_unlock_func(
 		}
 	} else {
-		/* We still hold x-lock, so we correct writer_thread. */
-		lock->writer_thread = local_writer_thread;
+		/* We still hold x-lock, so we correct pass. */
+		lock->pass = local_pass;
 	}
 
 	ut_ad(rw_lock_validate(lock));
@@ -527,6 +529,7 @@ rw_lock_x_unlock_func(
 #endif
 }
 
+/* TODO: The "direct" functions are not used. Remove them? */
 /**********************************************************************
 Releases an exclusive mode lock when we know there are no waiters, and
 none else will access the lock durint the time this function is executed. */
@@ -544,7 +547,7 @@ rw_lock_x_unlock_direct(
 #ifdef UNIV_SYNC_DEBUG
 	rw_lock_remove_debug_info(lock, 0, RW_LOCK_EX);
 #endif
+	lock->pass = 1;
 	lock->lock_word += X_LOCK_DECR;
 
 	ut_ad(!rw_lock_get_waiters(lock));
...
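
The rw_lock_x_unlock_func() change above replaces the old writer_thread = -1 sentinel with the pass flag: pass is forced to 1 while the x-lock is still held, and restored only when this turns out to be a recursive (non-final) unlock. The following self-contained sketch shows that save/restore protocol in isolation; the struct layout, the X_LOCK_DECR value, the atomic helper, and the "last unlocker" test are assumptions for illustration, not the InnoDB definitions.

/* Illustrative sketch, not the InnoDB implementation. */
#include <pthread.h>

#define X_LOCK_DECR	0x00100000L	/* one recursive x-lock level (assumed value) */

struct sketch_rw_lock {
	volatile long		lock_word;	/* X_LOCK_DECR when free */
	volatile unsigned	pass;		/* 1: writer_thread is invalid */
	volatile pthread_t	writer_thread;	/* only meaningful while pass == 0 */
};

static void
sketch_x_unlock(struct sketch_rw_lock* lock)
{
	unsigned	local_pass;

	/* Invalidate writer_thread while we still hold the lock, so no
	thread can read a stale thread id through a pass == 0 window. */
	local_pass = lock->pass;
	lock->pass = 1;

	/* Release one x-lock level; if lock_word is back to X_LOCK_DECR
	we were the last unlocker (assumed test). */
	if (__sync_add_and_fetch(&lock->lock_word, X_LOCK_DECR)
	    == X_LOCK_DECR) {
		/* Lock is free now; the real code signals waiters here. */
	} else {
		/* We still hold the x-lock, so we correct pass. */
		lock->pass = local_pass;
	}
}
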
@@ -50,17 +50,16 @@ The other members of the lock obey the following rules to remain consistent:
 pass:		This is only set to 1 to prevent recursive x-locks. It must
 		be set as specified by x_lock caller after the lock_word
 		indicates that the thread holds the lock, but before that
-		thread resumes execution. It must be reset to 0 during the
+		thread resumes execution. It must also be set to 1 during the
 		final x_unlock, but before the lock_word status is updated.
 		When an x_lock or move_ownership call wishes to change
 		pass, it must first update the writer_thread appropriately.
 writer_thread:	Must be set to the writers thread_id after the lock_word
 		indicates that the thread holds the lock, but before that
-		thread resumes execution. It must be reset to -1 during the
-		final x_unlock, but before the lock_word status is updated.
-		This ensures that when the lock_word indicates that an x_lock
-		is held, the only legitimate values for writer_thread are -1
-		(x_lock function hasn't completed) or the writer's thread_id.
+		thread resumes execution. writer_thread may be invalid and
+		should not be read when pass == 1. A thread trying to become
+		writer never reads its own stale writer_thread, since it sets
+		pass during its previous unlock call.
 waiters:	May be set to 1 anytime, but to avoid unnecessary wake-up
 		signals, it should only be set to 1 when there are threads
 		waiting on event. Must be 1 when a writer starts waiting to
@@ -210,8 +209,8 @@ rw_lock_create_func(
 	lock->lock_word = X_LOCK_DECR;
 	lock->waiters = 0;
-	lock->writer_thread = -1;
-	lock->pass = 0;
+	lock->pass = 1;
+	/* We do not have to initialize writer_thread until pass == 0 */
 
 #ifdef UNIV_SYNC_DEBUG
 	UT_LIST_INIT(lock->debug_list);
@@ -413,19 +412,8 @@ rw_lock_x_lock_move_ownership(
 	ut_ad(rw_lock_is_locked(lock, RW_LOCK_EX));
 
 #ifdef UNIV_SYNC_ATOMIC
-	os_thread_id_t	local_writer_thread = lock->writer_thread;
-	os_thread_id_t	new_writer_thread = os_thread_get_curr_id();
-	while (TRUE) {
-		if ((int)local_writer_thread != -1) {
-			if(os_compare_and_swap(
-				(volatile lint*)&(lock->writer_thread),
-				local_writer_thread,
-				new_writer_thread)) {
-				break;
-			}
-		}
-		local_writer_thread = lock->writer_thread;
-	}
+	lock->writer_thread = os_thread_get_curr_id();
+	os_memory_barrier_store();
 	lock->pass = 0;
 #else /* UNIV_SYNC_ATOMIC */
 	mutex_enter(&(lock->mutex));
@@ -518,10 +506,9 @@ rw_lock_x_lock_low(
 	ulint		line)	/* in: line where requested */
 {
 	os_thread_id_t	curr_thread = os_thread_get_curr_id();
-	ut_ad(curr_thread != -1); /* We use -1 as the unlocked value. */
 
 	if(rw_lock_lock_word_decr(lock, X_LOCK_DECR)) {
-		ut_ad(lock->writer_thread == -1);
+		ut_ad(lock->pass);
 
 		/* Decrement occurred: we are writer or next-writer. */
 		lock->writer_thread = curr_thread;
...
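
The rw_lock_x_lock_move_ownership() change above is the piece that actually removes the thread-id-as-int assumption: instead of CAS-ing the new thread id over a -1 sentinel in a loop, the new owner simply stores its id and then clears pass, with a store barrier in between so no thread can observe pass == 0 together with the old writer_thread. A minimal sketch of that ordering follows; the names are illustrative (a trimmed version of the struct in the earlier sketch), and the raw __sync_synchronize() stands in for os_memory_barrier_store().

/* Illustrative sketch of the new ownership-transfer ordering; the struct
   and function names are not the InnoDB ones. */
#include <pthread.h>

struct sketch_rw_lock {
	volatile pthread_t	writer_thread;	/* only meaningful while pass == 0 */
	volatile unsigned	pass;		/* 1: writer_thread is invalid */
};

static void
sketch_move_ownership(struct sketch_rw_lock* lock)
{
	/* We already hold the x-lock; record the new owning thread... */
	lock->writer_thread = pthread_self();

	/* ...force the store above to become visible first
	(os_memory_barrier_store() in the patch)... */
	__sync_synchronize();

	/* ...and only then declare writer_thread valid again. */
	lock->pass = 0;
}
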