Commit 6ea41f1e authored by Kristian Nielsen

MDEV-7026: Race in InnoDB/XtraDB mutex implementation can stall or hang the server.

The bug was that a full memory barrier was missing in the code that ensures
that a waiter on an InnoDB mutex will not go to sleep unless it is guaranteed
to be woken up again by another thread currently holding the mutex. This made
possible a race where a thread could get stuck waiting for a mutex that is in
fact no longer locked. If that thread was also holding other critical locks,
this could stall the entire server. There is an error monitor thread that can
break the stall; it runs about once per second. But if the error monitor
thread itself got stuck or was not running, then the entire server could hang
indefinitely.
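
The failure mode is the classic Dekker-style store/load race between the
thread releasing the mutex (store lock_word, then load waiters) and the
thread about to sleep (store waiters, then load lock_word). A minimal
illustrative sketch follows; the toy_mutex type and function names are
hypothetical, not the actual InnoDB code:

/* Toy model of the InnoDB wakeup protocol (hypothetical names). */
typedef struct {
	volatile unsigned char	lock_word;	/* 1 while the mutex is held */
	volatile unsigned long	waiters;	/* 1 if a thread may be sleeping */
} toy_mutex;

/* Releasing side: clear lock_word, THEN read waiters. */
static void
toy_mutex_exit(toy_mutex* m)
{
	__sync_lock_test_and_set(&m->lock_word, 0);
	/* A full barrier is needed here: the store to lock_word must be
	globally visible before the load of waiters below. A release
	barrier does not order a store against a later load, so without
	this fence the releaser can read a stale waiters == 0 and skip
	the wakeup. */
	__sync_synchronize();
	if (m->waiters != 0) {
		/* signal the event to wake any sleeping waiters */
	}
}

/* Waiting side: set waiters, THEN re-check lock_word before sleeping. */
static int
toy_mutex_may_sleep(toy_mutex* m)
{
	m->waiters = 1;
	/* The mirror-image full barrier, between the store of waiters
	and the load of lock_word. */
	__sync_synchronize();
	if (m->lock_word == 0) {
		return(0);	/* mutex already free: retry, do not sleep */
	}
	return(1);	/* safe to sleep: releaser must now see waiters == 1 */
}

With both fences in place, at least one side is guaranteed to see the other's
store, so either the waiter retries or the releaser signals.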

This was introduced on i386/amd64 platforms in 5.5.40 and 10.0.13 by an
incorrect patch that tried to fix a similar problem on PowerPC.

This commit reverts the incorrect PowerPC patch and instead implements a fix
for PowerPC that does not change i386/amd64 behaviour, making PowerPC work
similarly to i386/amd64.
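
On PowerPC the fix therefore adds an explicit full fence next to each
lock-word operation, while i386/amd64 keeps relying on the full-barrier
semantics of its LOCK-prefixed instructions. A sketch of the shape of the
change, simplified from os_fast_mutex_unlock_full_barrier() in the diff
below (not the verbatim source):

#include <pthread.h>

static void
unlock_full_barrier(pthread_mutex_t* m)
{
	pthread_mutex_unlock(m);
#ifdef __powerpc__
	/* os_mb maps to __sync_synchronize() in this patch; the extra
	full barrier orders the unlock before the subsequent load of the
	waiters flag. On i386/amd64 no extra fence is emitted. */
	__sync_synchronize();
#endif
}
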
parent 496fda66
@@ -232,10 +232,27 @@ os_fast_mutex_trylock(
 /*==================*/
 	os_fast_mutex_t*	fast_mutex);	/*!< in: mutex to acquire */
 /**********************************************************//**
+Acquires ownership of a fast mutex. Implies a full memory barrier even on
+platforms such as PowerPC where this is not normally required.
+@return	0 if success, != 0 if was reserved by another thread */
+UNIV_INLINE
+ulint
+os_fast_mutex_trylock_full_barrier(
+/*==================*/
+	os_fast_mutex_t*	fast_mutex);	/*!< in: mutex to acquire */
+/**********************************************************//**
 Releases ownership of a fast mutex. */
 UNIV_INTERN
 void
 os_fast_mutex_unlock(
+/*=================*/
+	os_fast_mutex_t*	fast_mutex);	/*!< in: mutex to release */
+/**********************************************************//**
+Releases ownership of a fast mutex. Implies a full memory barrier even on
+platforms such as PowerPC where this is not normally required. */
+UNIV_INTERN
+void
+os_fast_mutex_unlock_full_barrier(
 /*=================*/
 	os_fast_mutex_t*	fast_mutex);	/*!< in: mutex to release */
 /*********************************************************//**
@@ -307,11 +324,28 @@ amount of increment. */
 /**********************************************************//**
 Returns the old value of *ptr, atomically sets *ptr to new_val */
-# define os_atomic_test_and_set_byte(ptr, new_val) \
+#ifdef __powerpc__
+/*
+os_atomic_test_and_set_byte_release() should imply a release barrier before
+setting, and a full barrier after. But __sync_lock_test_and_set() is only
+documented as an acquire barrier. So on PowerPC we need to add the full
+barrier explicitly. */
+# define os_atomic_test_and_set_byte_release(ptr, new_val) \
+	do { __sync_lock_release(ptr); \
+	     __sync_synchronize(); } while (0)
+#else
+/*
+On x86, __sync_lock_test_and_set() happens to be a full barrier, due to
+the LOCK prefix.
+*/
+# define os_atomic_test_and_set_byte_release(ptr, new_val) \
+	__sync_lock_test_and_set(ptr, (byte) new_val)
+#endif
+/*
+os_atomic_test_and_set_byte_acquire() is a full memory barrier on x86. But
+in general, just an acquire barrier should be sufficient. */
+# define os_atomic_test_and_set_byte_acquire(ptr, new_val) \
 	__sync_lock_test_and_set(ptr, (byte) new_val)
-# define os_atomic_lock_release_byte(ptr) \
-	__sync_lock_release(ptr)
 #elif defined(HAVE_IB_SOLARIS_ATOMICS)
@@ -363,11 +397,10 @@ amount of increment. */
 /**********************************************************//**
 Returns the old value of *ptr, atomically sets *ptr to new_val */
-# define os_atomic_test_and_set_byte(ptr, new_val) \
+# define os_atomic_test_and_set_byte_acquire(ptr, new_val) \
+	atomic_swap_uchar(ptr, new_val)
+# define os_atomic_test_and_set_byte_release(ptr, new_val) \
 	atomic_swap_uchar(ptr, new_val)
-# define os_atomic_lock_release_byte(ptr) \
-	(void) atomic_swap_uchar(ptr, 0)
 #elif defined(HAVE_WINDOWS_ATOMICS)
@@ -414,7 +447,9 @@ Returns the old value of *ptr, atomically sets *ptr to new_val.
 InterlockedExchange() operates on LONG, and the LONG will be
 clobbered */
-# define os_atomic_test_and_set_byte(ptr, new_val) \
+# define os_atomic_test_and_set_byte_acquire(ptr, new_val) \
+	((byte) InterlockedExchange(ptr, new_val))
+# define os_atomic_test_and_set_byte_release(ptr, new_val) \
 	((byte) InterlockedExchange(ptr, new_val))
 #else
@@ -427,11 +462,7 @@ clobbered */
 # define HAVE_MEMORY_BARRIER
 # define os_rmb	__atomic_thread_fence(__ATOMIC_ACQUIRE)
 # define os_wmb	__atomic_thread_fence(__ATOMIC_RELEASE)
-#ifdef __powerpc__
-# define os_isync	__asm __volatile ("isync":::"memory")
-#else
-#define os_isync do { } while(0)
-#endif
+# define os_mb	__atomic_thread_fence(__ATOMIC_SEQ_CST)
 # define IB_MEMORY_BARRIER_STARTUP_MSG \
 	"GCC builtin __atomic_thread_fence() is used for memory barrier"
@@ -440,7 +471,7 @@ clobbered */
 # define HAVE_MEMORY_BARRIER
 # define os_rmb	__sync_synchronize()
 # define os_wmb	__sync_synchronize()
-# define os_isync	__sync_synchronize()
+# define os_mb	__sync_synchronize()
 # define IB_MEMORY_BARRIER_STARTUP_MSG \
 	"GCC builtin __sync_synchronize() is used for memory barrier"
@@ -449,7 +480,7 @@ clobbered */
 # include <mbarrier.h>
 # define os_rmb	__machine_r_barrier()
 # define os_wmb	__machine_w_barrier()
-# define os_isync	os_rmb; os_wmb
+# define os_mb	__machine_rw_barrier()
 # define IB_MEMORY_BARRIER_STARTUP_MSG \
 	"Solaris memory ordering functions are used for memory barrier"
@@ -458,17 +489,14 @@ clobbered */
 # include <intrin.h>
 # define os_rmb	_mm_lfence()
 # define os_wmb	_mm_sfence()
-# define os_isync	os_rmb; os_wmb
+# define os_mb	_mm_mfence()
 # define IB_MEMORY_BARRIER_STARTUP_MSG \
 	"_mm_lfence() and _mm_sfence() are used for memory barrier"
-# define os_atomic_lock_release_byte(ptr) \
-	(void) InterlockedExchange(ptr, 0)
 #else
 # define os_rmb	do { } while(0)
 # define os_wmb	do { } while(0)
-# define os_isync	do { } while(0)
+# define os_mb	do { } while(0)
 # define IB_MEMORY_BARRIER_STARTUP_MSG \
 	"Memory barrier is not used"
 #endif
...
@@ -54,3 +54,35 @@ os_fast_mutex_trylock(
 	return((ulint) pthread_mutex_trylock(fast_mutex));
 #endif
 }
+
+/**********************************************************//**
+Acquires ownership of a fast mutex. Implies a full memory barrier even on
+platforms such as PowerPC where this is not normally required.
+@return	0 if success, != 0 if was reserved by another thread */
+UNIV_INLINE
+ulint
+os_fast_mutex_trylock_full_barrier(
+/*==================*/
+	os_fast_mutex_t*	fast_mutex)	/*!< in: mutex to acquire */
+{
+#ifdef __WIN__
+	if (TryEnterCriticalSection(fast_mutex)) {
+
+		return(0);
+	} else {
+
+		return(1);
+	}
+#else
+	/* NOTE that the MySQL my_pthread.h redefines pthread_mutex_trylock
+	so that it returns 0 on success. In the operating system
+	libraries, HP-UX-10.20 follows the old Posix 1003.4a Draft 4 and
+	returns 1 on success (but MySQL remaps that to 0), while Linux,
+	FreeBSD, Solaris, AIX, Tru64 Unix, HP-UX-11.0 return 0 on success. */
+#ifdef __powerpc__
+	os_mb;
+#endif
+	return((ulint) pthread_mutex_trylock(fast_mutex));
+#endif
+}
@@ -80,11 +80,11 @@ mutex_test_and_set(
 	mutex_t*	mutex)	/*!< in: mutex */
 {
 #if defined(HAVE_ATOMIC_BUILTINS)
-	return(os_atomic_test_and_set_byte(&mutex->lock_word, 1));
+	return(os_atomic_test_and_set_byte_acquire(&mutex->lock_word, 1));
 #else
 	ibool	ret;

-	ret = os_fast_mutex_trylock(&(mutex->os_fast_mutex));
+	ret = os_fast_mutex_trylock_full_barrier(&(mutex->os_fast_mutex));

 	if (ret == 0) {
 		/* We check that os_fast_mutex_trylock does not leak
@@ -92,7 +92,6 @@ mutex_test_and_set(
 		ut_a(mutex->lock_word == 0);

 		mutex->lock_word = 1;
-		os_wmb;
 	}

 	return((byte)ret);
@@ -109,11 +108,14 @@ mutex_reset_lock_word(
 	mutex_t*	mutex)	/*!< in: mutex */
 {
 #if defined(HAVE_ATOMIC_BUILTINS)
-	os_atomic_lock_release_byte(&mutex->lock_word);
+	/* In theory __sync_lock_release should be used to release the lock.
+	Unfortunately, it does not work properly alone. The workaround is
+	that the more conservative __sync_lock_test_and_set is used instead. */
+	os_atomic_test_and_set_byte_release(&mutex->lock_word, 0);
 #else
 	mutex->lock_word = 0;

-	os_fast_mutex_unlock(&(mutex->os_fast_mutex));
+	os_fast_mutex_unlock_full_barrier(&(mutex->os_fast_mutex));
 #endif
 }
@@ -145,7 +147,6 @@ mutex_get_waiters(
 	ptr = &(mutex->waiters);

-	os_rmb;
 	return(*ptr);		/* Here we assume that the read of a single
 				word from memory is atomic */
 }
@@ -180,7 +181,6 @@ mutex_exit_func(
 	to wake up possible hanging threads if
 	they are missed in mutex_signal_object. */

-	os_isync;
 	if (mutex_get_waiters(mutex) != 0) {

 		mutex_signal_object(mutex);
...
@@ -887,6 +887,25 @@ os_fast_mutex_unlock(
 #endif
 }
+
+/**********************************************************//**
+Releases ownership of a fast mutex. Implies a full memory barrier even on
+platforms such as PowerPC where this is not normally required. */
+UNIV_INTERN
+void
+os_fast_mutex_unlock_full_barrier(
+/*=================*/
+	os_fast_mutex_t*	fast_mutex)	/*!< in: mutex to release */
+{
+#ifdef __WIN__
+	LeaveCriticalSection(fast_mutex);
+#else
+	pthread_mutex_unlock(fast_mutex);
+#ifdef __powerpc__
+	os_mb;
+#endif
+#endif
+}

 /**********************************************************//**
 Frees a mutex object. */
 UNIV_INTERN
...
@@ -474,8 +474,6 @@ mutex_set_waiters(
 	ptr = &(mutex->waiters);

-	os_wmb;
-
 	*ptr = n;		/* Here we assume that the write of a single
 				word in memory is atomic */
 }
...
@@ -232,10 +232,27 @@ os_fast_mutex_trylock(
 /*==================*/
 	os_fast_mutex_t*	fast_mutex);	/*!< in: mutex to acquire */
 /**********************************************************//**
+Acquires ownership of a fast mutex. Implies a full memory barrier even on
+platforms such as PowerPC where this is not normally required.
+@return	0 if success, != 0 if was reserved by another thread */
+UNIV_INLINE
+ulint
+os_fast_mutex_trylock_full_barrier(
+/*==================*/
+	os_fast_mutex_t*	fast_mutex);	/*!< in: mutex to acquire */
+/**********************************************************//**
 Releases ownership of a fast mutex. */
 UNIV_INTERN
 void
 os_fast_mutex_unlock(
+/*=================*/
+	os_fast_mutex_t*	fast_mutex);	/*!< in: mutex to release */
+/**********************************************************//**
+Releases ownership of a fast mutex. Implies a full memory barrier even on
+platforms such as PowerPC where this is not normally required. */
+UNIV_INTERN
+void
+os_fast_mutex_unlock_full_barrier(
 /*=================*/
 	os_fast_mutex_t*	fast_mutex);	/*!< in: mutex to release */
 /*********************************************************//**
@@ -314,11 +331,28 @@ amount of increment. */
 /**********************************************************//**
 Returns the old value of *ptr, atomically sets *ptr to new_val */
-# define os_atomic_test_and_set_byte(ptr, new_val) \
+#ifdef __powerpc__
+/*
+os_atomic_test_and_set_byte_release() should imply a release barrier before
+setting, and a full barrier after. But __sync_lock_test_and_set() is only
+documented as an acquire barrier. So on PowerPC we need to add the full
+barrier explicitly. */
+# define os_atomic_test_and_set_byte_release(ptr, new_val) \
+	do { __sync_lock_release(ptr); \
+	     __sync_synchronize(); } while (0)
+#else
+/*
+On x86, __sync_lock_test_and_set() happens to be a full barrier, due to
+the LOCK prefix.
+*/
+# define os_atomic_test_and_set_byte_release(ptr, new_val) \
+	__sync_lock_test_and_set(ptr, (byte) new_val)
+#endif
+/*
+os_atomic_test_and_set_byte_acquire() is a full memory barrier on x86. But
+in general, just an acquire barrier should be sufficient. */
+# define os_atomic_test_and_set_byte_acquire(ptr, new_val) \
 	__sync_lock_test_and_set(ptr, (byte) new_val)
-# define os_atomic_lock_release_byte(ptr) \
-	__sync_lock_release(ptr)
 #elif defined(HAVE_IB_SOLARIS_ATOMICS)
@@ -374,11 +408,10 @@ amount of increment. */
 /**********************************************************//**
 Returns the old value of *ptr, atomically sets *ptr to new_val */
-# define os_atomic_test_and_set_byte(ptr, new_val) \
+# define os_atomic_test_and_set_byte_acquire(ptr, new_val) \
+	atomic_swap_uchar(ptr, new_val)
+# define os_atomic_test_and_set_byte_release(ptr, new_val) \
 	atomic_swap_uchar(ptr, new_val)
-# define os_atomic_lock_release_byte(ptr) \
-	(void) atomic_swap_uchar(ptr, 0)
 #elif defined(HAVE_WINDOWS_ATOMICS)
@@ -434,7 +467,9 @@ Returns the old value of *ptr, atomically sets *ptr to new_val.
 InterlockedExchange() operates on LONG, and the LONG will be
 clobbered */
-# define os_atomic_test_and_set_byte(ptr, new_val) \
+# define os_atomic_test_and_set_byte_acquire(ptr, new_val) \
+	((byte) InterlockedExchange(ptr, new_val))
+# define os_atomic_test_and_set_byte_release(ptr, new_val) \
 	((byte) InterlockedExchange(ptr, new_val))
 #else
@@ -447,11 +482,7 @@ clobbered */
 # define HAVE_MEMORY_BARRIER
 # define os_rmb	__atomic_thread_fence(__ATOMIC_ACQUIRE)
 # define os_wmb	__atomic_thread_fence(__ATOMIC_RELEASE)
-#ifdef __powerpc__
-# define os_isync	__asm __volatile ("isync":::"memory")
-#else
-#define os_isync do { } while(0)
-#endif
+# define os_mb	__atomic_thread_fence(__ATOMIC_SEQ_CST)
 # define IB_MEMORY_BARRIER_STARTUP_MSG \
 	"GCC builtin __atomic_thread_fence() is used for memory barrier"
@@ -460,7 +491,7 @@ clobbered */
 # define HAVE_MEMORY_BARRIER
 # define os_rmb	__sync_synchronize()
 # define os_wmb	__sync_synchronize()
-# define os_isync	__sync_synchronize()
+# define os_mb	__sync_synchronize()
 # define IB_MEMORY_BARRIER_STARTUP_MSG \
 	"GCC builtin __sync_synchronize() is used for memory barrier"
@@ -469,7 +500,7 @@ clobbered */
 # include <mbarrier.h>
 # define os_rmb	__machine_r_barrier()
 # define os_wmb	__machine_w_barrier()
-# define os_isync	os_rmb; os_wmb
+# define os_mb	__machine_rw_barrier()
 # define IB_MEMORY_BARRIER_STARTUP_MSG \
 	"Solaris memory ordering functions are used for memory barrier"
@@ -478,17 +509,14 @@ clobbered */
 # include <intrin.h>
 # define os_rmb	_mm_lfence()
 # define os_wmb	_mm_sfence()
-# define os_isync	os_rmb; os_wmb
+# define os_mb	_mm_mfence()
 # define IB_MEMORY_BARRIER_STARTUP_MSG \
 	"_mm_lfence() and _mm_sfence() are used for memory barrier"
-# define os_atomic_lock_release_byte(ptr) \
-	(void) InterlockedExchange(ptr, 0)
 #else
 # define os_rmb	do { } while(0)
 # define os_wmb	do { } while(0)
-# define os_isync	do { } while(0)
+# define os_mb	do { } while(0)
 # define IB_MEMORY_BARRIER_STARTUP_MSG \
 	"Memory barrier is not used"
 #endif
...
@@ -54,3 +54,35 @@ os_fast_mutex_trylock(
 	return((ulint) pthread_mutex_trylock(fast_mutex));
 #endif
 }
+
+/**********************************************************//**
+Acquires ownership of a fast mutex. Implies a full memory barrier even on
+platforms such as PowerPC where this is not normally required.
+@return	0 if success, != 0 if was reserved by another thread */
+UNIV_INLINE
+ulint
+os_fast_mutex_trylock_full_barrier(
+/*==================*/
+	os_fast_mutex_t*	fast_mutex)	/*!< in: mutex to acquire */
+{
+#ifdef __WIN__
+	if (TryEnterCriticalSection(fast_mutex)) {

+		return(0);
+	} else {

+		return(1);
+	}
+#else
+	/* NOTE that the MySQL my_pthread.h redefines pthread_mutex_trylock
+	so that it returns 0 on success. In the operating system
+	libraries, HP-UX-10.20 follows the old Posix 1003.4a Draft 4 and
+	returns 1 on success (but MySQL remaps that to 0), while Linux,
+	FreeBSD, Solaris, AIX, Tru64 Unix, HP-UX-11.0 return 0 on success. */
+#ifdef __powerpc__
+	os_mb;
+#endif
+	return((ulint) pthread_mutex_trylock(fast_mutex));
+#endif
+}
@@ -80,11 +80,11 @@ mutex_test_and_set(
 	mutex_t*	mutex)	/*!< in: mutex */
 {
 #if defined(HAVE_ATOMIC_BUILTINS)
-	return(os_atomic_test_and_set_byte(&mutex->lock_word, 1));
+	return(os_atomic_test_and_set_byte_acquire(&mutex->lock_word, 1));
 #else
 	ibool	ret;

-	ret = os_fast_mutex_trylock(&(mutex->os_fast_mutex));
+	ret = os_fast_mutex_trylock_full_barrier(&(mutex->os_fast_mutex));

 	if (ret == 0) {
 		/* We check that os_fast_mutex_trylock does not leak
@@ -92,7 +92,6 @@ mutex_test_and_set(
 		ut_a(mutex->lock_word == 0);

 		mutex->lock_word = 1;
-		os_wmb;
 	}

 	return((byte)ret);
@@ -109,11 +108,14 @@ mutex_reset_lock_word(
 	mutex_t*	mutex)	/*!< in: mutex */
 {
 #if defined(HAVE_ATOMIC_BUILTINS)
-	os_atomic_lock_release_byte(&mutex->lock_word);
+	/* In theory __sync_lock_release should be used to release the lock.
+	Unfortunately, it does not work properly alone. The workaround is
+	that the more conservative __sync_lock_test_and_set is used instead. */
+	os_atomic_test_and_set_byte_release(&mutex->lock_word, 0);
 #else
 	mutex->lock_word = 0;

-	os_fast_mutex_unlock(&(mutex->os_fast_mutex));
+	os_fast_mutex_unlock_full_barrier(&(mutex->os_fast_mutex));
 #endif
 }
@@ -145,7 +147,6 @@ mutex_get_waiters(
 	ptr = &(mutex->waiters);

-	os_rmb;
 	return(*ptr);		/* Here we assume that the read of a single
 				word from memory is atomic */
 }
@@ -180,7 +181,6 @@ mutex_exit_func(
 	to wake up possible hanging threads if
 	they are missed in mutex_signal_object. */

-	os_isync;
 	if (mutex_get_waiters(mutex) != 0) {

 		mutex_signal_object(mutex);
...
@@ -887,6 +887,25 @@ os_fast_mutex_unlock(
 #endif
 }
+
+/**********************************************************//**
+Releases ownership of a fast mutex. Implies a full memory barrier even on
+platforms such as PowerPC where this is not normally required. */
+UNIV_INTERN
+void
+os_fast_mutex_unlock_full_barrier(
+/*=================*/
+	os_fast_mutex_t*	fast_mutex)	/*!< in: mutex to release */
+{
+#ifdef __WIN__
+	LeaveCriticalSection(fast_mutex);
+#else
+	pthread_mutex_unlock(fast_mutex);
+#ifdef __powerpc__
+	os_mb;
+#endif
+#endif
+}

 /**********************************************************//**
 Frees a mutex object. */
 UNIV_INTERN
...
@@ -482,8 +482,6 @@ mutex_set_waiters(
 	ptr = &(mutex->waiters);

-	os_wmb;
-
 	*ptr = n;		/* Here we assume that the write of a single
 				word in memory is atomic */
 #endif