Commit 2b47f8ff authored by Sergey Vojtovich's avatar Sergey Vojtovich

MDEV-10813 - Clean-up InnoDB atomics, memory barriers and mutexes

Clean-up periodic mutex/rwlock waiters wake up. This was a hack needed to
workaround broken mutexes/rwlocks implementation. We must have sane
implementations now and don't need these anymore: release thread is
guaranteed to wake up waiters.

Removed redundant ifdef that has equivalent code in both branches.

Removed os0atomic.h and os0atomic.ic: not used anymore.

Clean-up unused cmake checks.
parent 5608a737
This diff is collapsed.
/*****************************************************************************
Copyright (c) 2013, 2016, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Suite 500, Boston, MA 02110-1335 USA
*****************************************************************************/
/**************************************************//**
@file include/os0atomic.ic
The interface to the operating system synchronization primitives.
Created 2012-09-23 Sunny Bains (Split from include/os0sync.ic)
*******************************************************/
#ifdef _WIN32
#include <winbase.h>
/* Use inline functions to make 64 and 32 bit versions of windows atomic
functions so that typecasts are evaluated at compile time. Take advantage
that lint is either __int64 or long int and windows atomic functions work
on __int64 and LONG */
/**********************************************************//**
Atomic compare and exchange of signed lint values. NOTE(review): the
original header said "unsigned integers", but the operands here are
signed lint; the text was corrected to match the code. lint is __int64
under _WIN64 and long int otherwise, so each branch below calls the
Interlocked variant whose operand width matches.
@return value found before the exchange.
If it is not equal to old_val the exchange did not happen. */
UNIV_INLINE
lint
win_cmp_and_xchg_lint(
/*==================*/
volatile lint* ptr, /*!< in/out: source/destination */
lint new_val, /*!< in: exchange value */
lint old_val) /*!< in: value to compare to */
{
# ifdef _WIN64
/* 64-bit build: lint is __int64, matching InterlockedCompareExchange64. */
return(InterlockedCompareExchange64(ptr, new_val, old_val));
# else
/* 32-bit build: lint is long int, matching the LONG operand. */
return(InterlockedCompareExchange(ptr, new_val, old_val));
# endif /* _WIN64 */
}
/**********************************************************//**
Atomically adds val to the signed integer pointed to by ptr.
@return the value of *ptr immediately before the addition */
UNIV_INLINE
lint
win_xchg_and_add(
/*=============*/
volatile lint* ptr, /*!< in/out: address of destination */
lint val) /*!< in: number to be added */
{
	lint	old_value;

# ifdef _WIN64
	/* 64-bit build: use the __int64 variant. */
	old_value = InterlockedExchangeAdd64(ptr, val);
# else
	old_value = InterlockedExchangeAdd(ptr, val);
# endif /* _WIN64 */

	return(old_value);
}
/**********************************************************//**
Atomic compare and exchange of unsigned integers. Thin wrapper over
win_cmp_and_xchg_lint(); ulint and lint have the same width here, so
the casts below only reinterpret the bit pattern.
@return value found before the exchange.
If it is not equal to old_val the exchange did not happen. */
UNIV_INLINE
ulint
win_cmp_and_xchg_ulint(
/*===================*/
volatile ulint* ptr, /*!< in/out: source/destination */
ulint new_val, /*!< in: exchange value */
ulint old_val) /*!< in: value to compare to */
{
	/* C++ casts instead of C-style casts, for consistency with
	os_atomic_val_compare_and_swap() in this file. */
	return(static_cast<ulint>(win_cmp_and_xchg_lint(
			reinterpret_cast<volatile lint*>(ptr),
			static_cast<lint>(new_val),
			static_cast<lint>(old_val))));
}
/**********************************************************//**
Atomic compare and exchange of 32-bit unsigned integers. Forwards to
InterlockedCompareExchange(), which operates on LONG; the ut_ad()
documents the size assumption this relies on.
@return value found before the exchange.
If it is not equal to old_val the exchange did not happen. */
UNIV_INLINE
DWORD
win_cmp_and_xchg_dword(
/*===================*/
volatile DWORD* ptr, /*!< in/out: source/destination */
DWORD new_val, /*!< in: exchange value */
DWORD old_val) /*!< in: value to compare to */
{
	ut_ad(sizeof(DWORD) == sizeof(LONG)); /* We assume this. */

	/* C++ casts instead of C-style casts, for consistency with
	os_atomic_val_compare_and_swap() in this file. */
	return(InterlockedCompareExchange(
			reinterpret_cast<volatile LONG*>(ptr),
			static_cast<LONG>(new_val),
			static_cast<LONG>(old_val)));
}
/** Do an atomic test and set.
@param[in,out]	ptr	Memory location to set
@param[in]	new_val	new value
@return old value of memory location. */
UNIV_INLINE
lock_word_t
os_atomic_test_and_set(
volatile lock_word_t* ptr,
lock_word_t new_val)
{
	/* InterlockedExchange() stores new_val and hands back the
	value that *ptr held before the store. */
	lock_word_t	old_val = InterlockedExchange(ptr, new_val);

	return(old_val);
}
/** Do an atomic compare and set
@param[in,out]	ptr	Memory location to set
@param[in]	old_val	old value to compare
@param[in]	new_val	new value to set
@return the value of ptr before the operation. */
UNIV_INLINE
lock_word_t
os_atomic_val_compare_and_swap(
volatile lock_word_t* ptr,
lock_word_t old_val,
lock_word_t new_val)
{
	/* Delegate to the lint-typed CAS; lock_word_t and lint are
	bit-cast back and forth around the call. */
	volatile lint*	target = reinterpret_cast<volatile lint*>(ptr);
	lint		prev;

	prev = win_cmp_and_xchg_lint(target,
				     static_cast<lint>(new_val),
				     static_cast<lint>(old_val));

	return(static_cast<lock_word_t>(prev));
}
#elif defined(HAVE_IB_GCC_ATOMIC_COMPARE_EXCHANGE)
/** Do an atomic test and set.
Stores new_val into *ptr and returns the previous contents, using the
GCC __atomic builtins.
@param[in,out]	ptr	Memory location to set
@param[in]	new_val	new value
@return old value of memory location. */
UNIV_INLINE
lock_word_t
os_atomic_test_and_set(
volatile lock_word_t* ptr,
lock_word_t new_val)
{
	lock_word_t	ret;

	/* Removed the stale "(void) ptr" cast and its "unused ptr"
	comment: ptr is referenced on every path below, so it can
	never be unused. */
#if defined(__powerpc__) || defined(__aarch64__)
	/* Weakly ordered CPUs use the strongest ordering here.
	NOTE(review): presumably chosen defensively for broken
	weaker orderings on these targets -- confirm before relaxing. */
	__atomic_exchange(ptr, &new_val, &ret, __ATOMIC_SEQ_CST);
#else
	__atomic_exchange(ptr, &new_val, &ret, __ATOMIC_RELEASE);
#endif
	return(ret);
}
/** Do an atomic compare and set
Compares *ptr with old_val and, if equal, stores new_val; uses the
GCC __atomic builtins.
@param[in,out]	ptr	Memory location to set
@param[in]	old_val	old value to compare
@param[in]	new_val	new value to set
@return the value of ptr before the operation. */
UNIV_INLINE
lock_word_t
os_atomic_val_compare_and_swap(
volatile lock_word_t* ptr,
lock_word_t old_val,
lock_word_t new_val)
{
	/* Removed the stale "(void) ptr" cast and its "unused ptr"
	comment: ptr is referenced on every path below, so it can
	never be unused. */
#if defined(__powerpc__) || defined(__aarch64__)
	/* Weakly ordered CPUs use the strongest ordering here.
	NOTE(review): presumably chosen defensively -- confirm
	before relaxing. */
	__atomic_compare_exchange(ptr, &old_val, &new_val, false,
				  __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
#else
	__atomic_compare_exchange(ptr, &old_val, &new_val, false,
				  __ATOMIC_RELEASE, __ATOMIC_ACQUIRE);
#endif
	/* __atomic_compare_exchange() overwrites old_val in place
	with the value observed before the operation, which is
	exactly what the contract asks us to return. */
	return(old_val);
}
#elif defined(IB_STRONG_MEMORY_MODEL)
/** Do an atomic test and set.
@param[in,out]	ptr	Memory location to set
@param[in]	new_val	new value
@return old value of memory location. */
UNIV_INLINE
lock_word_t
os_atomic_test_and_set(
volatile lock_word_t* ptr,
lock_word_t new_val)
{
	/* Legacy __sync builtin: writes new_val and yields the
	previous contents of *ptr. */
	lock_word_t	old_val = __sync_lock_test_and_set(ptr, new_val);

	return(old_val);
}
/** Do an atomic compare and set
@param[in,out]	ptr	Memory location to set
@param[in]	old_val	old value to compare
@param[in]	new_val	new value to set
@return the value of ptr before the operation. */
UNIV_INLINE
lock_word_t
os_atomic_val_compare_and_swap(
volatile lock_word_t* ptr,
lock_word_t old_val,
lock_word_t new_val)
{
	/* Legacy __sync builtin: returns the value *ptr held before
	the (possibly failed) exchange. */
	lock_word_t	prev = __sync_val_compare_and_swap(
		ptr, old_val, new_val);

	return(prev);
}
#else
#error "Unsupported platform"
#endif /* _WIN32 */
......@@ -29,7 +29,6 @@ Created Feb 20, 2014 Vasil Dimov
#include "univ.i"
#include "os0atomic.h"
#include "ut0ut.h"
/** Execute a given function exactly once in a multi-threaded environment
......
......@@ -85,13 +85,6 @@ Note that one of the wait objects was signalled. */
void
sync_array_object_signalled();
/**********************************************************************//**
If the wakeup algorithm does not work perfectly at semaphore releases,
this function will do the waking (see the comment in mutex_exit). This
function should be called about every 1 second in the server. */
void
sync_arr_wake_threads_if_sema_free();
/**********************************************************************//**
Prints warnings of long semaphore waits to stderr.
@return TRUE if fatal semaphore wait threshold was exceeded */
......
......@@ -32,7 +32,6 @@ extern ulong srv_spin_wait_delay;
extern ulong srv_n_spin_wait_rounds;
extern ulong srv_force_recovery_crash;
#include "os0atomic.h"
#include "sync0policy.h"
#include "ib0mutex.h"
#include <set>
......@@ -45,25 +44,6 @@ extern ulong srv_force_recovery_crash;
typedef OSMutex EventMutex;
#ifndef UNIV_DEBUG
# ifdef HAVE_IB_LINUX_FUTEX
UT_MUTEX_TYPE(TTASFutexMutex, GenericPolicy, FutexMutex);
UT_MUTEX_TYPE(TTASFutexMutex, BlockMutexPolicy, BlockFutexMutex);
# endif /* HAVE_IB_LINUX_FUTEX */
UT_MUTEX_TYPE(TTASMutex, GenericPolicy, SpinMutex);
UT_MUTEX_TYPE(TTASMutex, BlockMutexPolicy, BlockSpinMutex);
UT_MUTEX_TYPE(OSTrackMutex, GenericPolicy, SysMutex);
UT_MUTEX_TYPE(OSTrackMutex, BlockMutexPolicy, BlockSysMutex);
UT_MUTEX_TYPE(TTASEventMutex, GenericPolicy, SyncArrayMutex);
UT_MUTEX_TYPE(TTASEventMutex, BlockMutexPolicy, BlockSyncArrayMutex);
#else /* !UNIV_DEBUG */
# ifdef HAVE_IB_LINUX_FUTEX
UT_MUTEX_TYPE(TTASFutexMutex, GenericPolicy, FutexMutex);
UT_MUTEX_TYPE(TTASFutexMutex, BlockMutexPolicy, BlockFutexMutex);
......@@ -78,8 +58,6 @@ UT_MUTEX_TYPE(OSTrackMutex, BlockMutexPolicy, BlockSysMutex);
UT_MUTEX_TYPE(TTASEventMutex, GenericPolicy, SyncArrayMutex);
UT_MUTEX_TYPE(TTASEventMutex, BlockMutexPolicy, BlockSyncArrayMutex);
#endif /* !UNIV_DEBUG */
#ifdef MUTEX_FUTEX
/** The default mutex type. */
typedef FutexMutex ib_mutex_t;
......
......@@ -35,10 +35,6 @@ Created 1/20/1994 Heikki Tuuri
#include "db0err.h"
#ifndef UNIV_HOTBACKUP
# include "os0atomic.h"
#endif /* UNIV_HOTBACKUP */
#include <time.h>
#ifndef MYSQL_SERVER
......
......@@ -158,200 +158,12 @@ IF(HAVE_FALLOC_PUNCH_HOLE_AND_KEEP_SIZE)
ENDIF()
IF(NOT MSVC)
# either define HAVE_IB_GCC_ATOMIC_BUILTINS or not
# either define HAVE_IB_GCC_ATOMIC_BUILTINS or not
# workaround for gcc 4.1.2 RHEL5/x86, gcc atomic ops only work under -march=i686
IF(CMAKE_SYSTEM_PROCESSOR STREQUAL "i686" AND CMAKE_COMPILER_IS_GNUCC AND
CMAKE_C_COMPILER_VERSION VERSION_LESS "4.1.3")
SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -march=i686")
SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -march=i686")
ENDIF()
CHECK_C_SOURCE(
"
int main()
{
long x;
long y;
long res;
x = 10;
y = 123;
res = __sync_bool_compare_and_swap(&x, x, y);
if (!res || x != y) {
return(1);
}
x = 10;
y = 123;
res = __sync_bool_compare_and_swap(&x, x + 1, y);
if (res || x != 10) {
return(1);
}
x = 10;
y = 123;
res = __sync_add_and_fetch(&x, y);
if (res != 123 + 10 || x != 123 + 10) {
return(1);
}
return(0);
}"
HAVE_IB_GCC_ATOMIC_BUILTINS
)
CHECK_C_SOURCE(
"
int main()
{
long res;
char c;
c = 10;
res = __sync_lock_test_and_set(&c, 123);
if (res != 10 || c != 123) {
return(1);
}
return(0);
}"
HAVE_IB_GCC_ATOMIC_BUILTINS_BYTE
)
CHECK_C_SOURCE(
"#include<stdint.h>
int main()
{
int64_t x,y,res;
x = 10;
y = 123;
res = __sync_sub_and_fetch(&y, x);
if (res != y || y != 113) {
return(1);
}
res = __sync_add_and_fetch(&y, x);
if (res != y || y != 123) {
return(1);
}
return(0);
}"
HAVE_IB_GCC_ATOMIC_BUILTINS_64
)
CHECK_C_SOURCE(
"#include<stdint.h>
int main()
{
__sync_synchronize();
return(0);
}"
HAVE_IB_GCC_SYNC_SYNCHRONISE
)
CHECK_C_SOURCE(
"#include<stdint.h>
int main()
{
__atomic_thread_fence(__ATOMIC_ACQUIRE);
__atomic_thread_fence(__ATOMIC_RELEASE);
return(0);
}"
HAVE_IB_GCC_ATOMIC_THREAD_FENCE
)
CHECK_C_SOURCE(
"#include<stdint.h>
int main()
{
unsigned char c;
__atomic_test_and_set(&c, __ATOMIC_ACQUIRE);
__atomic_clear(&c, __ATOMIC_RELEASE);
return(0);
}"
HAVE_IB_GCC_ATOMIC_TEST_AND_SET
)
CHECK_C_SOURCE_RUNS(
"#include<stdint.h>
int main()
{
unsigned char a = 0;
unsigned char b = 0;
unsigned char c = 1;
__atomic_exchange(&a, &b, &c, __ATOMIC_RELEASE);
__atomic_compare_exchange(&a, &b, &c, 0,
__ATOMIC_RELEASE, __ATOMIC_ACQUIRE);
return(0);
}"
HAVE_IB_GCC_ATOMIC_COMPARE_EXCHANGE
)
CHECK_C_SOURCE_RUNS(
"#include<stdint.h>
int main()
{
unsigned char a = 0;
unsigned char b = 0;
unsigned char c = 1;
__atomic_compare_exchange_n(&a, &b, &c, 0,
__ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
return (0);
}"
HAVE_IB_GCC_ATOMIC_SEQ_CST
)
IF (HAVE_IB_GCC_ATOMIC_SEQ_CST)
ADD_DEFINITIONS(-DHAVE_IB_GCC_ATOMIC_CST=1)
ENDIF()
IF(HAVE_IB_GCC_ATOMIC_BUILTINS)
ADD_DEFINITIONS(-DHAVE_IB_GCC_ATOMIC_BUILTINS=1)
ENDIF()
IF(HAVE_IB_GCC_ATOMIC_BUILTINS_BYTE)
ADD_DEFINITIONS(-DHAVE_IB_GCC_ATOMIC_BUILTINS_BYTE=1)
ENDIF()
IF(HAVE_IB_GCC_ATOMIC_BUILTINS_64)
ADD_DEFINITIONS(-DHAVE_IB_GCC_ATOMIC_BUILTINS_64=1)
ENDIF()
IF(HAVE_IB_GCC_SYNC_SYNCHRONISE)
ADD_DEFINITIONS(-DHAVE_IB_GCC_SYNC_SYNCHRONISE=1)
ENDIF()
IF(HAVE_IB_GCC_ATOMIC_THREAD_FENCE)
ADD_DEFINITIONS(-DHAVE_IB_GCC_ATOMIC_THREAD_FENCE=1)
ENDIF()
IF(HAVE_IB_GCC_ATOMIC_TEST_AND_SET)
ADD_DEFINITIONS(-DHAVE_IB_GCC_ATOMIC_TEST_AND_SET=1)
ENDIF()
IF(HAVE_IB_GCC_ATOMIC_COMPARE_EXCHANGE)
ADD_DEFINITIONS(-DHAVE_IB_GCC_ATOMIC_COMPARE_EXCHANGE=1)
ENDIF()
# either define HAVE_IB_ATOMIC_PTHREAD_T_GCC or not
IF(NOT CMAKE_CROSSCOMPILING)
CHECK_C_SOURCE_RUNS(
"
#include <pthread.h>
#include <string.h>
int main() {
pthread_t x1;
pthread_t x2;
pthread_t x3;
memset(&x1, 0x0, sizeof(x1));
memset(&x2, 0x0, sizeof(x2));
memset(&x3, 0x0, sizeof(x3));
__sync_bool_compare_and_swap(&x1, x2, x3);
return(0);
}"
HAVE_IB_ATOMIC_PTHREAD_T_GCC)
ENDIF()
IF(HAVE_IB_ATOMIC_PTHREAD_T_GCC)
ADD_DEFINITIONS(-DHAVE_IB_ATOMIC_PTHREAD_T_GCC=1)
ENDIF()
# Only use futexes on Linux if GCC atomics are available
IF(NOT MSVC AND NOT CMAKE_CROSSCOMPILING)
......@@ -402,73 +214,6 @@ IF(HAVE_C99_INITIALIZERS)
ADD_DEFINITIONS(-DHAVE_C99_INITIALIZERS)
ENDIF()
# Solaris atomics
IF(CMAKE_SYSTEM_NAME STREQUAL "SunOS")
CHECK_FUNCTION_EXISTS(atomic_cas_ulong HAVE_ATOMIC_CAS_ULONG)
CHECK_FUNCTION_EXISTS(atomic_cas_32 HAVE_ATOMIC_CAS_32)
CHECK_FUNCTION_EXISTS(atomic_cas_64 HAVE_ATOMIC_CAS_64)
CHECK_FUNCTION_EXISTS(atomic_add_long_nv HAVE_ATOMIC_ADD_LONG_NV)
CHECK_FUNCTION_EXISTS(atomic_swap_uchar HAVE_ATOMIC_SWAP_UCHAR)
IF(HAVE_ATOMIC_CAS_ULONG AND
HAVE_ATOMIC_CAS_32 AND
HAVE_ATOMIC_CAS_64 AND
HAVE_ATOMIC_ADD_LONG_NV AND
HAVE_ATOMIC_SWAP_UCHAR)
SET(HAVE_IB_SOLARIS_ATOMICS 1)
ENDIF()
IF(HAVE_IB_SOLARIS_ATOMICS)
ADD_DEFINITIONS(-DHAVE_IB_SOLARIS_ATOMICS=1)
ENDIF()
# either define HAVE_IB_ATOMIC_PTHREAD_T_SOLARIS or not
CHECK_C_SOURCE_COMPILES(
" #include <pthread.h>
#include <string.h>
int main(int argc, char** argv) {
pthread_t x1;
pthread_t x2;
pthread_t x3;
memset(&x1, 0x0, sizeof(x1));
memset(&x2, 0x0, sizeof(x2));
memset(&x3, 0x0, sizeof(x3));
if (sizeof(pthread_t) == 4) {
atomic_cas_32(&x1, x2, x3);
} else if (sizeof(pthread_t) == 8) {
atomic_cas_64(&x1, x2, x3);
} else {
return(1);
}
return(0);
}
" HAVE_IB_ATOMIC_PTHREAD_T_SOLARIS)
CHECK_C_SOURCE_COMPILES(
"#include <mbarrier.h>
int main() {
__machine_r_barrier();
__machine_w_barrier();
return(0);
}"
HAVE_IB_MACHINE_BARRIER_SOLARIS)
IF(HAVE_IB_ATOMIC_PTHREAD_T_SOLARIS)
ADD_DEFINITIONS(-DHAVE_IB_ATOMIC_PTHREAD_T_SOLARIS=1)
ENDIF()
IF(HAVE_IB_MACHINE_BARRIER_SOLARIS)
ADD_DEFINITIONS(-DHAVE_IB_MACHINE_BARRIER_SOLARIS=1)
ENDIF()
ENDIF()
IF(UNIX)
# this is needed to know which one of atomic_cas_32() or atomic_cas_64()
# to use in the source
......@@ -481,11 +226,6 @@ IF(SIZEOF_PTHREAD_T)
ADD_DEFINITIONS(-DSIZEOF_PTHREAD_T=${SIZEOF_PTHREAD_T})
ENDIF()
IF(MSVC)
ADD_DEFINITIONS(-DHAVE_WINDOWS_ATOMICS)
ADD_DEFINITIONS(-DHAVE_WINDOWS_MM_FENCE)
ENDIF()
SET(MUTEXTYPE "event" CACHE STRING "Mutex type: event, sys or futex")
IF(MUTEXTYPE MATCHES "event")
......
......@@ -152,19 +152,6 @@ srv_conc_enter_innodb_with_atomics(
return;
}
if (srv_thread_concurrency == 0) {
if (notified_mysql) {
(void) my_atomic_addlint(
&srv_conc.n_waiting, -1);
thd_wait_end(trx->mysql_thd);
}
return;
}
if (srv_conc.n_active < (lint) srv_thread_concurrency) {
ulint n_active;
......
......@@ -426,12 +426,7 @@ ulong srv_doublewrite_batch_size = 120;
ulong srv_replication_delay = 0;
/*-------------------------------------------*/
#ifdef HAVE_MEMORY_BARRIER
/* No idea to wait long with memory barriers */
UNIV_INTERN ulong srv_n_spin_wait_rounds = 15;
#else
UNIV_INTERN ulong srv_n_spin_wait_rounds = 30;
#endif
ulong srv_spin_wait_delay = 6;
ibool srv_priority_boost = TRUE;
......@@ -1896,8 +1891,6 @@ DECLARE_THREAD(srv_monitor_thread)(
/*********************************************************************//**
A thread which prints warnings about semaphore waits which have lasted
too long. These can be used to track bugs which cause hangs.
Note: In order to make sync_arr_wake_threads_if_sema_free work as expected,
we should avoid waiting any mutexes in this function!
@return a dummy parameter */
extern "C"
os_thread_ret_t
......@@ -1960,12 +1953,6 @@ DECLARE_THREAD(srv_error_monitor_thread)(
eviction policy. */
buf_LRU_stat_update();
/* In case mutex_exit is not a memory barrier, it is
theoretically possible some threads are left waiting though
the semaphore is already released. Wake up those threads: */
sync_arr_wake_threads_if_sema_free();
if (sync_array_print_long_waits(&waiter, &sema)
&& sema == old_sema && os_thread_eq(waiter, old_waiter)) {
#if defined(WITH_WSREP) && defined(WITH_INNODB_DISALLOW_WRITES)
......
......@@ -1517,17 +1517,6 @@ innobase_start_or_create_for_mysql(void)
ib::info() << "Mutexes and rw_locks use GCC atomic builtins";
#endif
ib::info() << MUTEX_TYPE;
ib::info() << IB_MEMORY_BARRIER_STARTUP_MSG;
#ifndef HAVE_MEMORY_BARRIER
#if defined __i386__ || defined __x86_64__ || defined _M_IX86 || defined _M_X64 || defined _WIN32
#else
ib::warn() << "MySQL was built without a memory barrier capability on"
" this architecture, which might allow a mutex/rw_lock"
" violation under high thread concurrency. This may cause a"
" hang.";
#endif /* IA32 or AMD64 */
#endif /* HAVE_MEMORY_BARRIER */
ib::info() << "Compressed tables use zlib " ZLIB_VERSION
#ifdef UNIV_ZIP_DEBUG
......
......@@ -961,81 +961,6 @@ sync_array_detect_deadlock(
}
#endif /* UNIV_DEBUG */
/******************************************************************//**
Determines if we can wake up the thread waiting for a semaphore. */
static
bool
sync_arr_cell_can_wake_up(
/*======================*/
sync_cell_t* cell) /*!< in: cell to search */
{
rw_lock_t* lock;
switch (cell->request_type) {
WaitMutex* mutex;
BlockWaitMutex* bpmutex;
case SYNC_MUTEX:
mutex = cell->latch.mutex;
os_rmb;
if (mutex->state() == MUTEX_STATE_UNLOCKED) {
return(true);
}
break;
case SYNC_BUF_BLOCK:
bpmutex = cell->latch.bpmutex;
os_rmb;
if (bpmutex->state() == MUTEX_STATE_UNLOCKED) {
return(true);
}
break;
case RW_LOCK_X:
case RW_LOCK_SX:
lock = cell->latch.lock;
os_rmb;
if (lock->lock_word > X_LOCK_HALF_DECR) {
/* Either unlocked or only read locked. */
return(true);
}
break;
case RW_LOCK_X_WAIT:
lock = cell->latch.lock;
/* lock_word == 0 means all readers or sx have left */
os_rmb;
if (lock->lock_word == 0) {
return(true);
}
break;
case RW_LOCK_S:
lock = cell->latch.lock;
/* lock_word > 0 means no writer or reserved writer */
os_rmb;
if (lock->lock_word > 0) {
return(true);
}
}
return(false);
}
/**********************************************************************//**
Increments the signalled count. */
void
......@@ -1045,58 +970,6 @@ sync_array_object_signalled()
++sg_count;
}
/**********************************************************************//**
If the wakeup algorithm does not work perfectly at semaphore releases,
this function will do the waking (see the comment in mutex_exit). This
function should be called about every 1 second in the server.
Note that there's a race condition between this thread and mutex_exit
changing the lock_word and calling signal_object, so sometimes this finds
threads to wake up even when nothing has gone wrong. */
static
void
sync_array_wake_threads_if_sema_free_low(
/*=====================================*/
sync_array_t* arr) /* in/out: wait array */
{
sync_array_enter(arr);
for (ulint i = 0; i < arr->next_free_slot; ++i) {
sync_cell_t* cell;
cell = sync_array_get_nth_cell(arr, i);
if (cell->latch.mutex != 0 && sync_arr_cell_can_wake_up(cell)) {
os_event_t event;
event = sync_cell_get_event(cell);
os_event_set(event);
}
}
sync_array_exit(arr);
}
/**********************************************************************//**
If the wakeup algorithm does not work perfectly at semaphore releases,
this function will do the waking (see the comment in mutex_exit). This
function should be called about every 1 second in the server.
Note that there's a race condition between this thread and mutex_exit
changing the lock_word and calling signal_object, so sometimes this finds
threads to wake up even when nothing has gone wrong. */
void
sync_arr_wake_threads_if_sema_free(void)
/*====================================*/
{
for (ulint i = 0; i < sync_array_size; ++i) {
sync_array_wake_threads_if_sema_free_low(
sync_wait_array[i]);
}
}
/**********************************************************************//**
Prints warnings of long semaphore waits to stderr.
@return TRUE if fatal semaphore wait threshold was exceeded */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment