Commit baf6b527 authored by inaam

branches/5.1: Port of r2267

This is a combination of changes that forward-port the scalability fix applied to
5.0 through r1001.
It reverts changes r149 and r122 (these were 5.1-specific changes made in lieu of
the 5.0 scalability fix).
Then it applies r1001 to 5.1, which is the original scalability fix.
Finally, it applies r2082, which fixes an issue with the original fix.

Reviewed by: Heikki
parent fc40679f
@@ -112,9 +112,13 @@ os_event_set(
 	os_event_t	event);	/* in: event to set */
 /**************************************************************
 Resets an event semaphore to the nonsignaled state. Waiting threads will
-stop to wait for the event. */
-void
+stop to wait for the event.
+The return value should be passed to os_event_wait_low() if it is desired
+that this thread should not wait in case of an intervening call to
+os_event_set() between this os_event_reset() and the
+os_event_wait_low() call. See comments for os_event_wait_low(). */
+ib_longlong
 os_event_reset(
 /*===========*/
 	os_event_t	event);	/* in: event to reset */
@@ -125,16 +129,38 @@ void
 void
 os_event_free(
 /*==========*/
 	os_event_t	event);	/* in: event to free */
 /**************************************************************
 Waits for an event object until it is in the signaled state. If
 srv_shutdown_state == SRV_SHUTDOWN_EXIT_THREADS this also exits the
 waiting thread when the event becomes signaled (or immediately if the
-event is already in the signaled state). */
+event is already in the signaled state).
+Typically, if the event has been signalled after the os_event_reset()
+we'll return immediately because event->is_set == TRUE.
+There are, however, situations (e.g.: sync_array code) where we may
+lose this information. For example:
+	thread A calls os_event_reset()
+	thread B calls os_event_set()   [event->is_set == TRUE]
+	thread C calls os_event_reset() [event->is_set == FALSE]
+	thread A calls os_event_wait()  [infinite wait!]
+	thread C calls os_event_wait()  [infinite wait!]
+Where such a scenario is possible, to avoid infinite wait, the
+value returned by os_event_reset() should be passed in as
+reset_sig_count. */
+#define os_event_wait(event) os_event_wait_low((event), 0)
+
 void
-os_event_wait(
-/*==========*/
-	os_event_t	event);	/* in: event to wait */
+os_event_wait_low(
+/*==============*/
+	os_event_t	event,		/* in: event to wait */
+	ib_longlong	reset_sig_count);/* in: zero or the value
+					returned by previous call of
+					os_event_reset(). */
 /**************************************************************
 Waits for an event object until it is in the signaled state or
 a timeout is exceeded. In Unix the timeout is always infinite. */
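
A caller that must not miss a wakeup captures the generation count at reset
time and passes it to the low-level wait; plain os_event_wait() keeps the old
semantics by passing zero through the macro above. A minimal usage sketch,
assuming a hypothetical recheck of the guarded resource (not code from this
commit):

	ib_longlong	sig_count;

	/* Snapshot the signal generation before announcing the wait */
	sig_count = os_event_reset(event);

	if (resource_available()) {	/* hypothetical recheck */
		return;			/* acquired; no need to sleep */
	}

	/* Returns immediately if any os_event_set() has happened since
	the reset above, even if a third thread reset the event again
	in between. */
	os_event_wait_low(event, sig_count);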
@@ -66,26 +66,21 @@ sync_array_wait_event(
 	sync_array_t*	arr,	/* in: wait array */
 	ulint		index);	/* in: index of the reserved cell */
 /**********************************************************************
-Frees the cell safely by reserving the sync array mutex and decrementing
-n_reserved if necessary. Should only be called from mutex_spin_wait. */
+Frees the cell. NOTE! sync_array_wait_event frees the cell
+automatically! */
 void
-sync_array_free_cell_protected(
-/*===========================*/
+sync_array_free_cell(
+/*=================*/
 	sync_array_t*	arr,	/* in: wait array */
 	ulint		index);	/* in: index of the cell in array */
 /**************************************************************************
-Looks for the cells in the wait array which refer
-to the wait object specified,
-and sets their corresponding events to the signaled state. In this
-way releases the threads waiting for the object to contend for the object.
-It is possible that no such cell is found, in which case does nothing. */
+Note that one of the wait objects was signalled. */
 void
-sync_array_signal_object(
-/*=====================*/
-	sync_array_t*	arr,	/* in: wait array */
-	void*		object);/* in: wait object */
+sync_array_object_signalled(
+/*========================*/
+	sync_array_t*	arr);	/* in: wait array */
 /**************************************************************************
 If the wakeup algorithm does not work perfectly at semaphore releases,
 this function will do the waking (see the comment in mutex_exit). This
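
Under the new scheme the releasing thread no longer scans the wait array for
cells that reference the released object; it signals the object's own event
(see the rw-lock and mutex hunks below) and merely records in the array that
some object was signalled. A minimal sketch of what
sync_array_object_signalled() may reduce to; the sg_count field name and the
mutex protection are assumptions, not taken from this diff:

void
sync_array_object_signalled(
/*========================*/
	sync_array_t*	arr)	/* in: wait array */
{
	sync_array_enter(arr);	/* assumed: protected by the array's
				own mutex */
	arr->sg_count++;	/* hypothetical counter, consulted when
				checking for missed wakeups */
	sync_array_exit(arr);
}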
@@ -421,6 +421,18 @@ blocked by readers, a writer may queue for the lock by setting the writer
 field. Then no new readers are allowed in. */
 
 struct rw_lock_struct {
+	os_event_t	event;	/* Used by sync0arr.c for thread queueing */
+#ifdef __WIN__
+	os_event_t	wait_ex_event;	/* This windows specific event is
+					used by the thread which has set the
+					lock state to RW_LOCK_WAIT_EX. The
+					rw_lock design guarantees that this
+					thread will be the next one to proceed
+					once the event gets signalled. See
+					LEMMA 2 in sync0sync.c */
+#endif
 	ulint	reader_count;	/* Number of readers who have locked this
 				lock in the shared mode */
 	ulint	writer;		/* This field is set to RW_LOCK_EX if there
@@ -381,7 +381,11 @@ rw_lock_s_unlock_func(
 	mutex_exit(mutex);
 
 	if (UNIV_UNLIKELY(sg)) {
-		sync_array_signal_object(sync_primary_wait_array, lock);
+#ifdef __WIN__
+		os_event_set(lock->wait_ex_event);
+#endif
+		os_event_set(lock->event);
+		sync_array_object_signalled(sync_primary_wait_array);
 	}
 
 	ut_ad(rw_lock_validate(lock));
@@ -461,7 +465,11 @@ rw_lock_x_unlock_func(
 	mutex_exit(&(lock->mutex));
 
 	if (UNIV_UNLIKELY(sg)) {
-		sync_array_signal_object(sync_primary_wait_array, lock);
+#ifdef __WIN__
+		os_event_set(lock->wait_ex_event);
+#endif
+		os_event_set(lock->event);
+		sync_array_object_signalled(sync_primary_wait_array);
 	}
 
 	ut_ad(rw_lock_validate(lock));
@@ -470,6 +470,7 @@ Do not use its fields directly! The structure used in the spin lock
 implementation of a mutual exclusion semaphore. */
 
 struct mutex_struct {
+	os_event_t	event;	/* Used by sync0arr.c for the wait queue */
 	ulint	lock_word;	/* This ulint is the target of the atomic
 				test-and-set instruction in Win32 */
 #if !defined(_WIN32) || !defined(UNIV_CAN_USE_X86_ASSEMBLER)
@@ -211,7 +211,7 @@ mutex_exit(
 	perform the read first, which could leave a waiting
 	thread hanging indefinitely.
 
-	Our current solution is to call every 10 seconds
+	Our current solution is to call every second
 	sync_arr_wake_threads_if_sema_free()
 	to wake up possible hanging threads if
 	they are missed in mutex_signal_object. */
@@ -21,6 +21,7 @@ Created 9/6/1995 Heikki Tuuri
 /* Type definition for an operating system mutex struct */
 struct os_mutex_struct{
+	os_event_t	event;	/* Used by sync0arr.c for queueing threads */
 	void*	handle;	/* OS handle to mutex */
 	ulint	count;	/* we use this counter to check
 			that the same thread does not
@@ -35,6 +36,7 @@ struct os_mutex_struct{
 /* Mutex protecting counts and the lists of OS mutexes and events */
 os_mutex_t	os_sync_mutex;
 ibool	os_sync_mutex_inited	= FALSE;
+ibool	os_sync_free_called	= FALSE;
 
 /* This is incremented by 1 in os_thread_create and decremented by 1 in
 os_thread_exit */
@@ -50,6 +52,10 @@ ulint	os_event_count		= 0;
 ulint	os_mutex_count		= 0;
 ulint	os_fast_mutex_count	= 0;
 
+/* Because a mutex is embedded inside an event and there is an
+event embedded inside a mutex, on free, this generates a recursive call.
+This version of the free event function doesn't acquire the global lock */
+static void os_event_free_internal(os_event_t	event);
 
 /*************************************************************
 Initializes global event and OS 'slow' mutex lists. */
@@ -76,6 +82,7 @@ os_sync_free(void)
 	os_event_t	event;
 	os_mutex_t	mutex;
 
+	os_sync_free_called = TRUE;
 	event = UT_LIST_GET_FIRST(os_event_list);
 
 	while (event) {
@@ -99,6 +106,7 @@ os_sync_free(void)
 		mutex = UT_LIST_GET_FIRST(os_mutex_list);
 	}
 
+	os_sync_free_called = FALSE;
 }
 
 /*************************************************************
@@ -144,17 +152,31 @@ os_event_create(
 	ut_a(0 == pthread_cond_init(&(event->cond_var), NULL));
 #endif
 	event->is_set = FALSE;
-	event->signal_count = 0;
+
+	/* We return this value in os_event_reset(), which can then
+	be used to pass to os_event_wait_low(). The value of zero
+	is reserved in os_event_wait_low() for the case when the
+	caller does not want to pass any signal_count value. To
+	distinguish between the two cases we initialize signal_count
+	to 1 here. */
+	event->signal_count = 1;
 #endif /* __WIN__ */
 
-	/* Put to the list of events */
-	os_mutex_enter(os_sync_mutex);
+	/* The os_sync_mutex can be NULL because during startup an event
+	can be created [ because it's embedded in the mutex/rwlock ] before
+	this module has been initialized */
+	if (os_sync_mutex != NULL) {
+		os_mutex_enter(os_sync_mutex);
+	}
 
+	/* Put to the list of events */
 	UT_LIST_ADD_FIRST(os_event_list, os_event_list, event);
 
 	os_event_count++;
 
-	os_mutex_exit(os_sync_mutex);
+	if (os_sync_mutex != NULL) {
+		os_mutex_exit(os_sync_mutex);
+	}
 
 	return(event);
 }
@@ -231,13 +253,20 @@ os_event_set(
 /**************************************************************
 Resets an event semaphore to the nonsignaled state. Waiting threads will
-stop to wait for the event. */
+stop to wait for the event.
+The return value should be passed to os_event_wait_low() if it is desired
+that this thread should not wait in case of an intervening call to
+os_event_set() between this os_event_reset() and the
+os_event_wait_low() call. See comments for os_event_wait_low(). */
 
-void
+ib_longlong
 os_event_reset(
 /*===========*/
+				/* out: current signal_count. */
 	os_event_t	event)	/* in: event to reset */
 {
+	ib_longlong	ret = 0;
+
 #ifdef __WIN__
 	ut_a(event);
@@ -252,9 +281,40 @@ os_event_reset(
 	} else {
 		event->is_set = FALSE;
 	}
+
+	ret = event->signal_count;
+
 	os_fast_mutex_unlock(&(event->os_mutex));
 #endif
+
+	return(ret);
+}
+
+/**************************************************************
+Frees an event object, without acquiring the global lock. */
+static
+void
+os_event_free_internal(
+/*===================*/
+	os_event_t	event)	/* in: event to free */
+{
+#ifdef __WIN__
+	ut_a(event);
+
+	ut_a(CloseHandle(event->handle));
+#else
+	ut_a(event);
+
+	/* This is to avoid freeing the mutex twice */
+	os_fast_mutex_free(&(event->os_mutex));
+
+	ut_a(0 == pthread_cond_destroy(&(event->cond_var)));
+#endif
+	/* Remove from the list of events */
+	UT_LIST_REMOVE(os_event_list, os_event_list, event);
+
+	os_event_count--;
+
+	ut_free(event);
 }
 
 /**************************************************************
@@ -293,18 +353,38 @@ os_event_free(
 Waits for an event object until it is in the signaled state. If
 srv_shutdown_state == SRV_SHUTDOWN_EXIT_THREADS this also exits the
 waiting thread when the event becomes signaled (or immediately if the
-event is already in the signaled state). */
+event is already in the signaled state).
+Typically, if the event has been signalled after the os_event_reset()
+we'll return immediately because event->is_set == TRUE.
+There are, however, situations (e.g.: sync_array code) where we may
+lose this information. For example:
+	thread A calls os_event_reset()
+	thread B calls os_event_set()   [event->is_set == TRUE]
+	thread C calls os_event_reset() [event->is_set == FALSE]
+	thread A calls os_event_wait()  [infinite wait!]
+	thread C calls os_event_wait()  [infinite wait!]
+Where such a scenario is possible, to avoid infinite wait, the
+value returned by os_event_reset() should be passed in as
+reset_sig_count. */
 
 void
-os_event_wait(
-/*==========*/
-	os_event_t	event)	/* in: event to wait */
+os_event_wait_low(
+/*==============*/
+	os_event_t	event,		/* in: event to wait */
+	ib_longlong	reset_sig_count)/* in: zero or the value
+					returned by previous call of
+					os_event_reset(). */
 {
 #ifdef __WIN__
 	DWORD	err;
 
 	ut_a(event);
 
+	UT_NOT_USED(reset_sig_count);
+
 	/* Specify an infinite time limit for waiting */
 	err = WaitForSingleObject(event->handle, INFINITE);
@@ -318,7 +398,11 @@ os_event_wait(
 	os_fast_mutex_lock(&(event->os_mutex));
 
-	old_signal_count = event->signal_count;
+	if (reset_sig_count) {
+		old_signal_count = reset_sig_count;
+	} else {
+		old_signal_count = event->signal_count;
+	}
 
 	for (;;) {
 		if (event->is_set == TRUE
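
The loop body is cut off by this hunk. Plausibly it sleeps on the condition
variable until either is_set becomes TRUE or signal_count has moved past
old_signal_count; a sketch under that assumption, not the verbatim
continuation:

	for (;;) {
		if (event->is_set == TRUE
		    || event->signal_count != old_signal_count) {

			os_fast_mutex_unlock(&(event->os_mutex));

			/* (shutdown-exit handling omitted here) */
			return;
		}

		/* Releases os_mutex while sleeping; the condition is
		rechecked on wakeup, so spurious wakeups are harmless */
		pthread_cond_wait(&(event->cond_var), &(event->os_mutex));
	}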
@@ -458,6 +542,7 @@ os_mutex_create(
 	mutex_str->handle = mutex;
 	mutex_str->count = 0;
+	mutex_str->event = os_event_create(NULL);
 
 	if (os_sync_mutex_inited) {
 		/* When creating os_sync_mutex itself we cannot reserve it */
@@ -534,6 +619,10 @@ os_mutex_free(
 {
 	ut_a(mutex);
 
+	if (!os_sync_free_called) {
+		os_event_free_internal(mutex->event);
+	}
+
 	if (os_sync_mutex_inited) {
 		os_mutex_enter(os_sync_mutex);
 	}
@@ -1904,12 +1904,6 @@ loop:
 	os_thread_sleep(1000000);
 
-	/* In case mutex_exit is not a memory barrier, it is
-	theoretically possible some threads are left waiting though
-	the semaphore is already released. Wake up those threads: */
-
-	sync_arr_wake_threads_if_sema_free();
-
 	current_time = time(NULL);
 
 	time_elapsed = difftime(current_time, last_monitor_time);
@@ -2106,9 +2100,15 @@ loop:
 		srv_refresh_innodb_monitor_stats();
 	}
 
+	/* In case mutex_exit is not a memory barrier, it is
+	theoretically possible some threads are left waiting though
+	the semaphore is already released. Wake up those threads: */
+
+	sync_arr_wake_threads_if_sema_free();
+
 	if (sync_array_print_long_waits()) {
 		fatal_cnt++;
-		if (fatal_cnt > 5) {
+		if (fatal_cnt > 10) {
 			fprintf(stderr,
 			"InnoDB: Error: semaphore wait has lasted"
@@ -2128,7 +2128,7 @@ loop:
 	fflush(stderr);
 
-	os_thread_sleep(2000000);
+	os_thread_sleep(1000000);
 
 	if (srv_shutdown_state < SRV_SHUTDOWN_CLEANUP) {
(One file's diff is collapsed in this view; judging from the header changes above, it is the sync0arr.c implementation.)
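
That collapsed diff presumably carries the wait-side counterpart of the
header changes above: a cell now waits on the event embedded in its wait
object instead of a per-cell event. A sketch of the event-selection helper
such code needs; the name sync_cell_get_event and the cell field names are
assumptions, not taken from this page:

static
os_event_t
sync_cell_get_event(
/*================*/
	sync_cell_t*	cell)	/* in: non-empty sync array cell */
{
	ulint	type = cell->request_type;

	if (type == SYNC_MUTEX) {
		return(((mutex_t*) cell->wait_object)->event);
#ifdef __WIN__
	} else if (type == RW_LOCK_WAIT_EX) {
		/* the thread that set RW_LOCK_WAIT_EX waits on a
		dedicated event; see sync0rw.h above */
		return(((rw_lock_t*) cell->wait_object)->wait_ex_event);
#endif
	} else {
		/* shared and exclusive waiters share one event */
		return(((rw_lock_t*) cell->wait_object)->event);
	}
}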
@@ -151,6 +151,11 @@ rw_lock_create_func(
 	lock->last_x_file_name = "not yet reserved";
 	lock->last_s_line = 0;
 	lock->last_x_line = 0;
+	lock->event = os_event_create(NULL);
+
+#ifdef __WIN__
+	lock->wait_ex_event = os_event_create(NULL);
+#endif
 
 	mutex_enter(&rw_lock_list_mutex);
@@ -184,6 +189,11 @@ rw_lock_free(
 	mutex_free(rw_lock_get_mutex(lock));
 
 	mutex_enter(&rw_lock_list_mutex);
+	os_event_free(lock->event);
+
+#ifdef __WIN__
+	os_event_free(lock->wait_ex_event);
+#endif
 
 	if (UT_LIST_GET_PREV(list, lock)) {
 		ut_a(UT_LIST_GET_PREV(list, lock)->magic_n == RW_LOCK_MAGIC_N);
@@ -544,7 +554,15 @@ lock_loop:
 	rw_x_system_call_count++;
 
 	sync_array_reserve_cell(sync_primary_wait_array,
-				lock, RW_LOCK_EX,
+				lock,
+#ifdef __WIN__
+				/* On windows RW_LOCK_WAIT_EX signifies
+				that this thread should wait on the
+				special wait_ex_event. */
+				(state == RW_LOCK_WAIT_EX)
+				? RW_LOCK_WAIT_EX :
+#endif
+				RW_LOCK_EX,
 				file_name, line,
 				&index);
@@ -95,17 +95,47 @@ have happened that the thread which was holding the mutex has just released
 it and did not see the waiters byte set to 1, a case which would lead the
 other thread to an infinite wait.
 
-LEMMA 1: After a thread resets the event of the cell it reserves for waiting
-========
-for a mutex, some thread will eventually call sync_array_signal_object with
-the mutex as an argument. Thus no infinite wait is possible.
+LEMMA 1: After a thread resets the event of a mutex (or rw_lock), some
+=======
+thread will eventually call os_event_set() on that particular event.
+Thus no infinite wait is possible in this case.
 
 Proof: After making the reservation the thread sets the waiters field in the
 mutex to 1. Then it checks that the mutex is still reserved by some thread,
 or it reserves the mutex for itself. In any case, some thread (which may be
 also some earlier thread, not necessarily the one currently holding the mutex)
 will set the waiters field to 0 in mutex_exit, and then call
-sync_array_signal_object with the mutex as an argument.
+os_event_set() with the mutex as an argument.
+Q.E.D.
+
+LEMMA 2: If an os_event_set() call is made after some thread has called
+=======
+the os_event_reset() and before it starts to wait on that event, the call
+will not be lost to the second thread. This is true even if there is an
+intervening call to os_event_reset() by another thread.
+Thus no infinite wait is possible in this case.
+
+Proof (non-windows platforms): os_event_reset() returns a monotonically
+increasing value of signal_count. This value is increased at every
+call of os_event_set(). If thread A has called os_event_reset() followed
+by thread B calling os_event_set() and then some other thread C calling
+os_event_reset(), the is_set flag of the event will be set to FALSE;
+but now if thread A calls os_event_wait_low() with the signal_count
+value returned from the earlier call of os_event_reset(), it will
+return immediately without waiting.
+Q.E.D.
+
+Proof (windows): If there is a writer thread which is forced to wait for
+the lock, it may be able to set the state of rw_lock to RW_LOCK_WAIT_EX.
+The design of rw_lock ensures that there is one and only one thread
+that is able to change the state to RW_LOCK_WAIT_EX and this thread is
+guaranteed to acquire the lock after it is released by the current
+holders and before any other waiter gets the lock.
+On windows this thread waits on a separate event i.e.: wait_ex_event.
+Since only one thread can wait on this event there is no chance
+of this event getting reset before the writer starts to wait on it.
+Therefore, this thread is guaranteed to catch the os_event_set()
+signalled unconditionally at the release of the lock.
 
 Q.E.D. */
 
 /* The number of system calls made in this module. Intended for performance
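
The non-windows proof of LEMMA 2 leans on signal_count only ever increasing.
On the signalling side that looks roughly like the following sketch of
os_event_set(), simplified from the usual pthread-based implementation and
not verbatim from this commit:

void
os_event_set(
/*=========*/
	os_event_t	event)	/* in: event to set */
{
	os_fast_mutex_lock(&(event->os_mutex));

	if (!event->is_set) {
		event->is_set = TRUE;
		event->signal_count += 1;	/* monotonic: a waiter
						holding a stale snapshot
						from os_event_reset()
						must see the change */
		ut_a(0 == pthread_cond_broadcast(&(event->cond_var)));
	}

	os_fast_mutex_unlock(&(event->os_mutex));
}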
@@ -217,6 +247,7 @@ mutex_create_func(
 	os_fast_mutex_init(&(mutex->os_fast_mutex));
 	mutex->lock_word = 0;
 #endif
+	mutex->event = os_event_create(NULL);
 	mutex_set_waiters(mutex, 0);
 
 #ifdef UNIV_DEBUG
 	mutex->magic_n = MUTEX_MAGIC_N;
@@ -300,6 +331,8 @@ mutex_free(
 		mutex_exit(&mutex_list_mutex);
 	}
 
+	os_event_free(mutex->event);
+
 #if !defined(_WIN32) || !defined(UNIV_CAN_USE_X86_ASSEMBLER)
 	os_fast_mutex_free(&(mutex->os_fast_mutex));
 #endif
@@ -509,8 +542,7 @@ spin_loop:
 	if (mutex_test_and_set(mutex) == 0) {
 		/* Succeeded! Free the reserved wait cell */
 
-		sync_array_free_cell_protected(sync_primary_wait_array,
-			index);
+		sync_array_free_cell(sync_primary_wait_array, index);
 
 		ut_d(mutex->thread_id = os_thread_get_curr_id());
 #ifdef UNIV_SYNC_DEBUG
@@ -591,8 +623,8 @@ mutex_signal_object(
 	/* The memory order of resetting the waiters field and
 	signaling the object is important. See LEMMA 1 above. */
 
-	sync_array_signal_object(sync_primary_wait_array, mutex);
+	os_event_set(mutex->event);
+	sync_array_object_signalled(sync_primary_wait_array);
 }
 
 #ifdef UNIV_SYNC_DEBUG
@@ -1130,6 +1162,7 @@ sync_thread_add_level(
 		break;
 	case SYNC_TREE_NODE:
 		ut_a(sync_thread_levels_contain(array, SYNC_INDEX_TREE)
+		      || sync_thread_levels_contain(array, SYNC_DICT_OPERATION)
 		      || sync_thread_levels_g(array, SYNC_TREE_NODE - 1));
 		break;
 	case SYNC_TREE_NODE_NEW: