Commit e1602b8d authored by Mikael Ronstrom

merge Google SMP patch

parents b353b37d 1732095b
@@ -333,7 +333,7 @@ btr_cur_search_to_nth_level(
 #ifdef UNIV_SEARCH_PERF_STAT
 	info->n_searches++;
 #endif
-	if (btr_search_latch.writer == RW_LOCK_NOT_LOCKED
+	if (rw_lock_get_writer(&btr_search_latch) == RW_LOCK_NOT_LOCKED
 	    && latch_mode <= BTR_MODIFY_LEAF && info->last_hash_succ
 	    && !estimate
 #ifdef PAGE_CUR_LE_OR_EXTENDS
...
@@ -748,8 +748,8 @@ btr_search_guess_on_hash(
 		rw_lock_s_lock(&btr_search_latch);
 	}
 
-	ut_ad(btr_search_latch.writer != RW_LOCK_EX);
-	ut_ad(btr_search_latch.reader_count > 0);
+	ut_ad(rw_lock_get_writer(&btr_search_latch) != RW_LOCK_EX);
+	ut_ad(rw_lock_get_reader_count(&btr_search_latch) > 0);
 
 	rec = ha_search_and_get_data(btr_search_sys->hash_index, fold);
...
@@ -1277,8 +1277,8 @@ buf_page_get_gen(
 	if (mode == BUF_GET_NOWAIT) {
 		if (rw_latch == RW_S_LATCH) {
-			success = rw_lock_s_lock_func_nowait(&(block->lock),
+			success = rw_lock_s_lock_nowait(&(block->lock),
 							file, line);
 			fix_type = MTR_MEMO_PAGE_S_FIX;
 		} else {
 			ut_ad(rw_latch == RW_X_LATCH);
@@ -1403,8 +1403,8 @@ buf_page_optimistic_get_func(
 	ut_ad(!ibuf_inside() || ibuf_page(block->space, block->offset));
 
 	if (rw_latch == RW_S_LATCH) {
-		success = rw_lock_s_lock_func_nowait(&(block->lock),
+		success = rw_lock_s_lock_nowait(&(block->lock),
 						file, line);
 		fix_type = MTR_MEMO_PAGE_S_FIX;
 	} else {
 		success = rw_lock_x_lock_func_nowait(&(block->lock),
@@ -1534,8 +1534,8 @@ buf_page_get_known_nowait(
 	ut_ad(!ibuf_inside() || (mode == BUF_KEEP_OLD));
 
 	if (rw_latch == RW_S_LATCH) {
-		success = rw_lock_s_lock_func_nowait(&(block->lock),
+		success = rw_lock_s_lock_nowait(&(block->lock),
 						file, line);
 		fix_type = MTR_MEMO_PAGE_S_FIX;
 	} else {
 		success = rw_lock_x_lock_func_nowait(&(block->lock),
...
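(Note: all three buf0buf.c call sites above move from the removed rw_lock_s_lock_func_nowait to the rw_lock_s_lock_nowait macro; as the sync0rw.h hunks further down show, that macro now takes the file and line arguments explicitly and expands to rw_lock_s_lock_low.)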
@@ -389,6 +389,10 @@ static SHOW_VAR innodb_status_variables[]= {
   (char*) &export_vars.innodb_dblwr_pages_written,	SHOW_LONG},
   {"dblwr_writes",
   (char*) &export_vars.innodb_dblwr_writes,		SHOW_LONG},
+  {"have_atomic_builtins",
+  (char*) &export_vars.innodb_have_atomic_builtins,	SHOW_BOOL},
+  {"heap_enabled",
+  (char*) &export_vars.innodb_heap_enabled,		SHOW_BOOL},
   {"log_waits",
   (char*) &export_vars.innodb_log_waits,		SHOW_LONG},
   {"log_write_requests",
...
@@ -6899,6 +6903,7 @@ innodb_mutex_show_status(
 {
 	char	buf1[IO_SIZE], buf2[IO_SIZE];
 	mutex_t*	mutex;
+	rw_lock_t*	lock;
 #ifdef UNIV_DEBUG
 	ulint	rw_lock_count= 0;
 	ulint	rw_lock_count_spin_loop= 0;
...
@@ -6969,6 +6974,31 @@ innodb_mutex_show_status(
 	mutex_exit_noninline(&mutex_list_mutex);
 
+	mutex_enter_noninline(&rw_lock_list_mutex);
+	lock = UT_LIST_GET_FIRST(rw_lock_list);
+
+	while (lock != NULL)
+	{
+		if (lock->count_os_wait)
+		{
+			buf1len= my_snprintf(buf1, sizeof(buf1), "%s:%lu",
+					lock->cfile_name, (ulong) lock->cline);
+			buf2len= my_snprintf(buf2, sizeof(buf2),
+					"os_waits=%lu", lock->count_os_wait);
+
+			if (stat_print(thd, innobase_hton_name,
+					hton_name_len, buf1, buf1len,
+					buf2, buf2len)) {
+				mutex_exit_noninline(&rw_lock_list_mutex);
+				DBUG_RETURN(1);
+			}
+		}
+		lock = UT_LIST_GET_NEXT(list, lock);
+	}
+	mutex_exit_noninline(&rw_lock_list_mutex);
+
 #ifdef UNIV_DEBUG
 	buf2len= my_snprintf(buf2, sizeof(buf2),
 			"count=%lu, spin_waits=%lu, spin_rounds=%lu, "
...
@@ -7001,6 +7031,7 @@ bool innobase_show_status(handlerton *hton, THD* thd,
 		return FALSE;
 	}
 }
+	rw_lock_t*	lock;
 
 /****************************************************************************
...
@@ -513,7 +513,7 @@ buf_block_buf_fix_inc_debug(
 {
 	ibool	ret;
 
-	ret = rw_lock_s_lock_func_nowait(&(block->debug_latch), file, line);
+	ret = rw_lock_s_lock_nowait(&(block->debug_latch), file, line);
 	ut_ad(ret == TRUE);
 	ut_ad(mutex_own(&block->mutex));
...
@@ -261,6 +261,29 @@ os_fast_mutex_free(
 /*===============*/
 	os_fast_mutex_t*	fast_mutex);	/* in: mutex to free */
+#ifdef HAVE_GCC_ATOMIC_BUILTINS
+/**************************************************************
+Atomic compare-and-swap for InnoDB. Currently requires GCC atomic builtins. */
+UNIV_INLINE
+ibool
+os_compare_and_swap(
+/*================*/
+				/* out: true if swapped */
+	volatile lint*	ptr,	/* in: pointer to target */
+	lint		oldVal,	/* in: value to compare to */
+	lint		newVal);/* in: value to swap in */
+/**************************************************************
+Atomic increment for InnoDB. Currently requires GCC atomic builtins. */
+UNIV_INLINE
+lint
+os_atomic_increment(
+/*================*/
+				/* out: resulting value */
+	volatile lint*	ptr,	/* in: pointer to target */
+	lint		amount);/* in: amount of increment */
+#endif /* HAVE_GCC_ATOMIC_BUILTINS */
 #ifndef UNIV_NONINL
 #include "os0sync.ic"
 #endif
...
@@ -44,3 +44,38 @@ os_fast_mutex_trylock(
 #endif
 #endif
 }
+
+#ifdef HAVE_GCC_ATOMIC_BUILTINS
+/**************************************************************
+Atomic compare-and-swap for InnoDB. Currently requires GCC atomic builtins. */
+UNIV_INLINE
+ibool
+os_compare_and_swap(
+/*================*/
+				/* out: true if swapped */
+	volatile lint*	ptr,	/* in: pointer to target */
+	lint		oldVal,	/* in: value to compare to */
+	lint		newVal)	/* in: value to swap in */
+{
+	if (__sync_bool_compare_and_swap(ptr, oldVal, newVal)) {
+		return(TRUE);
+	}
+
+	return(FALSE);
+}
+
+/**************************************************************
+Atomic increment for InnoDB. Currently requires GCC atomic builtins. */
+UNIV_INLINE
+lint
+os_atomic_increment(
+/*================*/
+				/* out: resulting value */
+	volatile lint*	ptr,	/* in: pointer to target */
+	lint		amount)	/* in: amount of increment */
+{
+	lint	newVal = __sync_add_and_fetch(ptr, amount);
+
+	return(newVal);
+}
+#endif /* HAVE_GCC_ATOMIC_BUILTINS */
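The two wrappers above map directly onto GCC's __sync builtins. A minimal usage sketch, with hypothetical variable names not taken from the patch:

#ifdef HAVE_GCC_ATOMIC_BUILTINS
static volatile lint	stat_count = 0;	/* hypothetical counter */
static volatile lint	init_done = 0;	/* hypothetical one-shot flag */

void
example_usage(void)
{
	/* lock-free statistics counter; returns the post-increment value */
	lint	n = os_atomic_increment(&stat_count, 1);

	/* one-shot initialization: only the thread that swaps 0 -> 1 wins */
	if (os_compare_and_swap(&init_done, 0, 1)) {
		/* perform the initialization exactly once */
	}
	(void) n;
}
#endif /* HAVE_GCC_ATOMIC_BUILTINS */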
@@ -527,6 +527,8 @@ struct export_var_struct{
 	ulint innodb_buffer_pool_read_ahead_rnd;
 	ulint innodb_dblwr_pages_written;
 	ulint innodb_dblwr_writes;
+	ibool innodb_have_atomic_builtins;
+	ibool innodb_heap_enabled;
 	ulint innodb_log_waits;
 	ulint innodb_log_write_requests;
 	ulint innodb_log_writes;
...
@@ -563,4 +565,3 @@ struct srv_sys_struct{
 
 extern ulint	srv_n_threads_active[];
 #endif
-
@@ -24,6 +24,12 @@ smaller than 30 and the order of the numerical values like below! */
 #define RW_X_LATCH	2
 #define RW_NO_LATCH	3
 
+/* We decrement lock_word by this amount for each x_lock. It is also the
+start value for the lock_word, meaning that it limits the maximum number
+of concurrent read locks before the rw_lock breaks. The current value of
+0x00100000 allows 1,048,575 concurrent readers and 2047 recursive writers. */
+#define X_LOCK_DECR		0x00100000
+
 typedef struct rw_lock_struct		rw_lock_t;
 #ifdef UNIV_SYNC_DEBUG
 typedef struct rw_lock_debug_struct	rw_lock_debug_t;
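For orientation while reading the rest of the patch: the lock starts at X_LOCK_DECR, each s-lock subtracts 1 and each x-lock subtracts X_LOCK_DECR, so the whole rw-lock state can be read from this single word. A reader's summary of the encoding implied by the hunks below (an interpretation, not text from the patch):

/* Interpretation of the lock_word encoding:

   lock_word == X_LOCK_DECR:		unlocked
   0 < lock_word < X_LOCK_DECR:		s-locked by
					(X_LOCK_DECR - lock_word) readers
   lock_word == 0:			x-locked, no recursion
   -X_LOCK_DECR < lock_word < 0:	x-lock reserved; (-lock_word)
					readers must still exit
   lock_word <= -X_LOCK_DECR:		x-locked recursively

   Worked examples from the unlocked value 0x00100000:
   one s-lock:		0x00100000 - 1          = 0x000FFFFF (one reader)
   one x-lock:		0x00100000 - 0x00100000 = 0          (writer owns it)
   recursive x-lock:	0 - 0x00100000          = -0x00100000
   A 32-bit word therefore caps at X_LOCK_DECR - 1 = 1,048,575 readers and
   2047 recursive x-locks, the limits quoted in the comment above. */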
@@ -47,14 +53,14 @@ extern ibool rw_lock_debug_waiters;	/* This is set to TRUE, if
 					there may be waiters for the event */
 #endif /* UNIV_SYNC_DEBUG */
 
-extern ulint	rw_s_system_call_count;
-extern ulint	rw_s_spin_wait_count;
-extern ulint	rw_s_exit_count;
-extern ulint	rw_s_os_wait_count;
-extern ulint	rw_x_system_call_count;
-extern ulint	rw_x_spin_wait_count;
-extern ulint	rw_x_os_wait_count;
-extern ulint	rw_x_exit_count;
+extern ib_longlong	rw_s_spin_wait_count;
+extern ib_longlong	rw_s_spin_round_count;
+extern ib_longlong	rw_s_exit_count;
+extern ib_longlong	rw_s_os_wait_count;
+extern ib_longlong	rw_x_spin_wait_count;
+extern ib_longlong	rw_x_spin_round_count;
+extern ib_longlong	rw_x_os_wait_count;
+extern ib_longlong	rw_x_exit_count;
 
 /**********************************************************************
 Creates, or rather, initializes an rw-lock object in a specified memory
...
@@ -127,8 +133,8 @@ corresponding function. */
 NOTE! The following macros should be used in rw s-locking, not the
 corresponding function. */
 
-#define rw_lock_s_lock_nowait(M)	rw_lock_s_lock_func_nowait(\
-						(M), __FILE__, __LINE__)
+#define rw_lock_s_lock_nowait(M, F, L)	rw_lock_s_lock_low(\
+						(M), 0, (F), (L))
 /**********************************************************************
 NOTE! Use the corresponding macro, not directly this function, except if
 you supply the file name and line number. Lock an rw-lock in shared mode
...
@@ -146,18 +152,6 @@ rw_lock_s_lock_func(
 	const char*	file_name,/* in: file name where lock requested */
 	ulint		line);	/* in: line where requested */
 /**********************************************************************
-NOTE! Use the corresponding macro, not directly this function, except if
-you supply the file name and line number. Lock an rw-lock in shared mode
-for the current thread if the lock can be acquired immediately. */
-UNIV_INLINE
-ibool
-rw_lock_s_lock_func_nowait(
-/*=======================*/
-				/* out: TRUE if success */
-	rw_lock_t*	lock,	/* in: pointer to rw-lock */
-	const char*	file_name,/* in: file name where lock requested */
-	ulint		line);	/* in: line where requested */
-/**********************************************************************
 NOTE! Use the corresponding macro, not directly this function! Lock an
 rw-lock in exclusive mode for the current thread if the lock can be
 obtained immediately. */
@@ -341,6 +335,23 @@ ulint
 rw_lock_get_reader_count(
 /*=====================*/
 	rw_lock_t*	lock);
+/**********************************************************************
+Decrements lock_word the specified amount if it is greater than 0.
+This is used by both s_lock and x_lock operations. */
+UNIV_INLINE
+ibool
+rw_lock_lock_word_decr(
+/*===================*/
+				/* out: TRUE if decr occurs */
+	rw_lock_t*	lock,	/* in: rw-lock */
+	ulint		amount);/* in: amount to decrement */
+/**********************************************************************
+Increments lock_word the specified amount and returns new value. */
+UNIV_INLINE
+lint
+rw_lock_lock_word_incr(
+/*===================*/
+				/* out: new value of lock_word */
+	rw_lock_t*	lock,	/* in: rw-lock */
+	ulint		amount);/* in: amount to increment */
 #ifdef UNIV_SYNC_DEBUG
 /**********************************************************************
 Checks if the thread has locked the rw-lock in the specified mode, with
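The bodies of these two helpers live in sync0rw.ic, which is not shown in this view. A plausible sketch of rw_lock_lock_word_decr in terms of the os_compare_and_swap wrapper declared earlier (an illustration under that assumption, not the patch's actual code):

UNIV_INLINE
ibool
rw_lock_lock_word_decr(
/*===================*/
				/* out: TRUE if decr occurs */
	rw_lock_t*	lock,	/* in: rw-lock */
	ulint		amount)	/* in: amount to decrement */
{
	lint	local_lock_word = lock->lock_word;

	/* only decrement while no writer holds or has reserved the lock,
	i.e. while lock_word is still positive */
	while (local_lock_word > 0) {
		if (os_compare_and_swap(&lock->lock_word,
					local_lock_word,
					local_lock_word - amount)) {
			return(TRUE);
		}
		/* CAS lost a race with another thread: reread and retry */
		local_lock_word = lock->lock_word;
	}

	return(FALSE);
}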
@@ -417,44 +428,28 @@ Do not use its fields directly! The structure used in the spin lock
 implementation of a read-write lock. Several threads may have a shared lock
 simultaneously in this lock, but only one writer may have an exclusive lock,
 in which case no shared locks are allowed. To prevent starving of a writer
-blocked by readers, a writer may queue for the lock by setting the writer
-field. Then no new readers are allowed in. */
+blocked by readers, a writer may queue for x-lock by decrementing lock_word:
+no new readers will be let in while the thread waits for readers to exit. */
 
 struct rw_lock_struct {
-	os_event_t	event;	/* Used by sync0arr.c for thread queueing */
-#ifdef __WIN__
-	os_event_t	wait_ex_event;	/* This windows specific event is
-					used by the thread which has set the
-					lock state to RW_LOCK_WAIT_EX. The
-					rw_lock design guarantees that this
-					thread will be the next one to proceed
-					once the current the event gets
-					signalled. See LEMMA 2 in sync0sync.c */
-#endif
-	ulint	reader_count;	/* Number of readers who have locked this
-				lock in the shared mode */
-	ulint	writer;		/* This field is set to RW_LOCK_EX if there
-				is a writer owning the lock (in exclusive
-				mode), RW_LOCK_WAIT_EX if a writer is
-				queueing for the lock, and
-				RW_LOCK_NOT_LOCKED, otherwise. */
-	os_thread_id_t	writer_thread;
-				/* Thread id of a possible writer thread */
-	ulint	writer_count;	/* Number of times the same thread has
-				recursively locked the lock in the exclusive
-				mode */
-	mutex_t	mutex;		/* The mutex protecting rw_lock_struct */
-	ulint	pass;		/* Default value 0. This is set to some
+	volatile lint	lock_word;
+				/* Holds the state of the lock. */
+	volatile ulint	waiters;/* 1: there are waiters */
+	volatile ulint	pass;	/* Default value 0. This is set to some
 				value != 0 given by the caller of an x-lock
 				operation, if the x-lock is to be passed to
 				another thread to unlock (which happens in
 				asynchronous i/o). */
-	ulint	waiters;	/* This ulint is set to 1 if there are
-				waiters (readers or writers) in the global
-				wait array, waiting for this rw_lock.
-				Otherwise, == 0. */
+	volatile os_thread_id_t	writer_thread;
+				/* Thread id of writer thread */
+	os_event_t	event;	/* Used by sync0arr.c for thread queueing */
+	os_event_t	wait_ex_event;
+				/* Event for next-writer to wait on. A thread
+				must decrement lock_word before waiting. */
+#ifndef HAVE_GCC_ATOMIC_BUILTINS
+	mutex_t	mutex;		/* The mutex protecting rw_lock_struct */
+#endif /* HAVE_GCC_ATOMIC_BUILTINS */
 	UT_LIST_NODE_T(rw_lock_t) list;
 				/* All allocated rw locks are put into a
 				list */
...
@@ -464,7 +459,9 @@ struct rw_lock_struct {
 				info list of the lock */
 	ulint	level;		/* Level in the global latching order. */
 #endif /* UNIV_SYNC_DEBUG */
+	ulint	count_os_wait;	/* Count of os_waits. May not be accurate */
 	const char*	cfile_name;/* File name where lock created */
+	/* last s-lock file/line is not guaranteed to be correct */
 	const char*	last_s_file_name;/* File name where last s-locked */
 	const char*	last_x_file_name;/* File name where last x-locked */
 	ibool		writer_is_wait_ex;
...
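With the per-field state (writer, reader_count) gone, accessors such as rw_lock_get_writer and rw_lock_get_reader_count, used throughout the hunks above, must now decode the state from lock_word. A sketch of what rw_lock_get_reader_count can look like under the X_LOCK_DECR encoding (an interpretation; the actual sync0rw.ic body is not shown in this view):

UNIV_INLINE
ulint
rw_lock_get_reader_count(
/*=====================*/
	rw_lock_t*	lock)	/* in: rw-lock */
{
	lint	lock_word = lock->lock_word;

	if (lock_word > 0) {
		/* s-locked, no waiting writer: one decrement per reader;
		the unlocked value X_LOCK_DECR correctly yields 0 here */
		return((ulint)(X_LOCK_DECR - lock_word));
	} else if (lock_word > -X_LOCK_DECR && lock_word < 0) {
		/* s-locked with a writer waiting: readers draining out */
		return((ulint)(-lock_word));
	}

	/* x-locked (possibly recursively): no readers */
	return(0);
}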
@@ -252,7 +252,7 @@ mutex_n_reserved(void);
 NOT to be used outside this module except in debugging! Gets the value
 of the lock word. */
 UNIV_INLINE
-ulint
+byte
 mutex_get_lock_word(
 /*================*/
 	const mutex_t*	mutex);	/* in: mutex */
...
@@ -471,9 +471,13 @@ implementation of a mutual exclusion semaphore. */
 struct mutex_struct {
 	os_event_t	event;	/* Used by sync0arr.c for the wait queue */
-	ulint	lock_word;	/* This ulint is the target of the atomic
-				test-and-set instruction in Win32 */
-#if !defined(_WIN32) || !defined(UNIV_CAN_USE_X86_ASSEMBLER)
+	byte	lock_word;	/* This byte is the target of the atomic
+				test-and-set instruction in Win32 and
+				x86 32/64 with GCC 4.1.0 or later version */
+#if defined(_WIN32) && defined(UNIV_CAN_USE_X86_ASSEMBLER)
+#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
+#else
 	os_fast_mutex_t
 		os_fast_mutex;	/* In other systems we use this OS mutex
 				in place of lock_word */
...
@@ -526,8 +530,7 @@ to 20 microseconds. */
 /* The number of system calls made in this module. Intended for performance
 monitoring. */
 
-extern ulint	mutex_system_call_count;
-extern ulint	mutex_exit_count;
+extern ib_longlong	mutex_exit_count;
 
 #ifdef UNIV_SYNC_DEBUG
 /* Latching order checks start when this is set TRUE */
...
@@ -6,16 +6,6 @@ Mutex, the basic synchronization primitive
 Created 9/5/1995 Heikki Tuuri
 *******************************************************/
 
-#if defined(not_defined) && defined(__GNUC__) && defined(UNIV_INTEL_X86)
-/* %z0: Use the size of operand %0 which in our case is *m to determine
-instruction size, it should end up as xchgl. "1" in the input constraint,
-says that "in" has to go in the same place as "out".*/
-#define TAS(m, in, out) \
-	asm volatile ("xchg%z0 %2, %0" \
-	: "=g" (*(m)), "=r" (out) \
-	: "1" (in))	/* Note: "1" here refers to "=r" (out) */
-#endif
-
 /**********************************************************************
 Sets the waiters field in a mutex. */
...
@@ -59,7 +49,7 @@ mutex_signal_object(
 Performs an atomic test-and-set instruction to the lock_word field of a
 mutex. */
 UNIV_INLINE
-ulint
+byte
 mutex_test_and_set(
 /*===============*/
 			/* out: the previous value of lock_word: 0 or
@@ -67,18 +57,18 @@ mutex_test_and_set(
 	mutex_t*	mutex)	/* in: mutex */
 {
 #if defined(_WIN32) && defined(UNIV_CAN_USE_X86_ASSEMBLER)
-	ulint	res;
-	ulint*	lw;	/* assembler code is used to ensure that
+	byte	res;
+	byte*	lw;	/* assembler code is used to ensure that
			lock_word is loaded from memory */
 	ut_ad(mutex);
-	ut_ad(sizeof(ulint) == 4);
+	ut_ad(sizeof(byte) == 1);
 
 	lw = &(mutex->lock_word);
 
 	__asm	MOV	ECX, lw
 	__asm	MOV	EDX, 1
-	__asm	XCHG	EDX, DWORD PTR [ECX]
-	__asm	MOV	res, EDX
+	__asm	XCHG	DL, BYTE PTR [ECX]
+	__asm	MOV	res, DL
 
 	/* The fence below would prevent this thread from
 	reading the data structure protected by the mutex
...
@@ -98,12 +88,8 @@ mutex_test_and_set(
 	/* mutex_fence(); */
 
 	return(res);
-#elif defined(not_defined) && defined(__GNUC__) && defined(UNIV_INTEL_X86)
-	ulint	res;
-
-	TAS(&mutex->lock_word, 1, res);
-
-	return(res);
+#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
+	return __sync_lock_test_and_set(&(mutex->lock_word), 1);
 #else
 	ibool	ret;
...
@@ -117,7 +103,7 @@ mutex_test_and_set(
 		mutex->lock_word = 1;
 	}
 
-	return(ret);
+	return((byte)ret);
 #endif
 }
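Taken together, mutex_test_and_set and mutex_reset_lock_word (next hunk) are all a caller needs for the mutex fast path. A simplified sketch of how an acquire loop uses them (illustration only; the real mutex_enter spins and then parks in the sync array):

UNIV_INLINE
void
mutex_enter_sketch(
/*===============*/
	mutex_t*	mutex)	/* in: mutex to acquire */
{
	/* mutex_test_and_set returns the previous lock_word:
	0 means this thread just took the lock */
	while (mutex_test_and_set(mutex) != 0) {
		/* the real code spins SYNC_SPIN_ROUNDS times and then
		waits in the sync array (see mutex_spin_wait) */
	}
}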
@@ -131,7 +117,7 @@ mutex_reset_lock_word(
 	mutex_t*	mutex)	/* in: mutex */
 {
 #if defined(_WIN32) && defined(UNIV_CAN_USE_X86_ASSEMBLER)
-	ulint*	lw;	/* assembler code is used to ensure that
+	byte*	lw;	/* assembler code is used to ensure that
			lock_word is loaded from memory */
 	ut_ad(mutex);
...
@@ -139,11 +125,12 @@ mutex_reset_lock_word(
 	__asm	MOV	EDX, 0
 	__asm	MOV	ECX, lw
-	__asm	XCHG	EDX, DWORD PTR [ECX]
-#elif defined(not_defined) && defined(__GNUC__) && defined(UNIV_INTEL_X86)
-	ulint	res;
-
-	TAS(&mutex->lock_word, 0, res);
+	__asm	XCHG	DL, BYTE PTR [ECX]
+#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
+	/* In theory __sync_lock_release should be used to release the lock.
+	Unfortunately, it does not work properly alone. The workaround is
+	that more conservative __sync_lock_test_and_set is used instead. */
+	__sync_lock_test_and_set(&(mutex->lock_word), 0);
 #else
 	mutex->lock_word = 0;
...
@@ -154,12 +141,12 @@ mutex_reset_lock_word(
 /**********************************************************************
 Gets the value of the lock word. */
 UNIV_INLINE
-ulint
+byte
 mutex_get_lock_word(
 /*================*/
 	const mutex_t*	mutex)	/* in: mutex */
 {
-	const volatile ulint*	ptr;	/* declared volatile to ensure that
+	const volatile byte*	ptr;	/* declared volatile to ensure that
					lock_word is loaded from memory */
 	ut_ad(mutex);
...
@@ -120,6 +120,9 @@ by one. */
 #define UNIV_SET_MEM_TO_ZERO
 #endif
 
+/* Use malloc instead of innodb additional memory pool (great with tcmalloc) */
+#define UNIV_DISABLE_MEM_POOL
+
 /*
 #define UNIV_SQL_DEBUG
 #define UNIV_LOG_DEBUG
...
@@ -329,6 +329,9 @@ mem_area_alloc(
 				minus MEM_AREA_EXTRA_SIZE */
 	mem_pool_t*	pool)	/* in: memory pool */
 {
+#ifdef UNIV_DISABLE_MEM_POOL
+	return malloc(size);
+#else /* UNIV_DISABLE_MEM_POOL */
 	mem_area_t*	area;
 	ulint		n;
 	ibool		ret;
...
@@ -407,6 +410,7 @@ mem_area_alloc(
 			ut_2_exp(n) - MEM_AREA_EXTRA_SIZE);
 
 	return((void*)(MEM_AREA_EXTRA_SIZE + ((byte*)area)));
+#endif /* UNIV_DISABLE_MEM_POOL */
 }
 
 /************************************************************************
@@ -459,6 +463,9 @@ mem_area_free(
 				buffer */
 	mem_pool_t*	pool)	/* in: memory pool */
 {
+#ifdef UNIV_DISABLE_MEM_POOL
+	free(ptr);
+#else /* UNIV_DISABLE_MEM_POOL */
 	mem_area_t*	area;
 	mem_area_t*	buddy;
 	void*		new_ptr;
...
@@ -570,6 +577,7 @@ mem_area_free(
 	mutex_exit(&(pool->mutex));
 
 	ut_ad(mem_pool_validate(pool));
+#endif /* UNIV_DISABLE_MEM_POOL */
 }
 
 /************************************************************************
...
@@ -1248,7 +1248,7 @@ row_sel(
 			rw_lock_s_lock(&btr_search_latch);
 
 			search_latch_locked = TRUE;
-		} else if (btr_search_latch.writer_is_wait_ex) {
+		} else if (rw_lock_get_writer(&btr_search_latch) == RW_LOCK_WAIT_EX) {
 
 			/* There is an x-latch request waiting: release the
 			s-latch for a moment; as an s-latch here is often
...
@@ -3327,7 +3327,7 @@ row_search_for_mysql(
 	/* PHASE 0: Release a possible s-latch we are holding on the
 	adaptive hash index latch if there is someone waiting behind */
 
-	if (UNIV_UNLIKELY(btr_search_latch.writer != RW_LOCK_NOT_LOCKED)
+	if (UNIV_UNLIKELY(rw_lock_get_writer(&btr_search_latch) != RW_LOCK_NOT_LOCKED)
 	    && trx->has_search_latch) {
 
 		/* There is an x-latch request on the adaptive hash index:
...
@@ -1916,6 +1916,16 @@ srv_export_innodb_status(void)
 	export_vars.innodb_buffer_pool_pages_misc = buf_pool->max_size
 		- UT_LIST_GET_LEN(buf_pool->LRU)
 		- UT_LIST_GET_LEN(buf_pool->free);
+#ifdef HAVE_GCC_ATOMIC_BUILTINS
+	export_vars.innodb_have_atomic_builtins = 1;
+#else
+	export_vars.innodb_have_atomic_builtins = 0;
+#endif
+#ifdef UNIV_DISABLE_MEM_POOL
+	export_vars.innodb_heap_enabled = 0;
+#else
+	export_vars.innodb_heap_enabled = 1;
+#endif
 	export_vars.innodb_page_size = UNIV_PAGE_SIZE;
 	export_vars.innodb_log_waits = srv_log_waits;
 	export_vars.innodb_os_log_written = srv_os_log_written;
...
@@ -1063,6 +1063,16 @@ innobase_start_or_create_for_mysql(void)
 		return(DB_ERROR);
 	}
 
+#ifdef UNIV_DISABLE_MEM_POOL
+	fprintf(stderr,
+		"InnoDB: The InnoDB memory heap has been disabled.\n");
+#endif
+
+#ifdef HAVE_GCC_ATOMIC_BUILTINS
+	fprintf(stderr,
+		"InnoDB: Mutex and rw_lock use GCC atomic builtins.\n");
+#endif
+
 	/* Since InnoDB does not currently clean up all its internal data
 	structures in MySQL Embedded Server Library server_end(), we
 	print an error message if someone tries to start up InnoDB a
...
@@ -295,28 +295,25 @@ sync_array_validate(
 }
 
 /***********************************************************************
-Puts the cell event in reset state. */
+Returns the event that the thread owning the cell waits for. */
 static
-ib_longlong
-sync_cell_event_reset(
-/*==================*/
-				/* out: value of signal_count
-				at the time of reset. */
-	ulint	type,	/* in: lock type mutex/rw_lock */
-	void*	object)	/* in: the rw_lock/mutex object */
+os_event_t
+sync_cell_get_event(
+/*================*/
+	sync_cell_t*	cell)	/* in: non-empty sync array cell */
 {
+	ulint	type = cell->request_type;
+
 	if (type == SYNC_MUTEX) {
-		return(os_event_reset(((mutex_t *) object)->event));
-#ifdef __WIN__
+		return(((mutex_t *) cell->wait_object)->event);
 	} else if (type == RW_LOCK_WAIT_EX) {
-		return(os_event_reset(
-			((rw_lock_t *) object)->wait_ex_event));
-#endif
-	} else {
-		return(os_event_reset(((rw_lock_t *) object)->event));
+		return(((rw_lock_t *) cell->wait_object)->wait_ex_event);
+	} else {	/* RW_LOCK_SHARED and RW_LOCK_EX wait on the same event */
+		return(((rw_lock_t *) cell->wait_object)->event);
 	}
 }
 
 /**********************************************************************
 Reserves a wait array cell for waiting for an object.
 The event of the cell is reset to nonsignalled state. */
@@ -332,6 +329,7 @@ sync_array_reserve_cell(
 	ulint*		index)	/* out: index of the reserved cell */
 {
 	sync_cell_t*	cell;
+	os_event_t	event;
 	ulint		i;
 
 	ut_a(object);
...
@@ -370,8 +368,8 @@ sync_array_reserve_cell(
 			/* Make sure the event is reset and also store
 			the value of signal_count at which the event
 			was reset. */
-			cell->signal_count = sync_cell_event_reset(type,
-								object);
+			event = sync_cell_get_event(cell);
+			cell->signal_count = os_event_reset(event);
 
 			cell->reservation_time = time(NULL);
@@ -411,19 +409,7 @@ sync_array_wait_event(
 	ut_a(!cell->waiting);
 	ut_ad(os_thread_get_curr_id() == cell->thread);
 
-	if (cell->request_type == SYNC_MUTEX) {
-		event = ((mutex_t*) cell->wait_object)->event;
-#ifdef __WIN__
-	/* On windows if the thread about to wait is the one which
-	has set the state of the rw_lock to RW_LOCK_WAIT_EX, then
-	it waits on a special event i.e.: wait_ex_event. */
-	} else if (cell->request_type == RW_LOCK_WAIT_EX) {
-		event = ((rw_lock_t*) cell->wait_object)->wait_ex_event;
-#endif
-	} else {
-		event = ((rw_lock_t*) cell->wait_object)->event;
-	}
+	event = sync_cell_get_event(cell);
 	cell->waiting = TRUE;
 
 #ifdef UNIV_SYNC_DEBUG
@@ -462,6 +448,7 @@ sync_array_cell_print(
 	mutex_t*	mutex;
 	rw_lock_t*	rwlock;
 	ulint		type;
+	ulint		writer;
 
 	type = cell->request_type;
...
@@ -491,9 +478,7 @@ sync_array_cell_print(
 			(ulong) mutex->waiters);
 
 	} else if (type == RW_LOCK_EX
-#ifdef __WIN__
 		   || type == RW_LOCK_WAIT_EX
-#endif
 		   || type == RW_LOCK_SHARED) {
 
 		fputs(type == RW_LOCK_EX ? "X-lock on" : "S-lock on", file);
...
@@ -504,22 +489,25 @@ sync_array_cell_print(
 			" RW-latch at %p created in file %s line %lu\n",
 			(void*) rwlock, rwlock->cfile_name,
 			(ulong) rwlock->cline);
-		if (rwlock->writer != RW_LOCK_NOT_LOCKED) {
+		writer = rw_lock_get_writer(rwlock);
+		if (writer != RW_LOCK_NOT_LOCKED) {
 			fprintf(file,
 				"a writer (thread id %lu) has"
 				" reserved it in mode %s",
 				(ulong) os_thread_pf(rwlock->writer_thread),
-				rwlock->writer == RW_LOCK_EX
+				writer == RW_LOCK_EX
 				? " exclusive\n"
 				: " wait exclusive\n");
 		}
 
 		fprintf(file,
-			"number of readers %lu, waiters flag %lu\n"
+			"number of readers %lu, waiters flag %lu, "
+			"lock_word: %ld\n"
 			"Last time read locked in file %s line %lu\n"
 			"Last time write locked in file %s line %lu\n",
-			(ulong) rwlock->reader_count,
+			(ulong) rw_lock_get_reader_count(rwlock),
 			(ulong) rwlock->waiters,
+			rwlock->lock_word,
 			rwlock->last_s_file_name,
 			(ulong) rwlock->last_s_line,
 			rwlock->last_x_file_name,
@@ -778,28 +766,30 @@ sync_arr_cell_can_wake_up(
 			return(TRUE);
 		}
 
-	} else if (cell->request_type == RW_LOCK_EX
-		   || cell->request_type == RW_LOCK_WAIT_EX) {
+	} else if (cell->request_type == RW_LOCK_EX) {
 
 		lock = cell->wait_object;
 
-		if (rw_lock_get_reader_count(lock) == 0
-		    && rw_lock_get_writer(lock) == RW_LOCK_NOT_LOCKED) {
+		/* X_LOCK_DECR is the unlocked state */
+		if (lock->lock_word == X_LOCK_DECR) {
 
 			return(TRUE);
 		}
 
-		if (rw_lock_get_reader_count(lock) == 0
-		    && rw_lock_get_writer(lock) == RW_LOCK_WAIT_EX
-		    && os_thread_eq(lock->writer_thread, cell->thread)) {
+	} else if (cell->request_type == RW_LOCK_WAIT_EX) {
+
+		lock = cell->wait_object;
+
+		/* lock_word == 0 means all readers have left */
+		if (lock->lock_word == 0) {
 
 			return(TRUE);
 		}
 
 	} else if (cell->request_type == RW_LOCK_SHARED) {
 		lock = cell->wait_object;
 
-		if (rw_lock_get_writer(lock) == RW_LOCK_NOT_LOCKED) {
+		/* lock_word > 0 means no writer or reserved writer */
+		if (lock->lock_word > 0) {
 
 			return(TRUE);
 		}
@@ -844,11 +834,15 @@ sync_array_object_signalled(
 /*========================*/
 	sync_array_t*	arr)	/* in: wait array */
 {
+#ifdef HAVE_GCC_ATOMIC_BUILTINS
+	__sync_fetch_and_add(&(arr->sg_count), 1);
+#else
 	sync_array_enter(arr);
 
 	arr->sg_count++;
 
 	sync_array_exit(arr);
+#endif
 }
/**************************************************************************
...
@@ -868,6 +862,7 @@ sync_arr_wake_threads_if_sema_free(void)
 	sync_cell_t*	cell;
 	ulint		count;
 	ulint		i;
+	os_event_t	event;
 
 	sync_array_enter(arr);
...
@@ -877,36 +872,20 @@ sync_arr_wake_threads_if_sema_free(void)
 	while (count < arr->n_reserved) {
 
 		cell = sync_array_get_nth_cell(arr, i);
+		i++;
 
-		if (cell->wait_object != NULL) {
+		if (cell->wait_object == NULL) {
+			continue;
+		}
+		count++;
 
-			count++;
+		if (sync_arr_cell_can_wake_up(cell)) {
+			event = sync_cell_get_event(cell);
 
-			if (sync_arr_cell_can_wake_up(cell)) {
-				if (cell->request_type == SYNC_MUTEX) {
-					mutex_t*	mutex;
-
-					mutex = cell->wait_object;
-					os_event_set(mutex->event);
-#ifdef __WIN__
-				} else if (cell->request_type
-					   == RW_LOCK_WAIT_EX) {
-					rw_lock_t*	lock;
-
-					lock = cell->wait_object;
-					os_event_set(lock->wait_ex_event);
-#endif
-				} else {
-					rw_lock_t*	lock;
-
-					lock = cell->wait_object;
-					os_event_set(lock->event);
-				}
-			}
+			os_event_set(event);
 		}
-
-		i++;
 	}
 
 	sync_array_exit(arr);
...
@@ -1026,4 +1005,3 @@ sync_array_print_info(
 	sync_array_exit(arr);
 }
-
@@ -138,18 +138,13 @@ Therefore, this thread is guaranteed to catch the os_set_event()
 signalled unconditionally at the release of the lock.
 
 Q.E.D. */
 
-/* The number of system calls made in this module. Intended for performance
-monitoring. */
-
-ulint	mutex_system_call_count		= 0;
-
 /* Number of spin waits on mutexes: for performance monitoring */
 
 /* round=one iteration of a spin loop */
-ulint	mutex_spin_round_count		= 0;
-ulint	mutex_spin_wait_count		= 0;
-ulint	mutex_os_wait_count		= 0;
-ulint	mutex_exit_count		= 0;
+ib_longlong	mutex_spin_round_count	= 0;
+ib_longlong	mutex_spin_wait_count	= 0;
+ib_longlong	mutex_os_wait_count	= 0;
+ib_longlong	mutex_exit_count	= 0;
 
 /* The global array of wait cells for implementation of the database's own
 mutexes and read-write locks */
...
@@ -243,6 +238,8 @@ mutex_create_func(
 {
 #if defined(_WIN32) && defined(UNIV_CAN_USE_X86_ASSEMBLER)
 	mutex_reset_lock_word(mutex);
+#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
+	mutex_reset_lock_word(mutex);
 #else
 	os_fast_mutex_init(&(mutex->os_fast_mutex));
 	mutex->lock_word = 0;
...
@@ -333,7 +330,9 @@ mutex_free(
 	os_event_free(mutex->event);
 
-#if !defined(_WIN32) || !defined(UNIV_CAN_USE_X86_ASSEMBLER)
+#if defined(_WIN32) && defined(UNIV_CAN_USE_X86_ASSEMBLER)
+#elif defined(HAVE_GCC_ATOMIC_BUILTINS)
+#else
 	os_fast_mutex_free(&(mutex->os_fast_mutex));
 #endif
 
 	/* If we free the mutex protecting the mutex list (freeing is
...
@@ -450,6 +449,12 @@ mutex_spin_wait(
 #endif /* UNIV_DEBUG && !UNIV_HOTBACKUP */
 
 	ut_ad(mutex);
+
+	/* This update is not thread safe, but we don't mind if the count
+	isn't exact. Moved out of ifdef that follows because we are willing
+	to sacrifice the cost of counting this as the data is valuable.
+	Count the number of calls to mutex_spin_wait. */
+	mutex_spin_wait_count++;
 
 mutex_loop:
 
 	i = 0;
...
@@ -462,7 +467,6 @@ mutex_spin_wait(
 spin_loop:
 #if defined UNIV_DEBUG && !defined UNIV_HOTBACKUP
-	mutex_spin_wait_count++;
 	mutex->count_spin_loop++;
 #endif /* UNIV_DEBUG && !UNIV_HOTBACKUP */
@@ -527,8 +531,6 @@ mutex_spin_wait(
 	sync_array_reserve_cell(sync_primary_wait_array, mutex,
 				SYNC_MUTEX, file_name, line, &index);
 
-	mutex_system_call_count++;
-
 	/* The memory order of the array reservation and the change in the
 	waiters field is important: when we suspend a thread, we first
 	reserve the cell and then set waiters field to 1. When threads are
...
@@ -575,7 +577,6 @@ mutex_spin_wait(
 			mutex->cfile_name, (ulong) mutex->cline, (ulong) i);
 #endif
 
-	mutex_system_call_count++;
 	mutex_os_wait_count++;
 
 #ifndef UNIV_HOTBACKUP
...
@@ -1377,21 +1378,31 @@ sync_print_wait_info(
 	FILE*	file)	/* in: file where to print */
 {
 #ifdef UNIV_SYNC_DEBUG
-	fprintf(file, "Mutex exits %lu, rws exits %lu, rwx exits %lu\n",
+	fprintf(file, "Mutex exits %llu, rws exits %llu, rwx exits %llu\n",
 		mutex_exit_count, rw_s_exit_count, rw_x_exit_count);
 #endif
 
 	fprintf(file,
-		"Mutex spin waits %lu, rounds %lu, OS waits %lu\n"
-		"RW-shared spins %lu, OS waits %lu;"
-		" RW-excl spins %lu, OS waits %lu\n",
-		(ulong) mutex_spin_wait_count,
-		(ulong) mutex_spin_round_count,
-		(ulong) mutex_os_wait_count,
-		(ulong) rw_s_spin_wait_count,
-		(ulong) rw_s_os_wait_count,
-		(ulong) rw_x_spin_wait_count,
-		(ulong) rw_x_os_wait_count);
+		"Mutex spin waits %llu, rounds %llu, OS waits %llu\n"
+		"RW-shared spins %llu, OS waits %llu;"
+		" RW-excl spins %llu, OS waits %llu\n",
+		mutex_spin_wait_count,
+		mutex_spin_round_count,
+		mutex_os_wait_count,
+		rw_s_spin_wait_count,
+		rw_s_os_wait_count,
+		rw_x_spin_wait_count,
+		rw_x_os_wait_count);
+
+	fprintf(file,
+		"Spin rounds per wait: %.2f mutex, %.2f RW-shared, "
+		"%.2f RW-excl\n",
+		(double) mutex_spin_round_count /
+		(mutex_spin_wait_count ? mutex_spin_wait_count : 1),
+		(double) rw_s_spin_round_count /
+		(rw_s_spin_wait_count ? rw_s_spin_wait_count : 1),
+		(double) rw_x_spin_round_count /
+		(rw_x_spin_wait_count ? rw_x_spin_wait_count : 1));
 }
 
 /***********************************************************************
...
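(The added "Spin rounds per wait" line is a plain ratio guarded against division by zero: with, say, mutex_spin_round_count = 1500 and mutex_spin_wait_count = 300 (hypothetical numbers), it prints 5.00 for the mutex column; a zero wait count makes the code divide by 1 instead.)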