Commit 9581c4a8 authored by Sergey Vojtovich

MDEV-17441 - InnoDB transition to C++11 atomics

Almost trivial rw_lock_t::lock_word transition. Since C++11 doesn't
seem to allow mixed (atomic and non-atomic) access to atomic variables,
we have to perform atomic initialisation.
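For illustration, here is a minimal sketch of that initialisation constraint. The struct and constant are stand-ins, not the real rw_lock_t; the value 0x20000000 mirrors InnoDB's X_LOCK_DECR, the fully unlocked lock_word state.

#include <atomic>
#include <cstdint>

static const int32_t X_LOCK_DECR = 0x20000000;	/* "fully unlocked" */

struct lock_sketch_t {
	std::atomic<int32_t>	lock_word;
};

void lock_create_sketch(lock_sketch_t* lock)
{
	/* With a plain int32_t member the old code could assign directly.
	Once the member is std::atomic, every access -- including the very
	first write -- has to go through the atomic API: */
	lock->lock_word.store(X_LOCK_DECR, std::memory_order_relaxed);
}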

Also made previously broken code in gis0sea.cc even more broken. It is
unclear how it was supposed to work and what exactly it was supposed to
do.
parent 4404ee29
@@ -1567,8 +1567,8 @@ rtr_copy_buf(
 	matches->block.curr_left_side = block->curr_left_side;
 	matches->block.index = block->index;
 #endif /* BTR_CUR_HASH_ADAPT */
-	ut_d(matches->block.debug_latch = block->debug_latch);
+	ut_d(memcpy(&matches->block.debug_latch, &block->debug_latch,
+		    sizeof(rw_lock_t)));
 }
 /****************************************************************//**
...
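The plain assignment above had to go because std::atomic deletes its copy operations, which in turn deletes the implicit copy-assignment of any struct containing one. A hedged sketch of the situation (latch_sketch_t is illustrative, not the real rw_lock_t):

#include <atomic>
#include <cstdint>
#include <cstring>

struct latch_sketch_t {
	std::atomic<int32_t>	lock_word;
};

void copy_latch_sketch(latch_sketch_t* dst, const latch_sketch_t* src)
{
	/* *dst = *src; would be ill-formed: std::atomic's copy-assignment
	is deleted, so the struct's implicit one is deleted too. The commit
	falls back to a bitwise copy instead, which is undefined behaviour
	for a type that is no longer trivially copyable -- hence "even more
	broken" in the commit message. */
	memcpy(dst, src, sizeof(latch_sketch_t));
}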
@@ -569,7 +569,7 @@ struct rw_lock_t
 #endif /* UNIV_DEBUG */
 {
 	/** Holds the state of the lock. */
-	int32_t	lock_word;
+	std::atomic<int32_t>	lock_word;
 	/** 1: there are waiters */
 	int32_t	waiters;
...
@@ -77,8 +77,7 @@ rw_lock_get_writer(
 /*===============*/
 	const rw_lock_t*	lock)	/*!< in: rw-lock */
 {
-	int32_t lock_word = my_atomic_load32_explicit(const_cast<int32_t*>(&lock->lock_word),
-						      MY_MEMORY_ORDER_RELAXED);
+	auto lock_word = lock->lock_word.load(std::memory_order_relaxed);
 	ut_ad(lock_word <= X_LOCK_DECR);
 	if (lock_word > X_LOCK_HALF_DECR) {
@@ -110,8 +109,7 @@ rw_lock_get_reader_count(
 /*=====================*/
 	const rw_lock_t*	lock)	/*!< in: rw-lock */
 {
-	int32_t lock_word = my_atomic_load32_explicit(const_cast<int32_t*>(&lock->lock_word),
-						      MY_MEMORY_ORDER_RELAXED);
+	auto lock_word = lock->lock_word.load(std::memory_order_relaxed);
 	ut_ad(lock_word <= X_LOCK_DECR);
 	if (lock_word > X_LOCK_HALF_DECR) {
@@ -147,8 +145,7 @@ rw_lock_get_x_lock_count(
 /*=====================*/
 	const rw_lock_t*	lock)	/*!< in: rw-lock */
 {
-	int32_t lock_copy = my_atomic_load32_explicit(const_cast<int32_t*>(&lock->lock_word),
-						      MY_MEMORY_ORDER_RELAXED);
+	auto lock_copy = lock->lock_word.load(std::memory_order_relaxed);
 	ut_ad(lock_copy <= X_LOCK_DECR);
 	if (lock_copy == 0 || lock_copy == -X_LOCK_HALF_DECR) {
@@ -181,8 +178,7 @@ rw_lock_get_sx_lock_count(
 	const rw_lock_t*	lock)	/*!< in: rw-lock */
 {
 #ifdef UNIV_DEBUG
-	int32_t lock_copy = my_atomic_load32_explicit(const_cast<int32_t*>(&lock->lock_word),
-						      MY_MEMORY_ORDER_RELAXED);
+	auto lock_copy = lock->lock_word.load(std::memory_order_relaxed);
 	ut_ad(lock_copy <= X_LOCK_DECR);
@@ -213,14 +209,15 @@ rw_lock_lock_word_decr(
 	int32_t		amount,		/*!< in: amount to decrement */
 	int32_t		threshold)	/*!< in: threshold of judgement */
 {
-	int32_t lock_copy = my_atomic_load32_explicit(&lock->lock_word,
-						      MY_MEMORY_ORDER_RELAXED);
+	auto lock_copy = lock->lock_word.load(std::memory_order_relaxed);
 	while (lock_copy > threshold) {
-		if (my_atomic_cas32_strong_explicit(&lock->lock_word,
-						    &lock_copy,
-						    lock_copy - amount,
-						    MY_MEMORY_ORDER_ACQUIRE,
-						    MY_MEMORY_ORDER_RELAXED)) {
+		if (lock->lock_word.compare_exchange_strong(
+			    lock_copy,
+			    lock_copy - amount,
+			    std::memory_order_acquire,
+			    std::memory_order_relaxed)) {
 			return(true);
 		}
 	}
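A standalone sketch of the decrement loop above, with illustrative names. The key property is that compare_exchange_strong() writes the freshly observed value back into its first argument on failure, so the loop never needs an explicit re-load:

#include <atomic>
#include <cstdint>

bool lock_word_decr_sketch(std::atomic<int32_t>& word,
			   int32_t amount, int32_t threshold)
{
	int32_t copy = word.load(std::memory_order_relaxed);
	while (copy > threshold) {
		if (word.compare_exchange_strong(copy, copy - amount,
						 std::memory_order_acquire,
						 std::memory_order_relaxed)) {
			return true;	/* we performed the decrement */
		}
		/* failure: 'copy' now holds the current value; retry */
	}
	return false;
}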
@@ -304,9 +301,9 @@ rw_lock_x_lock_func_nowait(
 {
 	int32_t oldval = X_LOCK_DECR;
-	if (my_atomic_cas32_strong_explicit(&lock->lock_word, &oldval, 0,
-					    MY_MEMORY_ORDER_ACQUIRE,
-					    MY_MEMORY_ORDER_RELAXED)) {
+	if (lock->lock_word.compare_exchange_strong(oldval, 0,
+						    std::memory_order_acquire,
+						    std::memory_order_relaxed)) {
 		lock->writer_thread = os_thread_get_curr_id();
 	} else if (os_thread_eq(lock->writer_thread, os_thread_get_curr_id())) {
@@ -316,12 +313,12 @@ rw_lock_x_lock_func_nowait(
 	   observe consistent values. */
 		if (oldval == 0 || oldval == -X_LOCK_HALF_DECR) {
 			/* There are 1 x-locks */
-			my_atomic_add32_explicit(&lock->lock_word, -X_LOCK_DECR,
-						 MY_MEMORY_ORDER_RELAXED);
+			lock->lock_word.fetch_sub(X_LOCK_DECR,
+						  std::memory_order_relaxed);
 		} else if (oldval <= -X_LOCK_DECR) {
 			/* There are 2 or more x-locks */
-			my_atomic_add32_explicit(&lock->lock_word, -1,
-						 MY_MEMORY_ORDER_RELAXED);
+			lock->lock_word.fetch_sub(1,
+						  std::memory_order_relaxed);
 			/* Watch for too many recursive locks */
 			ut_ad(oldval < 1);
 		} else {
@@ -356,8 +353,7 @@ rw_lock_s_unlock_func(
 	rw_lock_t*	lock)	/*!< in/out: rw-lock */
 {
 #ifdef UNIV_DEBUG
-	int32_t dbg_lock_word = my_atomic_load32_explicit(&lock->lock_word,
-							  MY_MEMORY_ORDER_RELAXED);
+	auto dbg_lock_word = lock->lock_word.load(std::memory_order_relaxed);
 	ut_ad(dbg_lock_word > -X_LOCK_DECR);
 	ut_ad(dbg_lock_word != 0);
 	ut_ad(dbg_lock_word < X_LOCK_DECR);
@@ -366,8 +362,8 @@ rw_lock_s_unlock_func(
 	ut_d(rw_lock_remove_debug_info(lock, pass, RW_LOCK_S));
 	/* Increment lock_word to indicate 1 less reader */
-	int32_t lock_word = my_atomic_add32_explicit(&lock->lock_word, 1,
-						     MY_MEMORY_ORDER_RELEASE) + 1;
+	auto lock_word = lock->lock_word.fetch_add(
+		1, std::memory_order_release) + 1;
 	if (lock_word == 0 || lock_word == -X_LOCK_HALF_DECR) {
 		/* wait_ex waiter exists. It may not be asleep, but we signal
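Note the "+ 1" in the hunk above: fetch_add() returns the value before the addition, so the caller has to recompute the new lock_word. A minimal sketch of the pattern (illustrative, not the InnoDB API):

#include <atomic>
#include <cstdint>

int32_t s_unlock_sketch(std::atomic<int32_t>& word)
{
	/* The release order publishes the reader's critical section
	before a later locker can observe the increment. fetch_add()
	yields the old value, so add 1 to get the new one. */
	return word.fetch_add(1, std::memory_order_release) + 1;
}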
@@ -393,8 +389,7 @@ rw_lock_x_unlock_func(
 #endif /* UNIV_DEBUG */
 	rw_lock_t*	lock)	/*!< in/out: rw-lock */
 {
-	int32_t lock_word = my_atomic_load32_explicit(&lock->lock_word,
-						      MY_MEMORY_ORDER_RELAXED);
+	auto lock_word = lock->lock_word.load(std::memory_order_relaxed);
 	ut_ad(lock_word == 0 || lock_word == -X_LOCK_HALF_DECR
 	      || lock_word <= -X_LOCK_DECR);
@@ -411,8 +406,8 @@ rw_lock_x_unlock_func(
 		ACQ_REL due to...
 		RELEASE: we release rw-lock
 		ACQUIRE: we want waiters to be loaded after lock_word is stored */
-		my_atomic_add32_explicit(&lock->lock_word, X_LOCK_DECR,
-					 MY_MEMORY_ORDER_ACQ_REL);
+		lock->lock_word.fetch_add(X_LOCK_DECR,
+					  std::memory_order_acq_rel);
 		/* This no longer has an X-lock but it may still have
 		an SX-lock. So it is now free for S-locks by other threads.
@@ -429,13 +424,13 @@ rw_lock_x_unlock_func(
 	} else if (lock_word == -X_LOCK_DECR
 		   || lock_word == -(X_LOCK_DECR + X_LOCK_HALF_DECR)) {
 		/* There are 2 x-locks */
-		my_atomic_add32_explicit(&lock->lock_word, X_LOCK_DECR,
-					 MY_MEMORY_ORDER_RELAXED);
+		lock->lock_word.fetch_add(X_LOCK_DECR,
+					  std::memory_order_relaxed);
 	} else {
 		/* There are more than 2 x-locks. */
 		ut_ad(lock_word < -X_LOCK_DECR);
-		my_atomic_add32_explicit(&lock->lock_word, 1,
-					 MY_MEMORY_ORDER_RELAXED);
+		lock->lock_word.fetch_add(1,
+					  std::memory_order_relaxed);
 	}
 	ut_ad(rw_lock_validate(lock));
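The ACQ_REL rationale quoted in the comment above can be shown in isolation: a single acq_rel read-modify-write both releases the critical section and keeps the subsequent waiters check from being reordered before the lock_word update. A hedged sketch with illustrative globals (in the actual code, waiters is still a plain int32_t accessed through my_atomic):

#include <atomic>
#include <cstdint>

std::atomic<int32_t> lock_word;
std::atomic<int32_t> waiters;

void x_unlock_sketch(int32_t x_lock_decr)
{
	/* RELEASE half: writes made under the lock become visible to
	the next locker. ACQUIRE half: the waiters load below cannot be
	reordered before this update. */
	lock_word.fetch_add(x_lock_decr, std::memory_order_acq_rel);

	if (waiters.load(std::memory_order_relaxed)) {
		/* wake up waiting threads (os_event_set() in InnoDB) */
	}
}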
@@ -461,8 +456,8 @@ rw_lock_sx_unlock_func(
 	ut_d(rw_lock_remove_debug_info(lock, pass, RW_LOCK_SX));
 	if (lock->sx_recursive == 0) {
-		int32_t lock_word = my_atomic_load32_explicit(&lock->lock_word,
-							      MY_MEMORY_ORDER_RELAXED);
+		auto lock_word =
+			lock->lock_word.load(std::memory_order_relaxed);
 		/* Last caller in a possible recursive chain. */
 		if (lock_word > 0) {
 			lock->writer_thread = 0;
@@ -472,8 +467,8 @@ rw_lock_sx_unlock_func(
 			ACQ_REL due to...
 			RELEASE: we release rw-lock
 			ACQUIRE: we want waiters to be loaded after lock_word is stored */
-			my_atomic_add32_explicit(&lock->lock_word, X_LOCK_HALF_DECR,
-						 MY_MEMORY_ORDER_ACQ_REL);
+			lock->lock_word.fetch_add(X_LOCK_HALF_DECR,
+						  std::memory_order_acq_rel);
 			/* Lock is now free. May have to signal read/write
 			waiters. We do not need to signal wait_ex waiters,
@@ -490,8 +485,8 @@ rw_lock_sx_unlock_func(
 			/* still has x-lock */
 			ut_ad(lock_word == -X_LOCK_HALF_DECR ||
 			      lock_word <= -(X_LOCK_DECR + X_LOCK_HALF_DECR));
-			my_atomic_add32_explicit(&lock->lock_word, X_LOCK_HALF_DECR,
-						 MY_MEMORY_ORDER_RELAXED);
+			lock->lock_word.fetch_add(X_LOCK_HALF_DECR,
+						  std::memory_order_relaxed);
 		}
 	}
...
@@ -592,7 +592,7 @@ sync_array_cell_print(
 			"\n",
 			rw_lock_get_reader_count(rwlock),
 			my_atomic_load32_explicit(&rwlock->waiters, MY_MEMORY_ORDER_RELAXED),
-			my_atomic_load32_explicit(&rwlock->lock_word, MY_MEMORY_ORDER_RELAXED),
+			rwlock->lock_word.load(std::memory_order_relaxed),
 			innobase_basename(rwlock->last_x_file_name),
 			rwlock->last_x_line
 #if 0 /* JAN: TODO: FIX LATER */
@@ -1381,7 +1381,7 @@ sync_arr_fill_sys_semphore_waits_table(
 		OK(field_store_ulint(fields[SYS_SEMAPHORE_WAITS_WAITERS_FLAG],
 				     my_atomic_load32_explicit(&rwlock->waiters, MY_MEMORY_ORDER_RELAXED)));
 		OK(field_store_ulint(fields[SYS_SEMAPHORE_WAITS_LOCK_WORD],
-				     my_atomic_load32_explicit(&rwlock->lock_word, MY_MEMORY_ORDER_RELAXED)));
+				     rwlock->lock_word.load(std::memory_order_relaxed)));
 		OK(field_store_string(fields[SYS_SEMAPHORE_WAITS_LAST_WRITER_FILE], innobase_basename(rwlock->last_x_file_name)));
 		OK(fields[SYS_SEMAPHORE_WAITS_LAST_WRITER_LINE]->store(rwlock->last_x_line, true));
 		fields[SYS_SEMAPHORE_WAITS_LAST_WRITER_LINE]->set_notnull();
...
@@ -205,7 +205,7 @@ rw_lock_create_func(
 	/* If this is the very first time a synchronization object is
 	created, then the following call initializes the sync system. */
-	lock->lock_word = X_LOCK_DECR;
+	lock->lock_word.store(X_LOCK_DECR, std::memory_order_relaxed);
 	lock->waiters = 0;
 	lock->sx_recursive = 0;
@@ -257,8 +257,7 @@ rw_lock_free_func(
 	rw_lock_t*	lock)	/*!< in/out: rw-lock */
 {
 	ut_ad(rw_lock_validate(lock));
-	ut_a(my_atomic_load32_explicit(&lock->lock_word,
-				       MY_MEMORY_ORDER_RELAXED) == X_LOCK_DECR);
+	ut_a(lock->lock_word.load(std::memory_order_relaxed) == X_LOCK_DECR);
 	mutex_enter(&rw_lock_list_mutex);
@@ -306,8 +305,7 @@ rw_lock_s_lock_spin(
 	/* Spin waiting for the writer field to become free */
 	HMT_low();
 	while (i < srv_n_spin_wait_rounds &&
-	       my_atomic_load32_explicit(&lock->lock_word,
-					 MY_MEMORY_ORDER_RELAXED) <= 0) {
+	       lock->lock_word.load(std::memory_order_relaxed) <= 0) {
 		ut_delay(srv_spin_wait_delay);
 		i++;
 	}
@@ -425,10 +423,10 @@ rw_lock_x_lock_wait_func(
 	sync_array_t*	sync_arr;
 	int64_t		count_os_wait = 0;
-	ut_ad(my_atomic_load32_explicit(&lock->lock_word, MY_MEMORY_ORDER_RELAXED) <= threshold);
+	ut_ad(lock->lock_word.load(std::memory_order_relaxed) <= threshold);
 	HMT_low();
-	while (my_atomic_load32_explicit(&lock->lock_word, MY_MEMORY_ORDER_RELAXED) < threshold) {
+	while (lock->lock_word.load(std::memory_order_relaxed) < threshold) {
 		ut_delay(srv_spin_wait_delay);
 		if (i < srv_n_spin_wait_rounds) {
@@ -447,7 +445,7 @@ rw_lock_x_lock_wait_func(
 			i = 0;
 			/* Check lock_word to ensure wake-up isn't missed.*/
-			if (my_atomic_load32_explicit(&lock->lock_word, MY_MEMORY_ORDER_RELAXED) < threshold) {
+			if (lock->lock_word.load(std::memory_order_relaxed) < threshold) {
 				++count_os_wait;
@@ -537,18 +535,17 @@ rw_lock_x_lock_low(
 					file_name, line);
 		} else {
-			int32_t lock_word = my_atomic_load32_explicit(&lock->lock_word,
-								      MY_MEMORY_ORDER_RELAXED);
+			auto lock_word = lock->lock_word.load(std::memory_order_relaxed);
 			/* At least one X lock by this thread already
 			exists. Add another. */
 			if (lock_word == 0
 			    || lock_word == -X_LOCK_HALF_DECR) {
-				my_atomic_add32_explicit(&lock->lock_word, -X_LOCK_DECR,
-							 MY_MEMORY_ORDER_RELAXED);
+				lock->lock_word.fetch_sub(X_LOCK_DECR,
+							  std::memory_order_relaxed);
 			} else {
 				ut_ad(lock_word <= -X_LOCK_DECR);
-				my_atomic_add32_explicit(&lock->lock_word, -1,
-							 MY_MEMORY_ORDER_RELAXED);
+				lock->lock_word.fetch_sub(1,
+							  std::memory_order_relaxed);
 			}
 		}
@@ -620,10 +617,10 @@ rw_lock_sx_lock_low(
 			read and write to the lock_word. */
 #ifdef UNIV_DEBUG
-			int32_t lock_word =
+			auto lock_word =
 #endif
-			my_atomic_add32_explicit(&lock->lock_word, -X_LOCK_HALF_DECR,
-						 MY_MEMORY_ORDER_RELAXED);
+			lock->lock_word.fetch_sub(X_LOCK_HALF_DECR,
+						  std::memory_order_relaxed);
 			ut_ad((lock_word == 0)
 			      || ((lock_word <= -X_LOCK_DECR)
@@ -691,7 +688,7 @@ rw_lock_x_lock_func(
 	/* Spin waiting for the lock_word to become free */
 	HMT_low();
 	while (i < srv_n_spin_wait_rounds
-	       && my_atomic_load32_explicit(&lock->lock_word, MY_MEMORY_ORDER_RELAXED) <= X_LOCK_HALF_DECR) {
+	       && lock->lock_word.load(std::memory_order_relaxed) <= X_LOCK_HALF_DECR) {
 		ut_delay(srv_spin_wait_delay);
 		i++;
 	}
@@ -792,7 +789,7 @@ rw_lock_sx_lock_func(
 	/* Spin waiting for the lock_word to become free */
 	while (i < srv_n_spin_wait_rounds
-	       && my_atomic_load32_explicit(&lock->lock_word, MY_MEMORY_ORDER_RELAXED) <= X_LOCK_HALF_DECR) {
+	       && lock->lock_word.load(std::memory_order_relaxed) <= X_LOCK_HALF_DECR) {
 		ut_delay(srv_spin_wait_delay);
 		i++;
 	}
@@ -859,8 +856,7 @@ rw_lock_validate(
 	ut_ad(lock);
-	lock_word = my_atomic_load32_explicit(const_cast<int32_t*>(&lock->lock_word),
-					      MY_MEMORY_ORDER_RELAXED);
+	lock_word = lock->lock_word.load(std::memory_order_relaxed);
 	ut_ad(lock->magic_n == RW_LOCK_MAGIC_N);
 	ut_ad(my_atomic_load32_explicit(const_cast<int32_t*>(&lock->waiters),
@@ -927,8 +923,7 @@ rw_lock_add_debug_info(
 	rw_lock_debug_mutex_exit();
 	if (pass == 0 && lock_type != RW_LOCK_X_WAIT) {
-		int32_t lock_word = my_atomic_load32_explicit(&lock->lock_word,
-							      MY_MEMORY_ORDER_RELAXED);
+		auto lock_word = lock->lock_word.load(std::memory_order_relaxed);
 		/* Recursive x while holding SX
 		(lock_type == RW_LOCK_X && lock_word == -X_LOCK_HALF_DECR)
@@ -1094,7 +1089,7 @@ rw_lock_list_print_info(
 		count++;
-		if (my_atomic_load32_explicit(const_cast<int32_t*>(&lock->lock_word), MY_MEMORY_ORDER_RELAXED) != X_LOCK_DECR) {
+		if (lock->lock_word.load(std::memory_order_relaxed) != X_LOCK_DECR) {
 			fprintf(file, "RW-LOCK: %p ", (void*) lock);
...