Commit 668a5f3d authored by Marko Mäkelä's avatar Marko Mäkelä

MDEV-26720: Optimize single-bit atomic operations on IA-32 and AMD64

This is mostly working around a bad compiler optimization.

The Intel 80386 processor introduced some bit operations that would be
the perfect translation for atomic single-bit read-modify-and-write
operations. Alas, even the latest compilers as of today
(GCC 11, clang 13, Microsoft Visual C 19.29) would generate a loop around
LOCK CMPXCHG instead of emitting the instructions
LOCK BTS (fetch_or()), LOCK BTR (fetch_and()), LOCK BTC (fetch_xor()).

fil_space_t::clear_closing(): Clear the CLOSING flag.

fil_space_t::set_stopping_check(): Special variant of
fil_space_t::set_stopping() that will return the old value
of the STOPPING flag after atomically setting it.

fil_space_t::clear_stopping(): Use fetch_sub() to toggle
the STOPPING flag. The flag is guaranteed to be set upon
calling this function, hence we will toggle it to clear it.
On IA-32 and AMD64, this will translate into
the 80486 LOCK XADD instruction.

fil_space_t::check_pending_operations(): Replace a Boolean
variable with a goto label, to allow more compact code
generation for fil_space_t::set_stopping_check().

trx_rseg_t: Define private accessors ref_set() and ref_reset()
for setting and clearing the flags.

trx_lock_t::clear_deadlock_victim(), trx_lock_t::set_wsrep_victim():
Accessors for clearing and setting the flags.
parent 0144d1d2
...@@ -337,7 +337,7 @@ fil_node_t* fil_space_t::add(const char* name, pfs_os_file_t handle, ...@@ -337,7 +337,7 @@ fil_node_t* fil_space_t::add(const char* name, pfs_os_file_t handle,
this->size += size; this->size += size;
UT_LIST_ADD_LAST(chain, node); UT_LIST_ADD_LAST(chain, node);
if (node->is_open()) { if (node->is_open()) {
n_pending.fetch_and(~CLOSING, std::memory_order_relaxed); clear_closing();
if (++fil_system.n_open >= srv_max_n_open_files) { if (++fil_system.n_open >= srv_max_n_open_files) {
reacquire(); reacquire();
try_to_close(true); try_to_close(true);
...@@ -700,7 +700,7 @@ ATTRIBUTE_COLD bool fil_space_t::prepare(bool have_mutex) ...@@ -700,7 +700,7 @@ ATTRIBUTE_COLD bool fil_space_t::prepare(bool have_mutex)
} }
else else
clear: clear:
n_pending.fetch_and(~CLOSING, std::memory_order_relaxed); clear_closing();
if (!have_mutex) if (!have_mutex)
mysql_mutex_unlock(&fil_system.mutex); mysql_mutex_unlock(&fil_system.mutex);
...@@ -1568,38 +1568,23 @@ fil_name_write( ...@@ -1568,38 +1568,23 @@ fil_name_write(
fil_space_t *fil_space_t::check_pending_operations(ulint id) fil_space_t *fil_space_t::check_pending_operations(ulint id)
{ {
ut_a(!is_system_tablespace(id)); ut_a(!is_system_tablespace(id));
bool being_deleted= false;
mysql_mutex_lock(&fil_system.mutex); mysql_mutex_lock(&fil_system.mutex);
fil_space_t *space= fil_space_get_by_id(id); fil_space_t *space= fil_space_get_by_id(id);
if (!space);
else if (space->pending() & STOPPING)
being_deleted= true;
else
{
if (space->crypt_data)
{
space->reacquire();
mysql_mutex_unlock(&fil_system.mutex);
fil_space_crypt_close_tablespace(space);
mysql_mutex_lock(&fil_system.mutex);
space->release();
}
being_deleted= space->set_stopping();
}
mysql_mutex_unlock(&fil_system.mutex);
if (!space) if (!space)
{
mysql_mutex_unlock(&fil_system.mutex);
return nullptr; return nullptr;
}
if (being_deleted) if (space->pending() & STOPPING)
{ {
being_deleted:
/* A thread executing DDL and another thread executing purge may /* A thread executing DDL and another thread executing purge may
be executing fil_delete_tablespace() concurrently for the same be executing fil_delete_tablespace() concurrently for the same
tablespace. Wait for the other thread to complete the operation. */ tablespace. Wait for the other thread to complete the operation. */
for (ulint count= 0;; count++) for (ulint count= 0;; count++)
{ {
mysql_mutex_lock(&fil_system.mutex);
space= fil_space_get_by_id(id); space= fil_space_get_by_id(id);
ut_ad(!space || space->is_stopping()); ut_ad(!space || space->is_stopping());
mysql_mutex_unlock(&fil_system.mutex); mysql_mutex_unlock(&fil_system.mutex);
...@@ -1610,8 +1595,24 @@ fil_space_t *fil_space_t::check_pending_operations(ulint id) ...@@ -1610,8 +1595,24 @@ fil_space_t *fil_space_t::check_pending_operations(ulint id)
sql_print_warning("InnoDB: Waiting for tablespace " ULINTPF sql_print_warning("InnoDB: Waiting for tablespace " ULINTPF
" to be deleted", id); " to be deleted", id);
std::this_thread::sleep_for(std::chrono::milliseconds(20)); std::this_thread::sleep_for(std::chrono::milliseconds(20));
mysql_mutex_lock(&fil_system.mutex);
} }
} }
else
{
if (space->crypt_data)
{
space->reacquire();
mysql_mutex_unlock(&fil_system.mutex);
fil_space_crypt_close_tablespace(space);
mysql_mutex_lock(&fil_system.mutex);
space->release();
}
if (space->set_stopping_check())
goto being_deleted;
}
mysql_mutex_unlock(&fil_system.mutex);
for (ulint count= 0;; count++) for (ulint count= 0;; count++)
{ {
......
...@@ -18512,7 +18512,7 @@ void lock_wait_wsrep_kill(trx_t *bf_trx, ulong thd_id, trx_id_t trx_id) ...@@ -18512,7 +18512,7 @@ void lock_wait_wsrep_kill(trx_t *bf_trx, ulong thd_id, trx_id_t trx_id)
wsrep_thd_transaction_state_str(vthd), wsrep_thd_transaction_state_str(vthd),
wsrep_thd_query(vthd)); wsrep_thd_query(vthd));
/* Mark transaction as a victim for Galera abort */ /* Mark transaction as a victim for Galera abort */
vtrx->lock.was_chosen_as_deadlock_victim.fetch_or(2); vtrx->lock.set_wsrep_victim();
if (!wsrep_thd_set_wsrep_aborter(bf_thd, vthd)) if (!wsrep_thd_set_wsrep_aborter(bf_thd, vthd))
aborting= true; aborting= true;
else else
...@@ -18567,7 +18567,7 @@ wsrep_abort_transaction( ...@@ -18567,7 +18567,7 @@ wsrep_abort_transaction(
wsrep_thd_transaction_state_str(victim_thd)); wsrep_thd_transaction_state_str(victim_thd));
if (victim_trx) { if (victim_trx) {
victim_trx->lock.was_chosen_as_deadlock_victim.fetch_or(2); victim_trx->lock.set_wsrep_victim();
wsrep_thd_kill_LOCK(victim_thd); wsrep_thd_kill_LOCK(victim_thd);
wsrep_thd_LOCK(victim_thd); wsrep_thd_LOCK(victim_thd);
......
...@@ -511,7 +511,9 @@ struct fil_space_t final ...@@ -511,7 +511,9 @@ struct fil_space_t final
/** Note that operations on the tablespace must stop. /** Note that operations on the tablespace must stop.
@return whether the operations were already stopped */ @return whether the operations were already stopped */
inline bool set_stopping(); inline bool set_stopping_check();
/** Note that operations on the tablespace must stop. */
inline void set_stopping();
/** Note that operations on the tablespace can resume after truncation */ /** Note that operations on the tablespace can resume after truncation */
inline void clear_stopping(); inline void clear_stopping();
...@@ -566,9 +568,35 @@ struct fil_space_t final ...@@ -566,9 +568,35 @@ struct fil_space_t final
/** Clear the NEEDS_FSYNC flag */ /** Clear the NEEDS_FSYNC flag */
void clear_flush() void clear_flush()
{ n_pending.fetch_and(~NEEDS_FSYNC, std::memory_order_release); } {
#if defined __GNUC__ && (defined __i386__ || defined __x86_64__)
static_assert(NEEDS_FSYNC == 1U << 29, "compatibility");
__asm__ __volatile__("lock btrl $29, %0" : "+m" (n_pending));
#elif defined _MSC_VER && (defined _M_IX86 || defined _M_X64)
static_assert(NEEDS_FSYNC == 1U << 29, "compatibility");
_interlockedbittestandreset(reinterpret_cast<volatile long*>
(&n_pending), 29);
#else
n_pending.fetch_and(~NEEDS_FSYNC, std::memory_order_release);
#endif
}
private: private:
/** Clear the CLOSING flag */
void clear_closing()
{
#if defined __GNUC__ && (defined __i386__ || defined __x86_64__)
static_assert(CLOSING == 1U << 30, "compatibility");
__asm__ __volatile__("lock btrl $30, %0" : "+m" (n_pending));
#elif defined _MSC_VER && (defined _M_IX86 || defined _M_X64)
static_assert(CLOSING == 1U << 30, "compatibility");
_interlockedbittestandreset(reinterpret_cast<volatile long*>
(&n_pending), 30);
#else
n_pending.fetch_and(~CLOSING, std::memory_order_relaxed);
#endif
}
/** @return pending operations (and flags) */ /** @return pending operations (and flags) */
uint32_t pending()const { return n_pending.load(std::memory_order_acquire); } uint32_t pending()const { return n_pending.load(std::memory_order_acquire); }
public: public:
...@@ -1508,16 +1536,46 @@ inline void fil_space_t::reacquire() ...@@ -1508,16 +1536,46 @@ inline void fil_space_t::reacquire()
/** Note that operations on the tablespace must stop. /** Note that operations on the tablespace must stop.
@return whether the operations were already stopped */ @return whether the operations were already stopped */
inline bool fil_space_t::set_stopping() inline bool fil_space_t::set_stopping_check()
{ {
mysql_mutex_assert_owner(&fil_system.mutex); mysql_mutex_assert_owner(&fil_system.mutex);
#if defined __GNUC__ && (defined __i386__ || defined __x86_64__)
static_assert(STOPPING == 1U << 31, "compatibility");
__asm__ goto("lock btsl $31, %0\t\njnc %l1" : : "m" (n_pending)
: "cc", "memory" : not_stopped);
return true;
not_stopped:
return false;
#elif defined _MSC_VER && (defined _M_IX86 || defined _M_X64)
static_assert(STOPPING == 1U << 31, "compatibility");
return _interlockedbittestandset(reinterpret_cast<volatile long*>
(&n_pending), 31);
#else
return n_pending.fetch_or(STOPPING, std::memory_order_relaxed) & STOPPING; return n_pending.fetch_or(STOPPING, std::memory_order_relaxed) & STOPPING;
#endif
}
/** Note that operations on the tablespace must stop. */
inline void fil_space_t::set_stopping()
{
mysql_mutex_assert_owner(&fil_system.mutex);
#if defined __GNUC__ && (defined __i386__ || defined __x86_64__)
static_assert(STOPPING == 1U << 31, "compatibility");
__asm__ __volatile__("lock btsl $31, %0" : "+m" (n_pending));
#elif defined _MSC_VER && (defined _M_IX86 || defined _M_X64)
static_assert(STOPPING == 1U << 31, "compatibility");
_interlockedbittestandset(reinterpret_cast<volatile long*>(&n_pending), 31);
#else
n_pending.fetch_or(STOPPING, std::memory_order_relaxed);
#endif
} }
inline void fil_space_t::clear_stopping() inline void fil_space_t::clear_stopping()
{ {
mysql_mutex_assert_owner(&fil_system.mutex); mysql_mutex_assert_owner(&fil_system.mutex);
ut_d(auto n=) n_pending.fetch_and(~STOPPING, std::memory_order_relaxed); static_assert(STOPPING == 1U << 31, "compatibility");
ut_d(auto n=) n_pending.fetch_sub(STOPPING, std::memory_order_relaxed);
ut_ad(n & STOPPING); ut_ad(n & STOPPING);
} }
......
...@@ -105,6 +105,43 @@ struct MY_ALIGNED(CPU_LEVEL1_DCACHE_LINESIZE) trx_rseg_t ...@@ -105,6 +105,43 @@ struct MY_ALIGNED(CPU_LEVEL1_DCACHE_LINESIZE) trx_rseg_t
static constexpr uint32_t REF= 4; static constexpr uint32_t REF= 4;
uint32_t ref_load() const { return ref.load(std::memory_order_relaxed); } uint32_t ref_load() const { return ref.load(std::memory_order_relaxed); }
/** Set a bit in ref */
template<bool needs_purge> void ref_set()
{
static_assert(SKIP == 1U << 0, "compatibility");
static_assert(NEEDS_PURGE == 1U << 1, "compatibility");
#if defined __GNUC__ && (defined __i386__ || defined __x86_64__)
if (needs_purge)
__asm__ __volatile__("lock btsl $1, %0" : "+m" (ref));
else
__asm__ __volatile__("lock btsl $0, %0" : "+m" (ref));
#elif defined _MSC_VER && (defined _M_IX86 || defined _M_X64)
_interlockedbittestandset(reinterpret_cast<volatile long*>(&ref),
needs_purge);
#else
ref.fetch_or(needs_purge ? NEEDS_PURGE : SKIP, std::memory_order_relaxed);
#endif
}
/** Clear a bit in ref */
template<bool needs_purge> void ref_reset()
{
static_assert(SKIP == 1U << 0, "compatibility");
static_assert(NEEDS_PURGE == 1U << 1, "compatibility");
#if defined __GNUC__ && (defined __i386__ || defined __x86_64__)
if (needs_purge)
__asm__ __volatile__("lock btrl $1, %0" : "+m" (ref));
else
__asm__ __volatile__("lock btrl $0, %0" : "+m" (ref));
#elif defined _MSC_VER && (defined _M_IX86 || defined _M_X64)
_interlockedbittestandreset(reinterpret_cast<volatile long*>(&ref),
needs_purge);
#else
ref.fetch_and(needs_purge ? ~NEEDS_PURGE : ~SKIP,
std::memory_order_relaxed);
#endif
}
public: public:
/** Initialize the fields that are not zero-initialized. */ /** Initialize the fields that are not zero-initialized. */
...@@ -115,21 +152,22 @@ struct MY_ALIGNED(CPU_LEVEL1_DCACHE_LINESIZE) trx_rseg_t ...@@ -115,21 +152,22 @@ struct MY_ALIGNED(CPU_LEVEL1_DCACHE_LINESIZE) trx_rseg_t
void destroy(); void destroy();
/** Note that undo tablespace truncation was started. */ /** Note that undo tablespace truncation was started. */
void set_skip_allocation() void set_skip_allocation() { ut_ad(is_persistent()); ref_set<false>(); }
{ ut_ad(is_persistent()); ref.fetch_or(SKIP, std::memory_order_relaxed); }
/** Note that undo tablespace truncation was completed. */ /** Note that undo tablespace truncation was completed. */
void clear_skip_allocation() void clear_skip_allocation()
{ {
ut_ad(is_persistent()); ut_ad(is_persistent());
#if defined DBUG_OFF
ref_reset<false>();
#else
ut_d(auto r=) ref.fetch_and(~SKIP, std::memory_order_relaxed); ut_d(auto r=) ref.fetch_and(~SKIP, std::memory_order_relaxed);
ut_ad(r == SKIP); ut_ad(r == SKIP);
#endif
} }
/** Note that the rollback segment requires purge. */ /** Note that the rollback segment requires purge. */
void set_needs_purge() void set_needs_purge() { ref_set<true>(); }
{ ref.fetch_or(NEEDS_PURGE, std::memory_order_relaxed); }
/** Note that the rollback segment will not require purge. */ /** Note that the rollback segment will not require purge. */
void clear_needs_purge() void clear_needs_purge() { ref_reset<true>(); }
{ ref.fetch_and(~NEEDS_PURGE, std::memory_order_relaxed); }
/** @return whether the segment is marked for undo truncation */ /** @return whether the segment is marked for undo truncation */
bool skip_allocation() const { return ref_load() & SKIP; } bool skip_allocation() const { return ref_load() & SKIP; }
/** @return whether the segment needs purge */ /** @return whether the segment needs purge */
......
...@@ -342,6 +342,38 @@ struct trx_lock_t ...@@ -342,6 +342,38 @@ struct trx_lock_t
1=another transaction chose this as a victim in deadlock resolution. */ 1=another transaction chose this as a victim in deadlock resolution. */
Atomic_relaxed<byte> was_chosen_as_deadlock_victim; Atomic_relaxed<byte> was_chosen_as_deadlock_victim;
/** Clear the deadlock victim status. */
void clear_deadlock_victim()
{
#ifndef WITH_WSREP
was_chosen_as_deadlock_victim= false;
#elif defined __GNUC__ && (defined __i386__ || defined __x86_64__)
/* There is no 8-bit version of the 80386 BTR instruction.
Technically, this is the wrong addressing mode (16-bit), but
there are other data members stored after the byte. */
__asm__ __volatile__("lock btrw $0, %0"
: "+m" (was_chosen_as_deadlock_victim));
#else
was_chosen_as_deadlock_victim.fetch_and(byte(~1));
#endif
}
#ifdef WITH_WSREP
/** Flag the lock owner as a victim in Galera conflict resolution. */
void set_wsrep_victim()
{
# if defined __GNUC__ && (defined __i386__ || defined __x86_64__)
/* There is no 8-bit version of the 80386 BTS instruction.
Technically, this is the wrong addressing mode (16-bit), but
there are other data members stored after the byte. */
__asm__ __volatile__("lock btsw $1, %0"
: "+m" (was_chosen_as_deadlock_victim));
# else
was_chosen_as_deadlock_victim.fetch_or(2);
# endif
}
#endif
/** Next available rec_pool[] entry */ /** Next available rec_pool[] entry */
byte rec_cached; byte rec_cached;
/** Next available table_pool[] entry */ /** Next available table_pool[] entry */
......
...@@ -1282,8 +1282,7 @@ lock_rec_enqueue_waiting( ...@@ -1282,8 +1282,7 @@ lock_rec_enqueue_waiting(
} }
trx->lock.wait_thr = thr; trx->lock.wait_thr = thr;
trx->lock.was_chosen_as_deadlock_victim trx->lock.clear_deadlock_victim();
IF_WSREP(.fetch_and(byte(~1)), = false);
DBUG_LOG("ib_lock", "trx " << ib::hex(trx->id) DBUG_LOG("ib_lock", "trx " << ib::hex(trx->id)
<< " waits for lock in index " << index->name << " waits for lock in index " << index->name
...@@ -3333,8 +3332,7 @@ lock_table_enqueue_waiting( ...@@ -3333,8 +3332,7 @@ lock_table_enqueue_waiting(
lock_table_create(table, mode | LOCK_WAIT, trx, c_lock); lock_table_create(table, mode | LOCK_WAIT, trx, c_lock);
trx->lock.wait_thr = thr; trx->lock.wait_thr = thr;
trx->lock.was_chosen_as_deadlock_victim trx->lock.clear_deadlock_victim();
IF_WSREP(.fetch_and(byte(~1)), = false);
MONITOR_INC(MONITOR_TABLELOCK_WAIT); MONITOR_INC(MONITOR_TABLELOCK_WAIT);
return(DB_LOCK_WAIT); return(DB_LOCK_WAIT);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment