Commit 277ba134 authored by Marko Mäkelä

MDEV-26467: Avoid futile spin loops

Typically, index_lock and fil_space_t::latch are held for longer than a
spin loop in latch acquisition would be waiting. Let us avoid spin loops
for those, as well as for dict_sys.latch, which can be held in exclusive
mode for a long time (while loading metadata into the buffer pool and
the dictionary cache).

Performance testing on a dual Intel Xeon E5-2630 v4 (2 NUMA nodes)
suggests that the buffer pool page latch (block_lock) benefits from a
spin loop in both read-only and read-write workloads where the working
set is slightly larger than the buffer pool. Presumably, most contention
would occur on leaf page latches; contention on upper-level pages in the
buffer pool should intuitively last longer.

We introduce srw_spin_lock and srw_spin_mutex to allow users of
srw_lock or srw_mutex to opt in to the spin loop.
On Microsoft Windows, a spin loop variant was not and will not be
available; srw_mutex and srw_lock will simply wrap SRWLOCK.
That is, on Microsoft Windows, the parameters innodb_sync_spin_loops
and innodb_spin_wait_delay will only affect block_lock.
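
To make the opt-in concrete, here is a minimal sketch of the pattern
(assuming C++20 std::atomic wait/notify as a portable stand-in for the
futex and WaitOnAddress calls in the patch; tiny_mutex, spin_rounds and
the demo typedefs are illustrative names, not InnoDB identifiers): the
spin loop is selected by a compile-time template parameter, so the
non-spinning variant carries no trace of it.

#include <atomic>
#include <cstdint>

static const unsigned spin_rounds= 30;   // illustrative; cf. innodb_sync_spin_loops

template<bool spinloop>
struct tiny_mutex
{
  std::atomic<uint32_t> lock{0};

  void wr_lock()
  {
    if constexpr (spinloop)              // compiled away in tiny_mutex<false>
      for (unsigned i= spin_rounds; i; i--)
      {
        if (!lock.exchange(1, std::memory_order_acquire))
          return;                        // acquired while spinning
        // a real implementation would execute a PAUSE-style delay here
      }
    while (lock.exchange(1, std::memory_order_acquire))
      lock.wait(1);                      // futex-style sleep, cf. SRW_FUTEX(...,WAIT,...)
  }

  void wr_unlock()
  {
    lock.store(0, std::memory_order_release);
    lock.notify_one();                   // cf. SRW_FUTEX(...,WAKE,1)
  }
};

typedef tiny_mutex<false> demo_mutex;      // analogous to srw_mutex
typedef tiny_mutex<true>  demo_spin_mutex; // analogous to srw_spin_mutex
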
parent 0f0b7e47
@@ -1247,7 +1247,7 @@ btr_cur_search_to_nth_level_func(
 btr_cur_t* cursor, /*!< in/out: tree cursor; the cursor page is
 s- or x-latched, but see also above! */
 #ifdef BTR_CUR_HASH_ADAPT
-srw_lock* ahi_latch,
+srw_spin_lock* ahi_latch,
 /*!< in: currently held AHI rdlock, or NULL */
 #endif /* BTR_CUR_HASH_ADAPT */
 mtr_t* mtr, /*!< in: mtr */
@@ -3611,7 +3611,7 @@ btr_cur_optimistic_insert(
 ut_ad(flags == BTR_NO_LOCKING_FLAG);
 } else if (index->table->is_temporary()) {
 } else {
-srw_lock* ahi_latch = btr_search_sys.get_latch(*index);
+srw_spin_lock* ahi_latch = btr_search_sys.get_latch(*index);
 if (!reorg && cursor->flag == BTR_CUR_HASH) {
 btr_search_update_hash_node_on_insert(
 cursor, ahi_latch);
@@ -4331,7 +4331,7 @@ btr_cur_update_in_place(
 #ifdef BTR_CUR_HASH_ADAPT
 {
-srw_lock* ahi_latch = block->index
+srw_spin_lock* ahi_latch = block->index
 ? btr_search_sys.get_latch(*index) : NULL;
 if (ahi_latch) {
 /* TO DO: Can we skip this if none of the fields
......
@@ -1015,7 +1015,7 @@ btr_search_guess_on_hash(
 ulint mode,
 ulint latch_mode,
 btr_cur_t* cursor,
-srw_lock* ahi_latch,
+srw_spin_lock* ahi_latch,
 mtr_t* mtr)
 {
 ulint fold;
@@ -1460,7 +1460,7 @@ void
 btr_search_build_page_hash_index(
 dict_index_t* index,
 buf_block_t* block,
-srw_lock* ahi_latch,
+srw_spin_lock* ahi_latch,
 uint16_t n_fields,
 uint16_t n_bytes,
 bool left_side)
@@ -1660,7 +1660,7 @@ btr_search_build_page_hash_index(
 @param[in,out] cursor cursor which was just positioned */
 void btr_search_info_update_slow(btr_search_t *info, btr_cur_t *cursor)
 {
-srw_lock* ahi_latch = &btr_search_sys.get_part(*cursor->index)
+srw_spin_lock* ahi_latch = &btr_search_sys.get_part(*cursor->index)
 ->latch;
 buf_block_t* block = btr_cur_get_block(cursor);
@@ -1727,7 +1727,7 @@ btr_search_move_or_delete_hash_entries(
 assert_block_ahi_valid(block);
 assert_block_ahi_valid(new_block);
-srw_lock* ahi_latch = index
+srw_spin_lock* ahi_latch = index
 ? &btr_search_sys.get_part(*index)->latch
 : nullptr;
@@ -1852,7 +1852,7 @@ void btr_search_update_hash_on_delete(btr_cur_t *cursor)
 inserted next to the cursor.
 @param[in] ahi_latch the adaptive hash index latch */
 void btr_search_update_hash_node_on_insert(btr_cur_t *cursor,
-srw_lock *ahi_latch)
+srw_spin_lock *ahi_latch)
 {
 buf_block_t* block;
 dict_index_t* index;
@@ -1925,7 +1925,7 @@ void btr_search_update_hash_node_on_insert(btr_cur_t *cursor,
 to the cursor
 @param[in] ahi_latch the adaptive hash index latch */
 void btr_search_update_hash_on_insert(btr_cur_t *cursor,
-srw_lock *ahi_latch)
+srw_spin_lock *ahi_latch)
 {
 buf_block_t* block;
 dict_index_t* index;
......
 /*****************************************************************************
 Copyright (c) 1994, 2019, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2017, 2020, MariaDB Corporation.
+Copyright (c) 2017, 2021, MariaDB Corporation.
 This program is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free Software
@@ -201,7 +201,7 @@ btr_cur_search_to_nth_level_func(
 btr_cur_t* cursor, /*!< in/out: tree cursor; the cursor page is
 s- or x-latched, but see also above! */
 #ifdef BTR_CUR_HASH_ADAPT
-srw_lock* ahi_latch,
+srw_spin_lock* ahi_latch,
 /*!< in: currently held AHI rdlock, or NULL */
 #endif /* BTR_CUR_HASH_ADAPT */
 mtr_t* mtr, /*!< in/out: mini-transaction */
......
@@ -135,7 +135,7 @@ btr_pcur_open_with_no_init_func(
 that the ahi_latch protects the record! */
 btr_pcur_t* cursor, /*!< in: memory buffer for persistent cursor */
 #ifdef BTR_CUR_HASH_ADAPT
-srw_lock* ahi_latch,
+srw_spin_lock* ahi_latch,
 /*!< in: currently held AHI rdlock, or NULL */
 #endif /* BTR_CUR_HASH_ADAPT */
 mtr_t* mtr); /*!< in: mtr */
......
 /*****************************************************************************
 Copyright (c) 1996, 2015, Oracle and/or its affiliates. All Rights Reserved.
-Copyright (c) 2015, 2020, MariaDB Corporation.
+Copyright (c) 2015, 2021, MariaDB Corporation.
 This program is free software; you can redistribute it and/or modify it under
 the terms of the GNU General Public License as published by the Free Software
@@ -479,7 +479,7 @@ btr_pcur_open_with_no_init_func(
 that the ahi_latch protects the record! */
 btr_pcur_t* cursor, /*!< in: memory buffer for persistent cursor */
 #ifdef BTR_CUR_HASH_ADAPT
-srw_lock* ahi_latch,
+srw_spin_lock* ahi_latch,
 /*!< in: currently held AHI rdlock, or NULL */
 #endif /* BTR_CUR_HASH_ADAPT */
 mtr_t* mtr) /*!< in: mtr */
......
@@ -81,7 +81,7 @@ btr_search_guess_on_hash(
 ulint mode,
 ulint latch_mode,
 btr_cur_t* cursor,
-srw_lock* ahi_latch,
+srw_spin_lock* ahi_latch,
 mtr_t* mtr);
 /** Move or delete hash entries for moved records, usually in a page split.
@@ -114,7 +114,7 @@ void btr_search_drop_page_hash_when_freed(const page_id_t page_id);
 inserted next to the cursor.
 @param[in] ahi_latch the adaptive hash index latch */
 void btr_search_update_hash_node_on_insert(btr_cur_t *cursor,
-srw_lock *ahi_latch);
+srw_spin_lock *ahi_latch);
 /** Updates the page hash index when a single record is inserted on a page.
 @param[in,out] cursor cursor which was positioned to the
@@ -123,7 +123,7 @@ void btr_search_update_hash_node_on_insert(btr_cur_t *cursor,
 to the cursor
 @param[in] ahi_latch the adaptive hash index latch */
 void btr_search_update_hash_on_insert(btr_cur_t *cursor,
-srw_lock *ahi_latch);
+srw_spin_lock *ahi_latch);
 /** Updates the page hash index when a single record is deleted from a page.
 @param[in] cursor cursor which was positioned on the record to delete
@@ -237,7 +237,7 @@ struct btr_search_sys_t
 struct partition
 {
 /** latches protecting hash_table */
-srw_lock latch;
+srw_spin_lock latch;
 /** mapping of dtuple_fold() to rec_t* in buf_block_t::frame */
 hash_table_t table;
 /** memory heap for table */
@@ -249,8 +249,8 @@ struct btr_search_sys_t
 #pragma warning(disable : 4200)
 #endif
-char pad[(CPU_LEVEL1_DCACHE_LINESIZE - sizeof(srw_lock) -
-sizeof(hash_table_t) - sizeof(mem_heap_t)) &
+char pad[(CPU_LEVEL1_DCACHE_LINESIZE - sizeof latch -
+sizeof table - sizeof heap) &
 (CPU_LEVEL1_DCACHE_LINESIZE - 1)];
 #ifdef _MSC_VER
@@ -306,7 +306,7 @@ struct btr_search_sys_t
 }
 /** Get the search latch for the adaptive hash index partition */
-srw_lock *get_latch(const dict_index_t &index) const
+srw_spin_lock *get_latch(const dict_index_t &index) const
 { return &get_part(index)->latch; }
 /** Create and initialize at startup */
@@ -351,7 +351,7 @@ inline ulint dict_index_t::n_ahi_pages() const
 {
 if (!btr_search_enabled)
 return 0;
-srw_lock *latch= &btr_search_sys.get_part(*this)->latch;
+srw_spin_lock *latch= &btr_search_sys.get_part(*this)->latch;
 latch->rd_lock(SRW_LOCK_CALL);
 ulint ref_count= search_info->ref_count;
 latch->rd_unlock();
......
@@ -2268,10 +2268,10 @@ struct dict_table_t {
 lock_t* autoinc_lock;
 /** Mutex protecting autoinc and freed_indexes. */
-srw_mutex autoinc_mutex;
+srw_spin_mutex autoinc_mutex;
 private:
 /** Mutex protecting locks on this table. */
-srw_mutex lock_mutex;
+srw_spin_mutex lock_mutex;
 #ifdef UNIV_DEBUG
 /** The owner of lock_mutex (0 if none) */
 Atomic_relaxed<os_thread_id_t> lock_mutex_owner{0};
......
@@ -584,7 +584,7 @@ class lock_sys_t
 #else
 {
 private:
-srw_lock_low lock;
+srw_spin_lock_low lock;
 public:
 /** Try to acquire a lock */
 bool try_acquire() { return lock.wr_lock_try(); }
@@ -666,7 +666,7 @@ class lock_sys_t
 bool m_initialised;
 /** mutex proteting the locks */
-MY_ALIGNED(CPU_LEVEL1_DCACHE_LINESIZE) srw_lock latch;
+MY_ALIGNED(CPU_LEVEL1_DCACHE_LINESIZE) srw_spin_lock latch;
 #ifdef UNIV_DEBUG
 /** The owner of exclusive latch (0 if none); protected by latch */
 std::atomic<os_thread_id_t> writer{0};
......
@@ -124,8 +124,8 @@ class rw_lock
 }
 DBUG_ASSERT((l & ~WRITER_WAITING) == UPDATER);
 /* Any thread that had set WRITER_WAITING will eventually be woken
-up by ssux_lock_low::x_unlock() or ssux_lock_low::u_unlock()
-(not ssux_lock_low::wr_u_downgrade() to keep the code simple). */
+up by ssux_lock_impl::x_unlock() or ssux_lock_impl::u_unlock()
+(not ssux_lock_impl::wr_u_downgrade() to keep the code simple). */
 return true;
 }
 /** Downgrade an exclusive lock to an update lock. */
......
@@ -32,9 +32,11 @@ class srw_mutex final
 void wr_unlock() { pthread_mutex_unlock(&lock); }
 bool wr_lock_try() { return !pthread_mutex_trylock(&lock); }
 };
+typedef srw_mutex srw_spin_mutex;
 #else
 /** Futex-based mutex */
-class srw_mutex final
+template<bool spinloop>
+class srw_mutex_impl final
 {
 /** The lock word, containing HOLDER + 1 if the lock is being held,
 plus the number of waiters */
@@ -79,10 +81,19 @@ class srw_mutex final
 }
 }
 };
+typedef srw_mutex_impl<true> srw_spin_mutex;
+typedef srw_mutex_impl<false> srw_mutex;
+#endif
+# if defined _WIN32 || defined SUX_LOCK_GENERIC
+# else
+template<bool spinlock> class srw_lock_impl;
 #endif
 /** Slim shared-update-exclusive lock with no recursion */
-class ssux_lock_low final
+template<bool spinloop>
+class ssux_lock_impl final
 #ifdef SUX_LOCK_GENERIC
 : private rw_lock
 #endif
@@ -92,7 +103,7 @@ class ssux_lock_low final
 # ifdef SUX_LOCK_GENERIC
 # elif defined _WIN32
 # else
-friend class srw_lock;
+friend srw_lock_impl<spinloop>;
 # endif
 #endif
 #ifdef SUX_LOCK_GENERIC
@@ -259,7 +270,7 @@ class ssux_lock_low final
 class srw_lock_low
 {
 # ifdef UNIV_PFS_RWLOCK
-friend class srw_lock;
+friend class srw_lock_impl;
 # endif
 SRWLOCK lock;
 public:
@@ -272,12 +283,14 @@ class srw_lock_low
 bool wr_lock_try() { return TryAcquireSRWLockExclusive(&lock); }
 void wr_unlock() { ReleaseSRWLockExclusive(&lock); }
 };
+typedef srw_lock_low srw_spin_lock_low;
 #elif defined SUX_LOCK_GENERIC
 /** Slim read-write lock */
 class srw_lock_low
 {
 # ifdef UNIV_PFS_RWLOCK
-friend class srw_lock;
+friend class srw_lock_impl;
 # endif
 rw_lock_t lock;
 public:
@@ -290,8 +303,10 @@ class srw_lock_low
 bool wr_lock_try() { return !rw_trywrlock(&lock); }
 void wr_unlock() { rw_unlock(&lock); }
 };
+typedef srw_lock_low srw_spin_lock_low;
 #else
-typedef ssux_lock_low srw_lock_low;
+typedef ssux_lock_impl<false> srw_lock_low;
+typedef ssux_lock_impl<true> srw_spin_lock_low;
 #endif
 #ifndef UNIV_PFS_RWLOCK
@@ -299,7 +314,7 @@ typedef ssux_lock_low srw_lock_low;
 # define SRW_LOCK_ARGS(file, line) /* nothing */
 # define SRW_LOCK_CALL /* nothing */
 typedef srw_lock_low srw_lock;
-typedef ssux_lock_low ssux_lock;
+typedef srw_spin_lock_low srw_spin_lock;
 #else
 # define SRW_LOCK_INIT(key) init(key)
 # define SRW_LOCK_ARGS(file, line) file, line
@@ -309,7 +324,7 @@ typedef ssux_lock_low ssux_lock;
 class ssux_lock
 {
 PSI_rwlock *pfs_psi;
-ssux_lock_low lock;
+ssux_lock_impl<false> lock;
 ATTRIBUTE_NOINLINE void psi_rd_lock(const char *file, unsigned line);
 ATTRIBUTE_NOINLINE void psi_wr_lock(const char *file, unsigned line);
@@ -383,10 +398,18 @@ class ssux_lock
 };
 /** Slim reader-writer lock with PERFORMANCE_SCHEMA instrumentation */
-class srw_lock
+# if defined _WIN32 || defined SUX_LOCK_GENERIC
+# else
+template<bool spinlock>
+# endif
+class srw_lock_impl
 {
 PSI_rwlock *pfs_psi;
+# if defined _WIN32 || defined SUX_LOCK_GENERIC
 srw_lock_low lock;
+# else
+ssux_lock_impl<spinlock> lock;
+# endif
 ATTRIBUTE_NOINLINE void psi_rd_lock(const char *file, unsigned line);
 ATTRIBUTE_NOINLINE void psi_wr_lock(const char *file, unsigned line);
@@ -434,4 +457,13 @@ class srw_lock
 bool rd_lock_try() { return lock.rd_lock_try(); }
 bool wr_lock_try() { return lock.wr_lock_try(); }
 };
+# if defined _WIN32 || defined SUX_LOCK_GENERIC
+typedef srw_lock_impl srw_lock;
+typedef srw_lock_impl srw_spin_lock;
+# else
+typedef srw_lock_impl<false> srw_lock;
+typedef srw_lock_impl<true> srw_spin_lock;
+# endif
 #endif
@@ -27,12 +27,12 @@ this program; if not, write to the Free Software Foundation, Inc.,
 /** A "fat" rw-lock that supports
 S (shared), U (update, or shared-exclusive), and X (exclusive) modes
 as well as recursive U and X latch acquisition
-@tparam srw ssux_lock_low or ssux_lock */
-template<typename srw>
+@tparam ssux ssux_lock_impl or ssux_lock */
+template<typename ssux>
 class sux_lock final
 {
 /** The underlying non-recursive lock */
-srw lock;
+ssux lock;
 /** Numbers of U and X locks. Protected by lock. */
 uint32_t recursive;
 /** The owner of the U or X lock (0 if none); protected by lock */
@@ -270,20 +270,14 @@ class sux_lock final
 bool is_waiting() const { return lock.is_waiting(); }
 };
-/** needed for dict_index_t::clone() */
-template<> inline void sux_lock<ssux_lock>::operator=(const sux_lock&)
-{
-memset((void*) this, 0, sizeof *this);
-}
-typedef sux_lock<ssux_lock_low> block_lock;
+typedef sux_lock<ssux_lock_impl<true>> block_lock;
 #ifndef UNIV_PFS_RWLOCK
-typedef block_lock index_lock;
+typedef sux_lock<ssux_lock_impl<false>> index_lock;
 #else
 typedef sux_lock<ssux_lock> index_lock;
-template<> inline void sux_lock<ssux_lock_low>::init()
+template<> inline void sux_lock<ssux_lock_impl<true>>::init()
 {
 lock.init();
 ut_ad(!writer.load(std::memory_order_relaxed));
@@ -340,8 +334,13 @@ inline void sux_lock<ssux_lock>::u_x_upgrade(const char *file, unsigned line)
 }
 #endif
-template<>
-inline void sux_lock<ssux_lock_low>::s_lock()
+/** needed for dict_index_t::clone() */
+template<> inline void index_lock::operator=(const sux_lock&)
+{
+memset((void*) this, 0, sizeof *this);
+}
+template<typename ssux> inline void sux_lock<ssux>::s_lock()
 {
 ut_ad(!have_x());
 ut_ad(!have_s());
@@ -349,8 +348,7 @@ inline void sux_lock<ssux_lock_low>::s_lock()
 ut_d(s_lock_register());
 }
-template<>
-inline void sux_lock<ssux_lock_low>::u_lock()
+template<typename ssux> inline void sux_lock<ssux>::u_lock()
 {
 os_thread_id_t id= os_thread_get_curr_id();
 if (writer.load(std::memory_order_relaxed) == id)
@@ -364,8 +362,7 @@ inline void sux_lock<ssux_lock_low>::u_lock()
 }
 }
-template<>
-inline void sux_lock<ssux_lock_low>::x_lock(bool for_io)
+template<typename ssux> inline void sux_lock<ssux>::x_lock(bool for_io)
 {
 os_thread_id_t id= os_thread_get_curr_id();
 if (writer.load(std::memory_order_relaxed) == id)
@@ -382,15 +379,14 @@ inline void sux_lock<ssux_lock_low>::x_lock(bool for_io)
 }
 }
-template<>
-inline void sux_lock<ssux_lock_low>::u_x_upgrade()
+template<typename ssux> inline void sux_lock<ssux>::u_x_upgrade()
 {
 ut_ad(have_u_not_x());
 lock.u_wr_upgrade();
 recursive/= RECURSIVE_U;
 }
-template<> inline bool sux_lock<ssux_lock_low>::x_lock_upgraded()
+template<typename ssux> inline bool sux_lock<ssux>::x_lock_upgraded()
 {
 os_thread_id_t id= os_thread_get_curr_id();
 if (writer.load(std::memory_order_relaxed) == id)
@@ -417,8 +413,7 @@ template<> inline bool sux_lock<ssux_lock_low>::x_lock_upgraded()
 }
 }
-template<>
-inline bool sux_lock<ssux_lock_low>::u_lock_try(bool for_io)
+template<typename ssux> inline bool sux_lock<ssux>::u_lock_try(bool for_io)
 {
 os_thread_id_t id= os_thread_get_curr_id();
 if (writer.load(std::memory_order_relaxed) == id)
@@ -438,8 +433,7 @@ inline bool sux_lock<ssux_lock_low>::u_lock_try(bool for_io)
 return false;
 }
-template<>
-inline bool sux_lock<ssux_lock_low>::x_lock_try()
+template<typename ssux> inline bool sux_lock<ssux>::x_lock_try()
 {
 os_thread_id_t id= os_thread_get_curr_id();
 if (writer.load(std::memory_order_relaxed) == id)
......
@@ -125,7 +125,7 @@ class purge_sys_t
 {
 public:
 /** latch protecting view, m_enabled */
-MY_ALIGNED(CACHE_LINE_SIZE) mutable srw_lock latch;
+MY_ALIGNED(CACHE_LINE_SIZE) mutable srw_spin_lock latch;
 private:
 /** The purge will not remove undo logs which are >= this view */
 ReadViewBase view;
......
@@ -84,7 +84,7 @@ struct MY_ALIGNED(CPU_LEVEL1_DCACHE_LINESIZE) trx_rseg_t
 /** tablespace containing the rollback segment; constant after init() */
 fil_space_t *space;
 /** latch protecting everything except page_no, space */
-srw_lock_low latch;
+srw_spin_lock_low latch;
 /** rollback segment header page number; constant after init() */
 uint32_t page_no;
 /** length of the TRX_RSEG_HISTORY list (number of transactions) */
......
@@ -565,7 +565,7 @@ struct trx_t : ilist_node<>
 private:
 /** mutex protecting state and some of lock
 (some are protected by lock_sys.latch) */
-srw_mutex mutex;
+srw_spin_mutex mutex;
 #ifdef UNIV_DEBUG
 /** The owner of mutex (0 if none); protected by mutex */
 std::atomic<os_thread_id_t> mutex_owner{0};
......
@@ -3930,7 +3930,7 @@ row_sel_try_search_shortcut_for_mysql(
 ut_ad(!prebuilt->templ_contains_blob);
 ut_ad(trx->read_view.is_open());
-srw_lock* ahi_latch = btr_search_sys.get_latch(*index);
+srw_spin_lock* ahi_latch = btr_search_sys.get_latch(*index);
 ahi_latch->rd_lock(SRW_LOCK_CALL);
 btr_pcur_open_with_no_init(index, search_tuple, PAGE_CUR_GE,
 BTR_SEARCH_LEAF, pcur, ahi_latch, mtr);
......
@@ -36,7 +36,8 @@ static inline void srw_pause(unsigned delay)
 }
 #ifdef SUX_LOCK_GENERIC
-void ssux_lock_low::init()
+template<bool spinloop>
+void ssux_lock_impl<spinloop>::init()
 {
 DBUG_ASSERT(!is_locked_or_waiting());
 pthread_mutex_init(&mutex, nullptr);
@@ -44,7 +45,8 @@ void ssux_lock_low::init()
 pthread_cond_init(&cond_exclusive, nullptr);
 }
-void ssux_lock_low::destroy()
+template<bool spinloop>
+void ssux_lock_impl<spinloop>::destroy()
 {
 DBUG_ASSERT(!is_locked_or_waiting());
 pthread_mutex_destroy(&mutex);
@@ -52,7 +54,8 @@ void ssux_lock_low::destroy()
 pthread_cond_destroy(&cond_exclusive);
 }
-inline void ssux_lock_low::writer_wait(uint32_t l)
+template<bool spinloop>
+inline void ssux_lock_impl<spinloop>::writer_wait(uint32_t l)
 {
 pthread_mutex_lock(&mutex);
 while (value() == l)
@@ -60,7 +63,8 @@ inline void ssux_lock_low::writer_wait(uint32_t l)
 pthread_mutex_unlock(&mutex);
 }
-inline void ssux_lock_low::readers_wait(uint32_t l)
+template<bool spinloop>
+inline void ssux_lock_impl<spinloop>::readers_wait(uint32_t l)
 {
 pthread_mutex_lock(&mutex);
 while (value() == l)
@@ -68,7 +72,8 @@ inline void ssux_lock_low::readers_wait(uint32_t l)
 pthread_mutex_unlock(&mutex);
 }
-inline void ssux_lock_low::wake()
+template<bool spinloop>
+inline void ssux_lock_impl<spinloop>::wake()
 {
 pthread_mutex_lock(&mutex);
 uint32_t l= value();
@@ -85,7 +90,8 @@ inline void ssux_lock_low::wake()
 /** Wait for a read lock.
 @param lock word value from a failed read_trylock() */
-void ssux_lock_low::read_lock(uint32_t l)
+template<bool spinloop>
+void ssux_lock_impl<spinloop>::read_lock(uint32_t l)
 {
 do
 {
@@ -105,7 +111,8 @@ void ssux_lock_low::read_lock(uint32_t l)
 pthread_mutex_unlock(&mutex);
 continue;
 }
+else if (spinloop)
+{
 const unsigned delay= srw_pause_delay();
 for (auto spin= srv_n_spin_wait_rounds; spin; spin--)
@@ -116,6 +123,7 @@ void ssux_lock_low::read_lock(uint32_t l)
 else if (l == WRITER_WAITING)
 goto wake_writer;
 }
+}
 readers_wait(l);
 }
@@ -124,7 +132,8 @@ void ssux_lock_low::read_lock(uint32_t l)
 /** Wait for an update lock.
 @param lock word value from a failed update_trylock() */
-void ssux_lock_low::update_lock(uint32_t l)
+template<bool spinloop>
+void ssux_lock_impl<spinloop>::update_lock(uint32_t l)
 {
 do
 {
@@ -144,7 +153,7 @@ void ssux_lock_low::update_lock(uint32_t l)
 pthread_mutex_unlock(&mutex);
 continue;
 }
-else
+else if (spinloop)
 {
 const unsigned delay= srw_pause_delay();
@@ -165,23 +174,12 @@ void ssux_lock_low::update_lock(uint32_t l)
 /** Wait for a write lock after a failed write_trylock() or upgrade_trylock()
 @param holding_u whether we already hold u_lock() */
-void ssux_lock_low::write_lock(bool holding_u)
+template<bool spinloop>
+void ssux_lock_impl<spinloop>::write_lock(bool holding_u)
 {
-const unsigned delay= srw_pause_delay();
 for (;;)
 {
 uint32_t l= write_lock_wait_start();
-/* We are the first writer to be granted the lock. Spin for a while. */
-for (auto spin= srv_n_spin_wait_rounds; spin; spin--)
-{
-l= holding_u ? WRITER_WAITING | UPDATER : WRITER_WAITING;
-if (write_lock_wait_try(l))
-return;
-if (!(l & WRITER_WAITING))
-l= write_lock_wait_start();
-srw_pause(delay);
-}
 const uint32_t e= holding_u ? WRITER_WAITING | UPDATER : WRITER_WAITING;
 l= e;
@@ -213,21 +211,34 @@ void ssux_lock_low::write_lock(bool holding_u)
 }
 }
-void ssux_lock_low::rd_unlock() { if (read_unlock()) wake(); }
-void ssux_lock_low::u_unlock() { update_unlock(); wake(); }
-void ssux_lock_low::wr_unlock() { write_unlock(); wake(); }
+template<bool spinloop>
+void ssux_lock_impl<spinloop>::rd_unlock() { if (read_unlock()) wake(); }
+template<bool spinloop>
+void ssux_lock_impl<spinloop>::u_unlock() { update_unlock(); wake(); }
+template<bool spinloop>
+void ssux_lock_impl<spinloop>::wr_unlock() { write_unlock(); wake(); }
+template void ssux_lock_impl<false>::init();
+template void ssux_lock_impl<false>::destroy();
+template void ssux_lock_impl<false>::rd_unlock();
+template void ssux_lock_impl<false>::u_unlock();
+template void ssux_lock_impl<false>::wr_unlock();
 #else /* SUX_LOCK_GENERIC */
 static_assert(4 == sizeof(rw_lock), "ABI");
 # ifdef _WIN32
 # include <synchapi.h>
-inline void srw_mutex::wait(uint32_t lk)
+template<bool spinloop>
+inline void srw_mutex_impl<spinloop>::wait(uint32_t lk)
 { WaitOnAddress(&lock, &lk, 4, INFINITE); }
-void srw_mutex::wake() { WakeByAddressSingle(&lock); }
+template<bool spinloop>
+void srw_mutex_impl<spinloop>::wake() { WakeByAddressSingle(&lock); }
-inline void ssux_lock_low::wait(uint32_t lk)
+template<bool spinloop>
+inline void ssux_lock_impl<spinloop>::wait(uint32_t lk)
 { WaitOnAddress(&readers, &lk, 4, INFINITE); }
-void ssux_lock_low::wake() { WakeByAddressSingle(&readers); }
+template<bool spinloop>
+void ssux_lock_impl<spinloop>::wake() { WakeByAddressSingle(&readers); }
 # else
 # ifdef __linux__
@@ -244,16 +255,27 @@ void ssux_lock_low::wake() { WakeByAddressSingle(&readers); }
 # error "no futex support"
 # endif
-inline void srw_mutex::wait(uint32_t lk) { SRW_FUTEX(&lock, WAIT, lk); }
-void srw_mutex::wake() { SRW_FUTEX(&lock, WAKE, 1); }
+template<bool spinloop>
+inline void srw_mutex_impl<spinloop>::wait(uint32_t lk)
+{ SRW_FUTEX(&lock, WAIT, lk); }
+template<bool spinloop>
+void srw_mutex_impl<spinloop>::wake() { SRW_FUTEX(&lock, WAKE, 1); }
-inline void ssux_lock_low::wait(uint32_t lk) { SRW_FUTEX(&readers, WAIT, lk); }
-void ssux_lock_low::wake() { SRW_FUTEX(&readers, WAKE, 1); }
+template<bool spinloop>
+inline void ssux_lock_impl<spinloop>::wait(uint32_t lk)
+{ SRW_FUTEX(&readers, WAIT, lk); }
+template<bool spinloop>
+void ssux_lock_impl<spinloop>::wake() { SRW_FUTEX(&readers, WAKE, 1); }
 # endif
+template void srw_mutex_impl<false>::wake();
+template void ssux_lock_impl<false>::wake();
+template void srw_mutex_impl<true>::wake();
+template void ssux_lock_impl<true>::wake();
-void srw_mutex::wait_and_lock()
+template<>
+void srw_mutex_impl<true>::wait_and_lock()
 {
 uint32_t lk= 1 + lock.fetch_add(1, std::memory_order_relaxed);
@@ -295,7 +317,31 @@ void srw_mutex::wait_and_lock()
 }
 }
-void ssux_lock_low::wr_wait(uint32_t lk)
+template<>
+void srw_mutex_impl<false>::wait_and_lock()
+{
+uint32_t lk= 1 + lock.fetch_add(1, std::memory_order_relaxed);
+for (;; wait(lk))
+{
+if (lk & HOLDER)
+{
+lk= lock.load(std::memory_order_relaxed);
+if (lk & HOLDER)
+continue;
+}
+lk= lock.fetch_or(HOLDER, std::memory_order_relaxed);
+if (!(lk & HOLDER))
+{
+DBUG_ASSERT(lk);
+std::atomic_thread_fence(std::memory_order_acquire);
+return;
+}
+DBUG_ASSERT(lk > HOLDER);
+}
+}
+template<bool spinloop>
+void ssux_lock_impl<spinloop>::wr_wait(uint32_t lk)
 {
 DBUG_ASSERT(writer.is_locked());
 DBUG_ASSERT(lk);
@@ -310,7 +356,11 @@ void ssux_lock_low::wr_wait(uint32_t lk)
 while (lk != WRITER);
 }
-void ssux_lock_low::rd_wait()
+template void ssux_lock_impl<true>::wr_wait(uint32_t);
+template void ssux_lock_impl<false>::wr_wait(uint32_t);
+template<bool spinloop>
+void ssux_lock_impl<spinloop>::rd_wait()
 {
 for (;;)
 {
@@ -329,10 +379,22 @@ void ssux_lock_low::rd_wait()
 }
 writer.wr_unlock();
 }
+template void ssux_lock_impl<true>::rd_wait();
+template void ssux_lock_impl<false>::rd_wait();
 #endif /* SUX_LOCK_GENERIC */
 #ifdef UNIV_PFS_RWLOCK
-void srw_lock::psi_rd_lock(const char *file, unsigned line)
+# if defined _WIN32 || defined SUX_LOCK_GENERIC
+# define void_srw_lock void srw_lock_impl
+# else
+# define void_srw_lock template<bool spinloop> void srw_lock_impl<spinloop>
+template void srw_lock_impl<false>::psi_rd_lock(const char*, unsigned);
+template void srw_lock_impl<false>::psi_wr_lock(const char*, unsigned);
+template void srw_lock_impl<true>::psi_rd_lock(const char*, unsigned);
+template void srw_lock_impl<true>::psi_wr_lock(const char*, unsigned);
+# endif
+void_srw_lock::psi_rd_lock(const char *file, unsigned line)
 {
 PSI_rwlock_locker_state state;
 const bool nowait= lock.rd_lock_try();
@@ -348,7 +410,7 @@ void srw_lock::psi_rd_lock(const char *file, unsigned line)
 lock.rd_lock();
 }
-void srw_lock::psi_wr_lock(const char *file, unsigned line)
+void_srw_lock::psi_wr_lock(const char *file, unsigned line)
 {
 PSI_rwlock_locker_state state;
 const bool nowait= lock.wr_lock_try();
@@ -428,7 +490,7 @@ void ssux_lock::psi_u_wr_upgrade(const char *file, unsigned line)
 DBUG_ASSERT(lock.writer.is_locked());
 uint32_t lk= 1;
 const bool nowait=
-lock.readers.compare_exchange_strong(lk, ssux_lock_low::WRITER,
+lock.readers.compare_exchange_strong(lk, ssux_lock_impl<false>::WRITER,
 std::memory_order_acquire,
 std::memory_order_relaxed);
 if (PSI_rwlock_locker *locker= PSI_RWLOCK_CALL(start_rwlock_wrwait)
@@ -444,4 +506,14 @@ void ssux_lock::psi_u_wr_upgrade(const char *file, unsigned line)
 else if (!nowait)
 lock.u_wr_upgrade();
 }
+#else /* UNIV_PFS_RWLOCK */
+template void ssux_lock_impl<false>::rd_lock();
+# ifdef SUX_LOCK_GENERIC
+template void ssux_lock_impl<false>::write_lock(bool);
+template void ssux_lock_impl<false>::update_lock(uint32_t);
+# else
+template void ssux_lock_impl<false>::rd_unlock();
+template void ssux_lock_impl<false>::u_unlock();
+template void ssux_lock_impl<false>::wr_unlock();
+# endif
 #endif /* UNIV_PFS_RWLOCK */
@@ -62,7 +62,7 @@ static void test_srw_lock()
 }
 }
-static ssux_lock_low ssux;
+static ssux_lock_impl<false> ssux;
 static void test_ssux_lock()
 {
@@ -95,7 +95,7 @@ static void test_ssux_lock()
 }
 }
-static sux_lock<ssux_lock_low> sux;
+static sux_lock<ssux_lock_impl<true>> sux;
 static void test_sux_lock()
 {
......