Commit 131d9a5d authored by Sergey Vojtovich, committed by Marko Mäkelä

Allocate lock_sys statically

There is only one lock_sys. Allocate it statically in order to avoid
dereferencing a pointer whenever accessing it. Also, align some
members to their own cache line in order to avoid false sharing.

lock_sys_t::create(): The deferred constructor.

lock_sys_t::close(): The early destructor.
parent 59dd0464
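For illustration only, a minimal, self-contained C++ sketch of the pattern this commit applies (it is not code from the patch): a statically allocated singleton whose heavyweight members are set up by a deferred create() and torn down by an early close(), with hot members aligned to separate cache lines to avoid false sharing. The names example_sys_t, CACHE_LINE and the std::mutex/std::vector members are hypothetical stand-ins for the real LockMutex, hash_table_t and MY_ALIGNED machinery.

// Sketch only, not code from this commit.
#include <cstddef>
#include <mutex>
#include <vector>

static constexpr std::size_t CACHE_LINE = 64;   // assumed cache line size

class example_sys_t
{
        bool m_initialised;                     // set by create(), cleared by close()
public:
        alignas(CACHE_LINE) std::mutex mutex;        // protects the main data
        alignas(CACHE_LINE) std::mutex wait_mutex;   // protects the wait slots
        std::vector<int> table;                      // stands in for the hash tables

        // The constructor stays trivial so a namespace-scope static instance
        // is cheap and safe; expensive setup is deferred to create().
        example_sys_t() : m_initialised(false) {}

        bool is_initialised() const { return m_initialised; }

        // Deferred "constructor": called once at startup.
        void create(std::size_t n_cells)
        {
                table.assign(n_cells, 0);
                m_initialised = true;
        }

        // Early "destructor": safe to call even if create() never ran.
        void close()
        {
                if (!m_initialised) return;
                table.clear();
                table.shrink_to_fit();
                m_initialised = false;
        }
};

// One static instance: callers write sys.mutex instead of dereferencing a
// pointer (sys->mutex), which is the point of allocating it statically.
static example_sys_t sys;

int main()
{
        sys.create(1024);       // at startup
        {
                std::lock_guard<std::mutex> g(sys.mutex);
                sys.table[0] = 42;
        }
        sys.close();            // at shutdown
}

Callers then access members directly (sys.mutex rather than sys->mutex), and shutdown code can call sys.close() unconditionally because is_initialised() guards the teardown.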
@@ -3672,8 +3672,6 @@ xtrabackup_backup_func()
 			"innodb_redo_log", SRV_LOG_SPACE_FIRST_ID, 0,
 			FIL_TYPE_LOG, NULL);
-	lock_sys_create(srv_lock_table_size);
 	for (i = 0; i < srv_n_log_files; i++) {
 		err = open_or_create_log_file(space, &log_file_created, i);
 		if (err != DB_SUCCESS) {
...
@@ -3675,7 +3675,7 @@ btr_lift_page_up(
 		if (dict_index_is_spatial(index)) {
 			lock_mutex_enter();
 			lock_prdt_page_free_from_discard(
-				block, lock_sys->prdt_page_hash);
+				block, lock_sys.prdt_page_hash);
 			lock_mutex_exit();
 		}
 		lock_update_copy_and_discard(father_block, block);
@@ -3968,7 +3968,7 @@ btr_compress(
 			/* No GAP lock needs to be worrying about */
 			lock_mutex_enter();
 			lock_prdt_page_free_from_discard(
-				block, lock_sys->prdt_page_hash);
+				block, lock_sys.prdt_page_hash);
 			lock_rec_free_all_from_discard_page(block);
 			lock_mutex_exit();
 		} else {
@@ -4126,7 +4126,7 @@ btr_compress(
 		}
 		lock_mutex_enter();
 		lock_prdt_page_free_from_discard(
-			block, lock_sys->prdt_page_hash);
+			block, lock_sys.prdt_page_hash);
 		lock_rec_free_all_from_discard_page(block);
 		lock_mutex_exit();
 	} else {
...
@@ -3058,7 +3058,7 @@ buf_pool_resize()
 	/* normalize lock_sys */
 	srv_lock_table_size = 5 * (srv_buf_pool_size / UNIV_PAGE_SIZE);
-	lock_sys_resize(srv_lock_table_size);
+	lock_sys.resize(srv_lock_table_size);
 	/* normalize btr_search_sys */
 	btr_search_sys_resize(
...
@@ -1255,8 +1255,8 @@ rtr_check_discard_page(
 	mutex_exit(&index->rtr_track->rtr_active_mutex);
 	lock_mutex_enter();
-	lock_prdt_page_free_from_discard(block, lock_sys->prdt_hash);
-	lock_prdt_page_free_from_discard(block, lock_sys->prdt_page_hash);
+	lock_prdt_page_free_from_discard(block, lock_sys.prdt_hash);
+	lock_prdt_page_free_from_discard(block, lock_sys.prdt_page_hash);
 	lock_mutex_exit();
 }
...
@@ -1908,7 +1908,7 @@ struct dict_table_t {
 	ulong		n_waiting_or_granted_auto_inc_locks;
 	/** The transaction that currently holds the the AUTOINC lock on this
-	table. Protected by lock_sys->mutex. */
+	table. Protected by lock_sys.mutex. */
 	const trx_t*	autoinc_trx;
 	/* @} */
@@ -1923,7 +1923,7 @@ struct dict_table_t {
 	/** Count of the number of record locks on this table. We use this to
 	determine whether we can evict the table from the dictionary cache.
-	It is protected by lock_sys->mutex. */
+	It is protected by lock_sys.mutex. */
 	ulint		n_rec_locks;
 #ifndef DBUG_ASSERT_EXISTS
@@ -1935,7 +1935,7 @@ struct dict_table_t {
 	ulint		n_ref_count;
 public:
-	/** List of locks on the table. Protected by lock_sys->mutex. */
+	/** List of locks on the table. Protected by lock_sys.mutex. */
 	table_lock_list_t locks;
 	/** Timestamp of the last modification of this table. */
...
@@ -65,23 +65,6 @@ ulint
 lock_get_size(void);
 /*===============*/
 /*********************************************************************//**
-Creates the lock system at database start. */
-void
-lock_sys_create(
-/*============*/
-	ulint	n_cells);	/*!< in: number of slots in lock hash table */
-/** Resize the lock hash table.
-@param[in]	n_cells	number of slots in lock hash table */
-void
-lock_sys_resize(
-	ulint	n_cells);
-/*********************************************************************//**
-Closes the lock system at database shutdown. */
-void
-lock_sys_close(void);
-/*================*/
-/*********************************************************************//**
 Gets the heap_no of the smallest user record on a page.
 @return heap_no of smallest user record, or PAGE_HEAP_NO_SUPREMUM */
 UNIV_INLINE
@@ -605,7 +588,7 @@ lock_print_info_all_transactions(
 Return approximate number or record locks (bits set in the bitmap) for
 this transaction. Since delete-marked records may be removed, the
 record count will not be precise.
-The caller must be holding lock_sys->mutex. */
+The caller must be holding lock_sys.mutex. */
 ulint
 lock_number_of_rows_locked(
 /*=======================*/
@@ -614,7 +597,7 @@ lock_number_of_rows_locked(
 /*********************************************************************//**
 Return the number of table locks for a transaction.
-The caller must be holding lock_sys->mutex. */
+The caller must be holding lock_sys.mutex. */
 ulint
 lock_number_of_tables_locked(
 /*=========================*/
@@ -897,11 +880,12 @@ struct lock_op_t{
 typedef ib_mutex_t LockMutex;
 /** The lock system struct */
-struct lock_sys_t{
-	char		pad1[CACHE_LINE_SIZE];	/*!< padding to prevent other
-						memory update hotspots from
-						residing on the same memory
-						cache line */
+class lock_sys_t
+{
+	bool		m_initialised;
+
+public:
+	MY_ALIGNED(CACHE_LINE_SIZE)
 	LockMutex	mutex;			/*!< Mutex protecting the
 						locks */
 	hash_table_t*	rec_hash;		/*!< hash table of the record
@@ -911,13 +895,13 @@ struct lock_sys_t{
 	hash_table_t*	prdt_page_hash;		/*!< hash table of the page
 						lock */
-	char		pad2[CACHE_LINE_SIZE];	/*!< Padding */
+	MY_ALIGNED(CACHE_LINE_SIZE)
 	LockMutex	wait_mutex;		/*!< Mutex protecting the
 						next two fields */
 	srv_slot_t*	waiting_threads;	/*!< Array of user threads
 						suspended while waiting for
 						locks within InnoDB, protected
-						by the lock_sys->wait_mutex;
+						by the lock_sys.wait_mutex;
 						os_event_set() and
 						os_event_reset() on
 						waiting_threads[]->event
@@ -926,7 +910,7 @@ struct lock_sys_t{
 	srv_slot_t*	last_slot;		/*!< highest slot ever used
 						in the waiting_threads array,
 						protected by
-						lock_sys->wait_mutex */
+						lock_sys.wait_mutex */
 	ulint		n_lock_max_wait_time;	/*!< Max wait time */
@@ -938,6 +922,38 @@ struct lock_sys_t{
 	bool		timeout_thread_active;	/*!< True if the timeout thread
 						is running */
+
+	/**
+	  Constructor.
+
+	  Some members may require late initialisation, thus we just mark object as
+	  uninitialised. Real initialisation happens in create().
+	*/
+	lock_sys_t(): m_initialised(false) {}
+
+	bool is_initialised() { return m_initialised; }
+
+	/**
+	  Creates the lock system at database start.
+
+	  @param[in] n_cells number of slots in lock hash table
+	*/
+	void create(ulint n_cells);
+
+	/**
+	  Resize the lock hash table.
+
+	  @param[in] n_cells number of slots in lock hash table
+	*/
+	void resize(ulint n_cells);
+
+	/** Closes the lock system at database shutdown. */
+	void close();
 };
 /*************************************************************//**
@@ -982,36 +998,36 @@ lock_rec_trx_wait(
 	ulint	type);
 /** The lock system */
-extern lock_sys_t*	lock_sys;
+extern lock_sys_t	lock_sys;
-/** Test if lock_sys->mutex can be acquired without waiting. */
+/** Test if lock_sys.mutex can be acquired without waiting. */
 #define lock_mutex_enter_nowait() \
-	(lock_sys->mutex.trylock(__FILE__, __LINE__))
+	(lock_sys.mutex.trylock(__FILE__, __LINE__))
-/** Test if lock_sys->mutex is owned. */
-#define lock_mutex_own() (lock_sys->mutex.is_owned())
+/** Test if lock_sys.mutex is owned. */
+#define lock_mutex_own() (lock_sys.mutex.is_owned())
-/** Acquire the lock_sys->mutex. */
+/** Acquire the lock_sys.mutex. */
 #define lock_mutex_enter() do { \
-	mutex_enter(&lock_sys->mutex); \
+	mutex_enter(&lock_sys.mutex); \
 } while (0)
-/** Release the lock_sys->mutex. */
+/** Release the lock_sys.mutex. */
 #define lock_mutex_exit() do { \
-	lock_sys->mutex.exit(); \
+	lock_sys.mutex.exit(); \
 } while (0)
-/** Test if lock_sys->wait_mutex is owned. */
-#define lock_wait_mutex_own() (lock_sys->wait_mutex.is_owned())
+/** Test if lock_sys.wait_mutex is owned. */
+#define lock_wait_mutex_own() (lock_sys.wait_mutex.is_owned())
-/** Acquire the lock_sys->wait_mutex. */
+/** Acquire the lock_sys.wait_mutex. */
 #define lock_wait_mutex_enter() do { \
-	mutex_enter(&lock_sys->wait_mutex); \
+	mutex_enter(&lock_sys.wait_mutex); \
 } while (0)
-/** Release the lock_sys->wait_mutex. */
+/** Release the lock_sys.wait_mutex. */
 #define lock_wait_mutex_exit() do { \
-	lock_sys->wait_mutex.exit(); \
+	lock_sys.wait_mutex.exit(); \
 } while (0)
 #ifdef WITH_WSREP
...
@@ -63,7 +63,7 @@ lock_rec_hash(
 	ulint	page_no)/*!< in: page number */
 {
 	return(unsigned(hash_calc_hash(lock_rec_fold(space, page_no),
-				       lock_sys->rec_hash)));
+				       lock_sys.rec_hash)));
 }
 /*********************************************************************//**
@@ -99,11 +99,11 @@ lock_hash_get(
 	ulint	mode)	/*!< in: lock mode */
 {
 	if (mode & LOCK_PREDICATE) {
-		return(lock_sys->prdt_hash);
+		return(lock_sys.prdt_hash);
 	} else if (mode & LOCK_PRDT_PAGE) {
-		return(lock_sys->prdt_page_hash);
+		return(lock_sys.prdt_page_hash);
 	} else {
-		return(lock_sys->rec_hash);
+		return(lock_sys.rec_hash);
 	}
 }
@@ -111,7 +111,7 @@ operator<<(std::ostream& out, const lock_rec_t& lock)
 	return(lock.print(out));
 }
-/** Lock struct; protected by lock_sys->mutex */
+/** Lock struct; protected by lock_sys.mutex */
 struct lock_t {
 	trx_t*		trx;		/*!< transaction owning the
 					lock */
...
@@ -31,7 +31,6 @@ Created 5/7/1996 Heikki Tuuri
 #define lock_t ib_lock_t
 struct lock_t;
-struct lock_sys_t;
 struct lock_table_t;
 /* Basic lock modes */
...
@@ -590,10 +590,10 @@ class rw_trx_hash_t
     the transaction may get committed before this method returns.
     With do_ref_count == false the caller may dereference returned trx pointer
-    only if lock_sys->mutex was acquired before calling find().
+    only if lock_sys.mutex was acquired before calling find().
     With do_ref_count == true caller may dereference trx even if it is not
-    holding lock_sys->mutex. Caller is responsible for calling
+    holding lock_sys.mutex. Caller is responsible for calling
     trx->release_reference() when it is done playing with trx.
     Ideally this method should get caller rw_trx_hash_pins along with trx
...
@@ -266,7 +266,7 @@ This function is used to find one X/Open XA distributed transaction
 which is in the prepared state
 @return trx or NULL; on match, the trx->xid will be invalidated;
 note that the trx may have been committed, unless the caller is
-holding lock_sys->mutex */
+holding lock_sys.mutex */
 trx_t *
 trx_get_trx_by_xid(
 /*===============*/
@@ -327,7 +327,7 @@ trx_print_low(
 /**********************************************************************//**
 Prints info about a transaction.
-The caller must hold lock_sys->mutex and trx_sys.mutex.
+The caller must hold lock_sys.mutex and trx_sys.mutex.
 When possible, use trx_print() instead. */
 void
 trx_print_latched(
@@ -339,7 +339,7 @@ trx_print_latched(
 /**********************************************************************//**
 Prints info about a transaction.
-Acquires and releases lock_sys->mutex. */
+Acquires and releases lock_sys.mutex. */
 void
 trx_print(
 /*======*/
@@ -612,7 +612,7 @@ To query the state either of the mutexes is sufficient within the locking
 code and no mutex is required when the query thread is no longer waiting. */
 /** The locks and state of an active transaction. Protected by
-lock_sys->mutex, trx->mutex or both. */
+lock_sys.mutex, trx->mutex or both. */
 struct trx_lock_t {
 	ulint		n_active_thrs;	/*!< number of active query threads */
@@ -624,10 +624,10 @@ struct trx_lock_t {
 					TRX_QUE_LOCK_WAIT, this points to
 					the lock request, otherwise this is
 					NULL; set to non-NULL when holding
-					both trx->mutex and lock_sys->mutex;
+					both trx->mutex and lock_sys.mutex;
 					set to NULL when holding
-					lock_sys->mutex; readers should
-					hold lock_sys->mutex, except when
+					lock_sys.mutex; readers should
+					hold lock_sys.mutex, except when
 					they are holding trx->mutex and
 					wait_lock==NULL */
 	ib_uint64_t	deadlock_mark;	/*!< A mark field that is initialized
@@ -641,13 +641,13 @@ struct trx_lock_t {
 					resolution, it sets this to true.
 					Protected by trx->mutex. */
 	time_t		wait_started;	/*!< lock wait started at this time,
-					protected only by lock_sys->mutex */
+					protected only by lock_sys.mutex */
 	que_thr_t*	wait_thr;	/*!< query thread belonging to this
 					trx that is in QUE_THR_LOCK_WAIT
 					state. For threads suspended in a
 					lock wait, this is protected by
-					lock_sys->mutex. Otherwise, this may
+					lock_sys.mutex. Otherwise, this may
 					only be modified by the thread that is
 					serving the running transaction. */
@@ -660,12 +660,12 @@ struct trx_lock_t {
 	ulint		table_cached;	/*!< Next free table lock in pool */
 	mem_heap_t*	lock_heap;	/*!< memory heap for trx_locks;
-					protected by lock_sys->mutex */
+					protected by lock_sys.mutex */
 	trx_lock_list_t trx_locks;	/*!< locks requested by the transaction;
 					insertions are protected by trx->mutex
-					and lock_sys->mutex; removals are
-					protected by lock_sys->mutex */
+					and lock_sys.mutex; removals are
+					protected by lock_sys.mutex */
 	lock_pool_t	table_locks;	/*!< All table locks requested by this
 					transaction, including AUTOINC locks */
@@ -788,7 +788,7 @@ transactions. The trx_sys.mutex prevents a race condition between it
 and lock_trx_release_locks() [invoked by trx_commit()].
 * trx_print_low() may access transactions not associated with the current
-thread. The caller must be holding lock_sys->mutex.
+thread. The caller must be holding lock_sys.mutex.
 * When a transaction handle is in the trx_sys.mysql_trx_list or
 trx_sys.trx_list, some of its fields must not be modified without
@@ -797,7 +797,7 @@ holding trx_sys.mutex exclusively.
 * The locking code (in particular, lock_deadlock_recursive() and
 lock_rec_convert_impl_to_expl()) will access transactions associated
 to other connections. The locks of transactions are protected by
-lock_sys->mutex and sometimes by trx->mutex. */
+lock_sys.mutex and sometimes by trx->mutex. */
 typedef enum {
 	TRX_SERVER_ABORT = 0,
@@ -870,7 +870,7 @@ struct trx_t {
 	TrxMutex	mutex;		/*!< Mutex protecting the fields
 					state and lock (except some fields
 					of lock, which are protected by
-					lock_sys->mutex) */
+					lock_sys.mutex) */
 	/* Note: in_depth was split from in_innodb for fixing a RO
 	performance issue. Acquiring the trx_t::mutex for each row
@@ -961,7 +961,7 @@ struct trx_t {
 	ACTIVE->COMMITTED is possible when the transaction is in
 	rw_trx_hash.
-	Transitions to COMMITTED are protected by both lock_sys->mutex
+	Transitions to COMMITTED are protected by both lock_sys.mutex
 	and trx->mutex.
 	NOTE: Some of these state change constraints are an overkill,
@@ -974,7 +974,7 @@ struct trx_t {
 					transaction, or NULL if not yet set */
 	trx_lock_t	lock;		/*!< Information about the transaction
 					locks and state. Protected by
-					trx->mutex or lock_sys->mutex
+					trx->mutex or lock_sys.mutex
 					or both */
 	bool		is_recovered;	/*!< 0=normal transaction,
 					1=recovered, must be rolled back,
@@ -1156,7 +1156,7 @@ struct trx_t {
 					also in the lock list trx_locks. This
 					vector needs to be freed explicitly
 					when the trx instance is destroyed.
-					Protected by lock_sys->mutex. */
+					Protected by lock_sys.mutex. */
 	/*------------------------------*/
 	bool		read_only;	/*!< true if transaction is flagged
 					as a READ-ONLY transaction.
...
This diff is collapsed.
@@ -539,7 +539,7 @@ lock_prdt_insert_check_and_lock(
 	lock_t*		lock;
 	/* Only need to check locks on prdt_hash */
-	lock = lock_rec_get_first(lock_sys->prdt_hash, block, PRDT_HEAPNO);
+	lock = lock_rec_get_first(lock_sys.prdt_hash, block, PRDT_HEAPNO);
 	if (lock == NULL) {
 		lock_mutex_exit();
@@ -626,7 +626,7 @@ lock_prdt_update_parent(
 	/* Get all locks in parent */
 	for (lock = lock_rec_get_first_on_page_addr(
-		     lock_sys->prdt_hash, space, page_no);
+		     lock_sys.prdt_hash, space, page_no);
 	     lock;
 	     lock = lock_rec_get_next_on_page(lock)) {
 		lock_prdt_t*	lock_prdt;
@@ -816,8 +816,8 @@ lock_prdt_lock(
 	ut_ad(type_mode & (LOCK_PREDICATE | LOCK_PRDT_PAGE));
 	hash_table_t*	hash = type_mode == LOCK_PREDICATE
-		? lock_sys->prdt_hash
-		: lock_sys->prdt_page_hash;
+		? lock_sys.prdt_hash
+		: lock_sys.prdt_page_hash;
 	/* Another transaction cannot have an implicit lock on the record,
 	because when we come here, we already have modified the clustered
@@ -923,7 +923,7 @@ lock_place_prdt_page_lock(
 	lock_mutex_enter();
 	const lock_t*	lock = lock_rec_get_first_on_page_addr(
-		lock_sys->prdt_page_hash, space, page_no);
+		lock_sys.prdt_page_hash, space, page_no);
 	const ulint	mode = LOCK_S | LOCK_PRDT_PAGE;
 	trx_t*		trx = thr_get_trx(thr);
@@ -977,7 +977,7 @@ lock_test_prdt_page_lock(
 	lock_mutex_enter();
 	lock = lock_rec_get_first_on_page_addr(
-		lock_sys->prdt_page_hash, space, page_no);
+		lock_sys.prdt_page_hash, space, page_no);
 	lock_mutex_exit();
@@ -997,13 +997,13 @@ lock_prdt_rec_move(
 {
 	lock_t*	lock;
-	if (!lock_sys->prdt_hash) {
+	if (!lock_sys.prdt_hash) {
 		return;
 	}
 	lock_mutex_enter();
-	for (lock = lock_rec_get_first(lock_sys->prdt_hash,
+	for (lock = lock_rec_get_first(lock_sys.prdt_hash,
 				       donator, PRDT_HEAPNO);
 	     lock != NULL;
 	     lock = lock_rec_get_next(PRDT_HEAPNO, lock)) {
...
@@ -46,7 +46,7 @@ lock_wait_table_print(void)
 {
 	ut_ad(lock_wait_mutex_own());
-	const srv_slot_t*	slot = lock_sys->waiting_threads;
+	const srv_slot_t*	slot = lock_sys.waiting_threads;
 	for (ulint i = 0; i < OS_THREAD_MAX_N; i++, ++slot) {
@@ -72,7 +72,7 @@ lock_wait_table_release_slot(
 	srv_slot_t*	slot)	/*!< in: slot to release */
 {
 #ifdef UNIV_DEBUG
-	srv_slot_t*	upper = lock_sys->waiting_threads + OS_THREAD_MAX_N;
+	srv_slot_t*	upper = lock_sys.waiting_threads + OS_THREAD_MAX_N;
 #endif /* UNIV_DEBUG */
 	lock_wait_mutex_enter();
@@ -83,7 +83,7 @@ lock_wait_table_release_slot(
 	ut_ad(slot->thr->slot == slot);
 	/* Must be within the array boundaries. */
-	ut_ad(slot >= lock_sys->waiting_threads);
+	ut_ad(slot >= lock_sys.waiting_threads);
 	ut_ad(slot < upper);
 	/* Note: When we reserve the slot we use the trx_t::mutex to update
@@ -102,23 +102,23 @@ lock_wait_table_release_slot(
 	lock_mutex_exit();
 	/* Scan backwards and adjust the last free slot pointer. */
-	for (slot = lock_sys->last_slot;
-	     slot > lock_sys->waiting_threads && !slot->in_use;
+	for (slot = lock_sys.last_slot;
+	     slot > lock_sys.waiting_threads && !slot->in_use;
 	     --slot) {
 		/* No op */
 	}
 	/* Either the array is empty or the last scanned slot is in use. */
-	ut_ad(slot->in_use || slot == lock_sys->waiting_threads);
-	lock_sys->last_slot = slot + 1;
+	ut_ad(slot->in_use || slot == lock_sys.waiting_threads);
+	lock_sys.last_slot = slot + 1;
 	/* The last slot is either outside of the array boundary or it's
 	on an empty slot. */
-	ut_ad(lock_sys->last_slot == upper || !lock_sys->last_slot->in_use);
-	ut_ad(lock_sys->last_slot >= lock_sys->waiting_threads);
-	ut_ad(lock_sys->last_slot <= upper);
+	ut_ad(lock_sys.last_slot == upper || !lock_sys.last_slot->in_use);
+	ut_ad(lock_sys.last_slot >= lock_sys.waiting_threads);
+	ut_ad(lock_sys.last_slot <= upper);
 	lock_wait_mutex_exit();
 }
@@ -140,7 +140,7 @@ lock_wait_table_reserve_slot(
 	ut_ad(lock_wait_mutex_own());
 	ut_ad(trx_mutex_own(thr_get_trx(thr)));
-	slot = lock_sys->waiting_threads;
+	slot = lock_sys.waiting_threads;
 	for (i = OS_THREAD_MAX_N; i--; ++slot) {
 		if (!slot->in_use) {
@@ -158,12 +158,12 @@ lock_wait_table_reserve_slot(
 			slot->suspend_time = ut_time();
 			slot->wait_timeout = wait_timeout;
-			if (slot == lock_sys->last_slot) {
-				++lock_sys->last_slot;
+			if (slot == lock_sys.last_slot) {
+				++lock_sys.last_slot;
 			}
-			ut_ad(lock_sys->last_slot
-			      <= lock_sys->waiting_threads + OS_THREAD_MAX_N);
+			ut_ad(lock_sys.last_slot
+			      <= lock_sys.waiting_threads + OS_THREAD_MAX_N);
 			return(slot);
 		}
@@ -184,7 +184,7 @@ lock_wait_table_reserve_slot(
 check if lock timeout was for priority thread,
 as a side effect trigger lock monitor
 @param[in]	trx	transaction owning the lock
-@param[in]	locked	true if trx and lock_sys_mutex is ownd
+@param[in]	locked	true if trx and lock_sys.mutex is ownd
 @return false for regular lock timeout */
 static
 bool
@@ -394,11 +394,11 @@ lock_wait_suspend_thread(
 	/* Only update the variable if we successfully
 	retrieved the start and finish times. See Bug#36819. */
-	if (diff_time > lock_sys->n_lock_max_wait_time
+	if (diff_time > lock_sys.n_lock_max_wait_time
 	    && start_time != -1
 	    && finish_time != -1) {
-		lock_sys->n_lock_max_wait_time = diff_time;
+		lock_sys.n_lock_max_wait_time = diff_time;
 	}
 	/* Record the lock wait time for this thread */
@@ -530,7 +530,7 @@ os_thread_ret_t
 DECLARE_THREAD(lock_wait_timeout_thread)(void*)
 {
 	int64_t		sig_count = 0;
-	os_event_t	event = lock_sys->timeout_event;
+	os_event_t	event = lock_sys.timeout_event;
 	ut_ad(!srv_read_only_mode);
@@ -556,8 +556,8 @@ DECLARE_THREAD(lock_wait_timeout_thread)(void*)
 		/* Check all slots for user threads that are waiting
 		on locks, and if they have exceeded the time limit. */
-		for (slot = lock_sys->waiting_threads;
-		     slot < lock_sys->last_slot;
+		for (slot = lock_sys.waiting_threads;
+		     slot < lock_sys.last_slot;
 		     ++slot) {
 			/* We are doing a read without the lock mutex
@@ -576,7 +576,7 @@ DECLARE_THREAD(lock_wait_timeout_thread)(void*)
 	} while (srv_shutdown_state < SRV_SHUTDOWN_CLEANUP);
-	lock_sys->timeout_thread_active = false;
+	lock_sys.timeout_thread_active = false;
 	/* We count the number of threads in os_thread_exit(). A created
 	thread should always use that to exit and not use return() to exit. */
...
@@ -1881,7 +1881,7 @@ logs_empty_and_mark_files_at_shutdown(void)
 	srv_shutdown_state = SRV_SHUTDOWN_CLEANUP;
 loop:
-	ut_ad(lock_sys || !srv_was_started);
+	ut_ad(lock_sys.is_initialised() || !srv_was_started);
 	ut_ad(log_sys || !srv_was_started);
 	ut_ad(fil_system || !srv_was_started);
 	os_event_set(srv_buf_resize_event);
@@ -1890,8 +1890,8 @@ logs_empty_and_mark_files_at_shutdown(void)
 	os_event_set(srv_error_event);
 	os_event_set(srv_monitor_event);
 	os_event_set(srv_buf_dump_event);
-	if (lock_sys) {
-		os_event_set(lock_sys->timeout_event);
+	if (lock_sys.timeout_thread_active) {
+		os_event_set(lock_sys.timeout_event);
 	}
 	if (dict_stats_event) {
 		os_event_set(dict_stats_event);
@@ -1940,7 +1940,7 @@ logs_empty_and_mark_files_at_shutdown(void)
 			goto wait_suspend_loop;
 		} else if (srv_dict_stats_thread_active) {
 			thread_name = "dict_stats_thread";
-		} else if (lock_sys && lock_sys->timeout_thread_active) {
+		} else if (lock_sys.timeout_thread_active) {
 			thread_name = "lock_wait_timeout_thread";
 		} else if (srv_buf_dump_thread_active) {
 			thread_name = "buf_dump_thread";
...
@@ -1933,7 +1933,7 @@ srv_mon_process_existing_counter(
 	/* innodb_row_lock_time_max */
 	case MONITOR_OVLD_LOCK_MAX_WAIT_TIME:
-		value = lock_sys->n_lock_max_wait_time / 1000;
+		value = lock_sys.n_lock_max_wait_time / 1000;
 		break;
 	/* innodb_row_lock_time_avg */
...
@@ -1581,7 +1581,7 @@ srv_export_innodb_status(void)
 	}
 	export_vars.innodb_row_lock_time_max =
-		lock_sys->n_lock_max_wait_time / 1000;
+		lock_sys.n_lock_max_wait_time / 1000;
 	export_vars.innodb_rows_read = srv_stats.n_rows_read;
@@ -1717,7 +1717,7 @@ DECLARE_THREAD(srv_monitor_thread)(void*)
 		if (srv_print_innodb_monitor) {
 			/* Reset mutex_skipped counter everytime
 			srv_print_innodb_monitor changes. This is to
-			ensure we will not be blocked by lock_sys->mutex
+			ensure we will not be blocked by lock_sys.mutex
 			for short duration information printing,
 			such as requested by sync_array_print_long_waits() */
 			if (!last_srv_print_monitor) {
...
@@ -1262,7 +1262,7 @@ srv_shutdown_all_bg_threads()
 		if (srv_start_state_is_set(SRV_START_STATE_LOCK_SYS)) {
 			/* a. Let the lock timeout thread exit */
-			os_event_set(lock_sys->timeout_event);
+			os_event_set(lock_sys.timeout_event);
 		}
 		if (!srv_read_only_mode) {
@@ -1853,7 +1853,7 @@ innobase_start_or_create_for_mysql()
 	log_sys_init();
 	recv_sys_init();
-	lock_sys_create(srv_lock_table_size);
+	lock_sys.create(srv_lock_table_size);
 	/* Create i/o-handler threads: */
@@ -2565,7 +2565,7 @@ innobase_start_or_create_for_mysql()
 			lock_wait_timeout_thread,
 			NULL, thread_ids + 2 + SRV_MAX_N_IO_THREADS);
 		thread_started[2 + SRV_MAX_N_IO_THREADS] = true;
-		lock_sys->timeout_thread_active = true;
+		lock_sys.timeout_thread_active = true;
 		/* Create the thread which warns of long semaphore waits */
 		srv_error_monitor_active = true;
@@ -2836,7 +2836,7 @@ innodb_shutdown()
 	ut_ad(trx_sys.is_initialised() || !srv_was_started);
 	ut_ad(buf_dblwr || !srv_was_started || srv_read_only_mode
 	      || srv_force_recovery >= SRV_FORCE_NO_TRX_UNDO);
-	ut_ad(lock_sys || !srv_was_started);
+	ut_ad(lock_sys.is_initialised() || !srv_was_started);
 #ifdef BTR_CUR_HASH_ADAPT
 	ut_ad(btr_search_sys || !srv_was_started);
 #endif /* BTR_CUR_HASH_ADAPT */
@@ -2876,10 +2876,7 @@ innodb_shutdown()
 	if (buf_dblwr) {
 		buf_dblwr_free();
 	}
-	if (lock_sys) {
-		lock_sys_close();
-	}
+	lock_sys.close();
 	trx_pool_close();
 	/* We don't create these mutexes in RO mode because we don't create
...
@@ -811,7 +811,7 @@ LatchDebug::check_order(
 	case SYNC_TRX:
-		/* Either the thread must own the lock_sys->mutex, or
+		/* Either the thread must own the lock_sys.mutex, or
 		it is allowed to own only ONE trx_t::mutex. */
 		if (less(latches, level) != NULL) {
...
@@ -172,7 +172,7 @@ struct trx_i_s_cache_t {
 	ha_storage_t*	storage;	/*!< storage for external volatile
 					data that may become unavailable
 					when we release
-					lock_sys->mutex or trx_sys.mutex */
+					lock_sys.mutex or trx_sys.mutex */
 	ulint		mem_allocd;	/*!< the amount of memory
 					allocated with mem_alloc*() */
 	bool		is_truncated;	/*!< this is true if the memory
@@ -537,9 +537,9 @@ fill_trx_row(
 	row->trx_tables_locked = lock_number_of_tables_locked(&trx->lock);
-	/* These are protected by both trx->mutex or lock_sys->mutex,
-	or just lock_sys->mutex. For reading, it suffices to hold
-	lock_sys->mutex. */
+	/* These are protected by both trx->mutex or lock_sys.mutex,
+	or just lock_sys.mutex. For reading, it suffices to hold
+	lock_sys.mutex. */
 	row->trx_lock_structs = UT_LIST_GET_LEN(trx->lock.trx_locks);
...
@@ -2094,7 +2094,7 @@ trx_print_low(
 /**********************************************************************//**
 Prints info about a transaction.
-The caller must hold lock_sys->mutex.
+The caller must hold lock_sys.mutex.
 When possible, use trx_print() instead. */
 void
 trx_print_latched(
@@ -2114,7 +2114,7 @@ trx_print_latched(
 /**********************************************************************//**
 Prints info about a transaction.
-Acquires and releases lock_sys->mutex. */
+Acquires and releases lock_sys.mutex. */
 void
 trx_print(
 /*======*/
@@ -2156,7 +2156,7 @@ trx_assert_started(
 	/* trx->state can change from or to NOT_STARTED while we are holding
 	trx_sys.mutex for non-locking autocommit selects but not for other
 	types of transactions. It may change from ACTIVE to PREPARED. Unless
-	we are holding lock_sys->mutex, it may also change to COMMITTED. */
+	we are holding lock_sys.mutex, it may also change to COMMITTED. */
 	switch (trx->state) {
 	case TRX_STATE_PREPARED:
@@ -2432,7 +2432,7 @@ static my_bool trx_get_trx_by_xid_callback(rw_trx_hash_element_t *element,
 /**
   Finds PREPARED XA transaction by xid.
-  trx may have been committed, unless the caller is holding lock_sys->mutex.
+  trx may have been committed, unless the caller is holding lock_sys.mutex.
 @param[in]	xid	X/Open XA transaction identifier
...