nexedi / MariaDB

Commit bf05180b, authored Dec 04, 2008 by Vadim Tkachenko
Parent: d2a38600

    innodb_io_patches.patch merged

Showing 12 changed files with 250 additions and 576 deletions (+250 -576).
buf/buf0rea.c         +8    -0
handler/ha_innodb.cc  +33   -0
include/os0file.h     +4    -2
include/srv0srv.h     +7    -0
include/sync0rw.h     +13   -32
include/sync0rw.ic    +24   -247
log/log0log.c         +9    -0
os/os0file.c          +26   -4
srv/srv0srv.c         +62   -11
srv/srv0start.c       +8    -4
sync/sync0arr.c       +21   -27
sync/sync0rw.c        +35   -249
buf/buf0rea.c  (+8 -0)

@@ -187,6 +187,10 @@ buf_read_ahead_random(
     ulint       i;
     ulint       buf_read_ahead_random_area;
 
+    if (!(srv_read_ahead & 1)) {
+        return(0);
+    }
+
     if (srv_startup_is_before_trx_rollback_phase) {
         /* No read-ahead to avoid thread deadlocks */
         return(0);

@@ -412,6 +416,10 @@ buf_read_ahead_linear(
     const ulint buf_read_ahead_linear_area = BUF_READ_AHEAD_LINEAR_AREA;
 
+    if (!(srv_read_ahead & 2)) {
+        return(0);
+    }
+
     if (UNIV_UNLIKELY(srv_startup_is_before_trx_rollback_phase)) {
         /* No read-ahead to avoid thread deadlocks */
         return(0);
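As an aside, here is a minimal stand-alone sketch (illustration only, not part of the patch) of how the new srv_read_ahead value is interpreted by the two hunks above: bit 0 gates random read-ahead and bit 1 gates linear read-ahead, so the default value 3 enables both heuristics and 0 disables read-ahead entirely.

#include <stdio.h>

/* Mirrors the bit tests added in buf_read_ahead_random() and
   buf_read_ahead_linear(); nothing below is InnoDB code. */
int main(void)
{
    unsigned long srv_read_ahead;

    for (srv_read_ahead = 0; srv_read_ahead <= 3; srv_read_ahead++) {
        printf("innodb_read_ahead=%lu: random %s, linear %s\n",
               srv_read_ahead,
               (srv_read_ahead & 1) ? "enabled" : "disabled",
               (srv_read_ahead & 2) ? "enabled" : "disabled");
    }
    return 0;
}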
handler/ha_innodb.cc  (+33 -0)

@@ -137,6 +137,7 @@ static long innobase_mirrored_log_groups, innobase_log_files_in_group,
     innobase_force_recovery, innobase_open_files,
     innobase_autoinc_lock_mode;
 
+static unsigned long innobase_read_io_threads, innobase_write_io_threads;
 static long long innobase_buffer_pool_size, innobase_log_file_size;
 
 /* The default values for the following char* start-up parameters

@@ -2066,6 +2067,8 @@ innobase_init(
     srv_mem_pool_size = (ulint) innobase_additional_mem_pool_size;
 
     srv_n_file_io_threads = (ulint) innobase_file_io_threads;
+    srv_n_read_io_threads = (ulint) innobase_read_io_threads;
+    srv_n_write_io_threads = (ulint) innobase_write_io_threads;
 
     srv_force_recovery = (ulint) innobase_force_recovery;

@@ -9558,6 +9561,31 @@ static MYSQL_SYSVAR_STR(version, innodb_version_str,
   PLUGIN_VAR_NOCMDOPT | PLUGIN_VAR_READONLY,
   "InnoDB version", NULL, NULL, INNODB_VERSION_STR);
 
+static MYSQL_SYSVAR_ULONG(io_capacity, srv_io_capacity,
+  PLUGIN_VAR_RQCMDARG,
+  "Number of IO operations per second the server can do. Tunes background IO rate.",
+  NULL, NULL, 100, 100, 999999999, 0);
+
+static MYSQL_SYSVAR_ULONG(read_ahead, srv_read_ahead,
+  PLUGIN_VAR_RQCMDARG,
+  "Enable/Diasable read aheads bit0:random bit1:linear",
+  NULL, NULL, 3, 0, 3, 0);
+
+static MYSQL_SYSVAR_ULONG(adaptive_checkpoint, srv_adaptive_checkpoint,
+  PLUGIN_VAR_RQCMDARG,
+  "Enable/Diasable flushing along modified age 0:disable 1:enable",
+  NULL, NULL, 0, 0, 1, 0);
+
+static MYSQL_SYSVAR_ULONG(read_io_threads, innobase_read_io_threads,
+  PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
+  "Number of background read I/O threads in InnoDB.",
+  NULL, NULL, 1, 1, 64, 0);
+
+static MYSQL_SYSVAR_ULONG(write_io_threads, innobase_write_io_threads,
+  PLUGIN_VAR_RQCMDARG | PLUGIN_VAR_READONLY,
+  "Number of background write I/O threads in InnoDB.",
+  NULL, NULL, 1, 1, 64, 0);
+
 static struct st_mysql_sys_var* innobase_system_variables[]= {
   MYSQL_SYSVAR(additional_mem_pool_size),
   MYSQL_SYSVAR(autoextend_increment),

@@ -9604,6 +9632,11 @@ static struct st_mysql_sys_var* innobase_system_variables[]= {
   MYSQL_SYSVAR(thread_sleep_delay),
   MYSQL_SYSVAR(autoinc_lock_mode),
   MYSQL_SYSVAR(version),
+  MYSQL_SYSVAR(io_capacity),
+  MYSQL_SYSVAR(read_ahead),
+  MYSQL_SYSVAR(adaptive_checkpoint),
+  MYSQL_SYSVAR(read_io_threads),
+  MYSQL_SYSVAR(write_io_threads),
   NULL
 };
include/os0file.h  (+4 -2)

@@ -561,8 +561,10 @@ os_aio_init(
 /*========*/
     ulint   n,              /* in: maximum number of pending aio operations
                             allowed; n must be divisible by n_segments */
-    ulint   n_segments,     /* in: combined number of segments in the four
-                            first aio arrays; must be >= 4 */
+//  ulint   n_segments,     /* in: combined number of segments in the four
+//                          first aio arrays; must be >= 4 */
+    ulint   n_read_threads, /* n_segments == 2 + n_read_threads + n_write_threads */
+    ulint   n_write_threads, /**/
     ulint   n_slots_sync);  /* in: number of slots in the sync aio array */
 /***********************************************************************
 Requests an asynchronous i/o operation. */
include/srv0srv.h  (+7 -0)

@@ -100,6 +100,8 @@ extern ulint srv_mem_pool_size;
 extern ulint    srv_lock_table_size;
 
 extern ulint    srv_n_file_io_threads;
+extern ulint    srv_n_read_io_threads;
+extern ulint    srv_n_write_io_threads;
 
 #ifdef UNIV_LOG_ARCHIVE
 extern ibool    srv_log_archive_on;

@@ -144,6 +146,11 @@ extern ulong srv_max_buf_pool_modified_pct;
 extern ulong    srv_max_purge_lag;
 extern ulong    srv_replication_delay;
+
+extern ulint    srv_io_capacity;
+extern ulint    srv_read_ahead;
+extern ulint    srv_adaptive_checkpoint;
+
 /*-------------------------------------------*/
 extern ulint    srv_n_rows_inserted;
include/sync0rw.h  (+13 -32)

@@ -328,17 +328,7 @@ rw_lock_get_x_lock_count(
 Accessor functions for rw lock. */
 UNIV_INLINE
 ulint
-rw_lock_get_s_waiters(
-/*==================*/
-    rw_lock_t*  lock);
-UNIV_INLINE
-ulint
-rw_lock_get_x_waiters(
-/*==================*/
-    rw_lock_t*  lock);
-UNIV_INLINE
-ulint
-rw_lock_get_wx_waiters(
+rw_lock_get_waiters(
 /*================*/
     rw_lock_t*  lock);
 UNIV_INLINE

@@ -422,11 +412,6 @@ rw_lock_debug_print(
     rw_lock_debug_t*    info);  /* in: debug struct */
 #endif /* UNIV_SYNC_DEBUG */
 
-#ifdef HAVE_GCC_ATOMIC_BUILTINS
-/* This value means NOT_LOCKED */
-#define RW_LOCK_BIAS    0x00100000
-#endif
-
 /* NOTE! The structure appears here only for the compiler to know its size.
 Do not use its fields directly! The structure used in the spin lock
 implementation of a read-write lock. Several threads may have a shared lock

@@ -436,9 +421,9 @@ blocked by readers, a writer may queue for the lock by setting the writer
 field. Then no new readers are allowed in. */
 
 struct rw_lock_struct {
-    os_event_t  s_event;    /* Used for s_lock */
-    os_event_t  x_event;    /* Used for x_lock */
+    os_event_t  event;      /* Used by sync0arr.c for thread queueing */
 #ifdef __WIN__
     os_event_t  wait_ex_event;  /* This windows specific event is
                     used by the thread which has set the
                     lock state to RW_LOCK_WAIT_EX. The

@@ -446,34 +431,30 @@ struct rw_lock_struct {
                     thread will be the next one to proceed
                     once the current the event gets
                     signalled. See LEMMA 2 in sync0sync.c */
 #endif
-#ifdef HAVE_GCC_ATOMIC_BUILTINS
-    volatile lint   lock_word;  /* Used by using atomic builtin */
-#endif
-    volatile ulint  reader_count;   /* Number of readers who have locked this
+    ulint   reader_count;   /* Number of readers who have locked this
                     lock in the shared mode */
-    volatile ulint  writer;     /* This field is set to RW_LOCK_EX if there
+    ulint   writer;     /* This field is set to RW_LOCK_EX if there
                     is a writer owning the lock (in exclusive
                     mode), RW_LOCK_WAIT_EX if a writer is
                     queueing for the lock, and
                     RW_LOCK_NOT_LOCKED, otherwise. */
-    volatile os_thread_id_t writer_thread;
+    os_thread_id_t  writer_thread;
                     /* Thread id of a possible writer thread */
-    volatile ulint  writer_count;   /* Number of times the same thread has
+    ulint   writer_count;   /* Number of times the same thread has
                     recursively locked the lock in the exclusive
                     mode */
-#ifndef HAVE_GCC_ATOMIC_BUILTINS
     mutex_t mutex;          /* The mutex protecting rw_lock_struct */
-#endif
     ulint   pass;           /* Default value 0. This is set to some
                     value != 0 given by the caller of an x-lock
                     operation, if the x-lock is to be passed to
                     another thread to unlock (which happens in
                     asynchronous i/o). */
-    volatile ulint  s_waiters;  /* 1: there are waiters (s_lock) */
-    volatile ulint  x_waiters;  /* 1: there are waiters (x_lock) */
-    volatile ulint  wait_ex_waiters; /* 1: there are waiters (wait_ex) */
+    ulint   waiters;        /* This ulint is set to 1 if there are
+                    waiters (readers or writers) in the global
+                    wait array, waiting for this rw_lock.
+                    Otherwise, == 0. */
     UT_LIST_NODE_T(rw_lock_t) list; /* All allocated rw locks are put into a
                     list */

@@ -486,7 +467,7 @@ struct rw_lock_struct {
     const char* cfile_name; /* File name where lock created */
     const char* last_s_file_name;/* File name where last s-locked */
     const char* last_x_file_name;/* File name where last x-locked */
-    volatile ibool  writer_is_wait_ex;
+    ibool       writer_is_wait_ex;
                     /* This is TRUE if the writer field is
                     RW_LOCK_WAIT_EX; this field is located far
                     from the memory update hotspot fields which
include/sync0rw.ic  (+24 -247)

@@ -47,52 +47,20 @@ rw_lock_remove_debug_info(
 Accessor functions for rw lock. */
 UNIV_INLINE
 ulint
-rw_lock_get_s_waiters(
+rw_lock_get_waiters(
 /*================*/
     rw_lock_t*  lock)
 {
-    return(lock->s_waiters);
-}
-UNIV_INLINE
-ulint
-rw_lock_get_x_waiters(
-/*================*/
-    rw_lock_t*  lock)
-{
-    return(lock->x_waiters);
-}
-UNIV_INLINE
-ulint
-rw_lock_get_wx_waiters(
-/*================*/
-    rw_lock_t*  lock)
-{
-    return(lock->wait_ex_waiters);
+    return(lock->waiters);
 }
 UNIV_INLINE
 void
-rw_lock_set_s_waiters(
-    rw_lock_t*  lock,
-    ulint       flag)
-{
-    lock->s_waiters = flag;
-}
-UNIV_INLINE
-void
-rw_lock_set_x_waiters(
+rw_lock_set_waiters(
 /*================*/
     rw_lock_t*  lock,
     ulint       flag)
 {
-    lock->x_waiters = flag;
-}
-UNIV_INLINE
-void
-rw_lock_set_wx_waiters(
-/*================*/
-    rw_lock_t*  lock,
-    ulint       flag)
-{
-    lock->wait_ex_waiters = flag;
+    lock->waiters = flag;
 }
 UNIV_INLINE
 ulint

@@ -100,19 +68,7 @@ rw_lock_get_writer(
 /*===============*/
     rw_lock_t*  lock)
 {
-#ifdef HAVE_GCC_ATOMIC_BUILTINS
-    if (lock->writer == RW_LOCK_NOT_LOCKED) {
-        return(RW_LOCK_NOT_LOCKED);
-    }
-    if (lock->writer_is_wait_ex) {
-        return(RW_LOCK_WAIT_EX);
-    } else {
-        return(RW_LOCK_EX);
-    }
-#else
     return(lock->writer);
-#endif
 }
 UNIV_INLINE
 void

@@ -140,7 +96,6 @@ rw_lock_set_reader_count(
 {
     lock->reader_count = count;
 }
-#ifndef HAVE_GCC_ATOMIC_BUILTINS
 UNIV_INLINE
 mutex_t*
 rw_lock_get_mutex(

@@ -149,7 +104,6 @@ rw_lock_get_mutex(
 {
     return(&(lock->mutex));
 }
-#endif
 /**********************************************************************
 Returns the value of writer_count for the lock. Does not reserve the lock

@@ -179,27 +133,13 @@ rw_lock_s_lock_low(
     const char* file_name, /* in: file name where lock requested */
     ulint       line)   /* in: line where requested */
 {
-#ifndef HAVE_GCC_ATOMIC_BUILTINS
     ut_ad(mutex_own(rw_lock_get_mutex(lock)));
-#endif
     /* Check if the writer field is free */
-#ifdef HAVE_GCC_ATOMIC_BUILTINS
-    if (UNIV_LIKELY(rw_lock_get_writer(lock) == RW_LOCK_NOT_LOCKED)) {
-        /* try s-lock */
-        if(__sync_sub_and_fetch(&(lock->lock_word),1) <= 0) {
-            /* fail */
-            __sync_fetch_and_add(&(lock->lock_word),1);
-            return(FALSE); /* locking did not succeed */
-        }
-        /* success */
-        __sync_fetch_and_add(&(lock->reader_count),1);
-#else
     if (UNIV_LIKELY(lock->writer == RW_LOCK_NOT_LOCKED)) {
         /* Set the shared lock by incrementing the reader count */
         lock->reader_count++;
-#endif
 #ifdef UNIV_SYNC_DEBUG
         rw_lock_add_debug_info(lock, pass, RW_LOCK_SHARED, file_name,

@@ -226,15 +166,11 @@ rw_lock_s_lock_direct(
     const char* file_name, /* in: file name where requested */
     ulint       line)   /* in: line where lock requested */
 {
-    ut_ad(rw_lock_get_writer(lock) == RW_LOCK_NOT_LOCKED);
+    ut_ad(lock->writer == RW_LOCK_NOT_LOCKED);
     ut_ad(rw_lock_get_reader_count(lock) == 0);
 
     /* Set the shared lock by incrementing the reader count */
-#ifdef HAVE_GCC_ATOMIC_BUILTINS
-    __sync_fetch_and_add(&(lock->reader_count),1);
-#else
     lock->reader_count++;
-#endif
 
     lock->last_s_file_name = file_name;
     lock->last_s_line = line;

@@ -262,11 +198,7 @@ rw_lock_x_lock_direct(
     rw_lock_set_writer(lock, RW_LOCK_EX);
     lock->writer_thread = os_thread_get_curr_id();
-#ifdef HAVE_GCC_ATOMIC_BUILTINS
-    __sync_fetch_and_add(&(lock->writer_count),1);
-#else
     lock->writer_count++;
-#endif
     lock->pass = 0;
 
     lock->last_x_file_name = file_name;

@@ -308,21 +240,15 @@ rw_lock_s_lock_func(
     ut_ad(!rw_lock_own(lock, RW_LOCK_SHARED)); /* see NOTE above */
 #endif /* UNIV_SYNC_DEBUG */
 
-#ifndef HAVE_GCC_ATOMIC_BUILTINS
     mutex_enter(rw_lock_get_mutex(lock));
-#endif
 
     if (UNIV_LIKELY(rw_lock_s_lock_low(lock, pass, file_name, line))) {
-#ifndef HAVE_GCC_ATOMIC_BUILTINS
         mutex_exit(rw_lock_get_mutex(lock));
-#endif
 
         return; /* Success */
     } else {
         /* Did not succeed, try spin wait */
-#ifndef HAVE_GCC_ATOMIC_BUILTINS
         mutex_exit(rw_lock_get_mutex(lock));
-#endif
 
         rw_lock_s_lock_spin(lock, pass, file_name, line);

@@ -345,23 +271,11 @@ rw_lock_s_lock_func_nowait(
 {
     ibool   success = FALSE;
 
-#ifdef HAVE_GCC_ATOMIC_BUILTINS
-    if (rw_lock_get_writer(lock) == RW_LOCK_NOT_LOCKED) {
-        /* try s-lock */
-        if(__sync_sub_and_fetch(&(lock->lock_word),1) <= 0) {
-            /* fail */
-            __sync_fetch_and_add(&(lock->lock_word),1);
-            return(FALSE); /* locking did not succeed */
-        }
-        /* success */
-        __sync_fetch_and_add(&(lock->reader_count),1);
-#else
     mutex_enter(rw_lock_get_mutex(lock));
 
     if (lock->writer == RW_LOCK_NOT_LOCKED) {
         /* Set the shared lock by incrementing the reader count */
         lock->reader_count++;
-#endif
 #ifdef UNIV_SYNC_DEBUG
         rw_lock_add_debug_info(lock, 0, RW_LOCK_SHARED, file_name,

@@ -374,9 +288,7 @@ rw_lock_s_lock_func_nowait(
         success = TRUE;
     }
 
-#ifndef HAVE_GCC_ATOMIC_BUILTINS
     mutex_exit(rw_lock_get_mutex(lock));
-#endif
 
     return(success);
 }

@@ -396,55 +308,6 @@ rw_lock_x_lock_func_nowait(
 {
     ibool       success     = FALSE;
     os_thread_id_t  curr_thread = os_thread_get_curr_id();
-#ifdef HAVE_GCC_ATOMIC_BUILTINS
-    if ((lock->lock_word == RW_LOCK_BIAS)
-        && rw_lock_get_writer(lock) == RW_LOCK_NOT_LOCKED) {
-        /* try x-lock */
-        if(__sync_sub_and_fetch(&(lock->lock_word),
-                    RW_LOCK_BIAS) == 0) {
-            /* success */
-            /* try to lock writer */
-            if(__sync_lock_test_and_set(&(lock->writer),RW_LOCK_EX)
-                    == RW_LOCK_NOT_LOCKED) {
-                /* success */
-                lock->writer_thread = curr_thread;
-                lock->pass = 0;
-                lock->writer_is_wait_ex = FALSE;
-                /* next function may work as memory barrier */
-relock:
-                __sync_fetch_and_add(&(lock->writer_count),1);
-#ifdef UNIV_SYNC_DEBUG
-                rw_lock_add_debug_info(lock, 0, RW_LOCK_EX, file_name, line);
-#endif
-                lock->last_x_file_name = file_name;
-                lock->last_x_line = line;
-                ut_ad(rw_lock_validate(lock));
-                return(TRUE);
-            } else {
-                /* x-unlock */
-                __sync_fetch_and_add(&(lock->lock_word),
-                             RW_LOCK_BIAS);
-            }
-        } else {
-            /* fail (x-lock) */
-            __sync_fetch_and_add(&(lock->lock_word),RW_LOCK_BIAS);
-        }
-    }
-    if (lock->pass == 0
-        && os_thread_eq(lock->writer_thread, curr_thread)
-        && rw_lock_get_writer(lock) == RW_LOCK_EX) {
-        goto relock;
-    }
-    ut_ad(rw_lock_validate(lock));
-    return(FALSE);
-#else
     mutex_enter(rw_lock_get_mutex(lock));
 
     if (UNIV_UNLIKELY(rw_lock_get_reader_count(lock) != 0)) {

@@ -475,7 +338,6 @@ rw_lock_x_lock_func_nowait(
     ut_ad(rw_lock_validate(lock));
 
     return(success);
-#endif
 }
 /**********************************************************************

@@ -491,33 +353,16 @@ rw_lock_s_unlock_func(
 #endif
     )
 {
-#ifndef HAVE_GCC_ATOMIC_BUILTINS
     mutex_t*    mutex   = &(lock->mutex);
-#endif
-    ibool       x_sg    = FALSE;
-    ibool       wx_sg   = FALSE;
-#ifdef HAVE_GCC_ATOMIC_BUILTINS
-    ibool       last    = FALSE;
-#endif
+    ibool       sg      = FALSE;
 
-#ifndef HAVE_GCC_ATOMIC_BUILTINS
     /* Acquire the mutex protecting the rw-lock fields */
     mutex_enter(mutex);
-#endif
 
     /* Reset the shared lock by decrementing the reader count */
     ut_a(lock->reader_count > 0);
-#ifdef HAVE_GCC_ATOMIC_BUILTINS
-    /* unlock lock_word */
-    __sync_fetch_and_add(&(lock->lock_word),1);
-    if(__sync_sub_and_fetch(&(lock->reader_count),1) == 0) {
-        last = TRUE;
-    }
-#else
     lock->reader_count--;
-#endif
 
 #ifdef UNIV_SYNC_DEBUG
     rw_lock_remove_debug_info(lock, pass, RW_LOCK_SHARED);

@@ -526,36 +371,20 @@ rw_lock_s_unlock_func(
     /* If there may be waiters and this was the last s-lock,
     signal the object */
-#ifdef HAVE_GCC_ATOMIC_BUILTINS
-    if (UNIV_UNLIKELY(last && lock->wait_ex_waiters)) {
-#else
-    if (UNIV_UNLIKELY(lock->wait_ex_waiters)
+    if (UNIV_UNLIKELY(lock->waiters)
         && lock->reader_count == 0) {
-#endif
-        wx_sg = TRUE;
-        rw_lock_set_wx_waiters(lock, 0);
-    }
-#ifdef HAVE_GCC_ATOMIC_BUILTINS
-    else if (UNIV_UNLIKELY(last && lock->x_waiters)) {
-#else
-    else if (UNIV_UNLIKELY(lock->x_waiters)
-         && lock->reader_count == 0) {
-#endif
-        x_sg = TRUE;
-        rw_lock_set_x_waiters(lock, 0);
+        sg = TRUE;
+        rw_lock_set_waiters(lock, 0);
     }
 
-#ifndef HAVE_GCC_ATOMIC_BUILTINS
     mutex_exit(mutex);
-#endif
 
-    if (UNIV_UNLIKELY(wx_sg)) {
+    if (UNIV_UNLIKELY(sg)) {
+#ifdef __WIN__
         os_event_set(lock->wait_ex_event);
-        sync_array_object_signalled(sync_primary_wait_array);
-    } else if (UNIV_UNLIKELY(x_sg)) {
-        os_event_set(lock->x_event);
+#endif
+        os_event_set(lock->event);
         sync_array_object_signalled(sync_primary_wait_array);
     }

@@ -579,19 +408,13 @@ rw_lock_s_unlock_direct(
     ut_ad(lock->reader_count > 0);
 
-#ifdef HAVE_GCC_ATOMIC_BUILTINS
-    __sync_sub_and_fetch(&(lock->reader_count),1);
-#else
     lock->reader_count--;
-#endif
 
 #ifdef UNIV_SYNC_DEBUG
     rw_lock_remove_debug_info(lock, 0, RW_LOCK_SHARED);
 #endif
 
-    ut_ad(!lock->s_waiters);
-    ut_ad(!lock->x_waiters);
-    ut_ad(!lock->wait_ex_waiters);
+    ut_ad(!lock->waiters);
     ut_ad(rw_lock_validate(lock));
 #ifdef UNIV_SYNC_PERF_STAT
     rw_s_exit_count++;

@@ -611,81 +434,41 @@ rw_lock_x_unlock_func(
 #endif
     )
 {
-#ifdef HAVE_GCC_ATOMIC_BUILTINS
-    ibool   last    = FALSE;
-#endif
-    ibool   s_sg    = FALSE;
-    ibool   x_sg    = FALSE;
+    ibool   sg  = FALSE;
 
-#ifndef HAVE_GCC_ATOMIC_BUILTINS
     /* Acquire the mutex protecting the rw-lock fields */
     mutex_enter(&(lock->mutex));
-#endif
 
     /* Reset the exclusive lock if this thread no longer has an x-mode
     lock */
 
     ut_ad(lock->writer_count > 0);
 
-#ifdef HAVE_GCC_ATOMIC_BUILTINS
-    if(__sync_sub_and_fetch(&(lock->writer_count),1) == 0) {
-        last = TRUE;
-    }
-    if (last) {
-        /* unlock lock_word */
-        __sync_fetch_and_add(&(lock->lock_word),RW_LOCK_BIAS);
-        /* FIXME: It is a value of bad manners for pthread.
-        But we shouldn't keep an ID of not-owner. */
-        lock->writer_thread = -1;
-        /* atomic operation may be safer about memory order. */
-        rw_lock_set_writer(lock, RW_LOCK_NOT_LOCKED);
-        __sync_synchronize();
-    }
-#else
     lock->writer_count--;
 
     if (lock->writer_count == 0) {
         rw_lock_set_writer(lock, RW_LOCK_NOT_LOCKED);
     }
-#endif
 
 #ifdef UNIV_SYNC_DEBUG
     rw_lock_remove_debug_info(lock, pass, RW_LOCK_EX);
 #endif
 
     /* If there may be waiters, signal the lock */
-#ifdef HAVE_GCC_ATOMIC_BUILTINS
-    if (last) {
-#else
-    if (lock->writer_count == 0) {
-#endif
-        if(lock->s_waiters){
-            s_sg = TRUE;
-            rw_lock_set_s_waiters(lock, 0);
-        }
-        if(lock->x_waiters){
-            x_sg = TRUE;
-            rw_lock_set_x_waiters(lock, 0);
-        }
-    }
+    if (UNIV_UNLIKELY(lock->waiters)
+        && lock->writer_count == 0) {
+        sg = TRUE;
+        rw_lock_set_waiters(lock, 0);
+    }
 
-#ifndef HAVE_GCC_ATOMIC_BUILTINS
     mutex_exit(&(lock->mutex));
-#endif
 
-    if (UNIV_UNLIKELY(s_sg)) {
-        os_event_set(lock->s_event);
-        sync_array_object_signalled(sync_primary_wait_array);
-    }
-    if (UNIV_UNLIKELY(x_sg)) {
+    if (UNIV_UNLIKELY(sg)) {
 #ifdef __WIN__
         /* I doubt the necessity of it. */
         os_event_set(lock->wait_ex_event);
 #endif
-        os_event_set(lock->x_event);
+        os_event_set(lock->event);
         sync_array_object_signalled(sync_primary_wait_array);
     }

@@ -710,13 +493,9 @@ rw_lock_x_unlock_direct(
     ut_ad(lock->writer_count > 0);
 
-#ifdef HAVE_GCC_ATOMIC_BUILTINS
-    if(__sync_sub_and_fetch(&(lock->writer_count),1) == 0) {
-#else
     lock->writer_count--;
 
     if (lock->writer_count == 0) {
-#endif
         rw_lock_set_writer(lock, RW_LOCK_NOT_LOCKED);
     }

@@ -724,9 +503,7 @@ rw_lock_x_unlock_direct(
     rw_lock_remove_debug_info(lock, 0, RW_LOCK_EX);
 #endif
 
-    ut_ad(!lock->s_waiters);
-    ut_ad(!lock->x_waiters);
-    ut_ad(!lock->wait_ex_waiters);
+    ut_ad(!lock->waiters);
     ut_ad(rw_lock_validate(lock));
 #ifdef UNIV_SYNC_PERF_STAT
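The common thread in the sync0rw.ic changes is a revert to the original InnoDB wake-up protocol: a single waiters flag and a single event per lock, instead of separate s/x/wait-ex flags and events plus the GCC atomic-builtin fast path. A minimal pthread sketch of that pattern is shown below; it is an assumed analogy for illustration (condition variable standing in for os_event_t), not InnoDB code.

#include <pthread.h>
#include <stdio.h>

/* One 'waiters' flag and one 'event' shared by every waiter, guarded by the
   lock's mutex -- the same shape as the reverted rw_lock scheme above. */
static pthread_mutex_t m = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  event = PTHREAD_COND_INITIALIZER;
static int locked = 1;      /* stands in for the writer / reader_count state */
static int waiters = 0;     /* 1 if anyone is queued waiting for this lock */

static void* waiter(void* arg)
{
    pthread_mutex_lock(&m);
    waiters = 1;            /* announce ourselves before sleeping */
    while (locked) {
        pthread_cond_wait(&event, &m);
    }
    pthread_mutex_unlock(&m);
    printf("waiter %ld woke up\n", (long) arg);
    return NULL;
}

int main(void)
{
    pthread_t t;
    pthread_create(&t, NULL, waiter, (void*) 1L);

    pthread_mutex_lock(&m);
    locked = 0;             /* release the "lock" */
    if (waiters) {          /* signal only if someone queued */
        waiters = 0;
        pthread_cond_broadcast(&event);
    }
    pthread_mutex_unlock(&m);

    pthread_join(t, NULL);
    return 0;
}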
log/log0log.c  (+9 -0)

@@ -3258,6 +3258,15 @@ log_print(
         log_sys->flushed_to_disk_lsn,
         log_sys->last_checkpoint_lsn);
 
+    fprintf(file,
+        "Max checkpoint age %lu\n"
+        "Modified age %lu\n"
+        "Checkpoint age %lu\n",
+        (ulong) log_sys->max_checkpoint_age,
+        (ulong) ut_dulint_minus(log_sys->lsn,
+                log_buf_pool_get_oldest_modification()),
+        (ulong) ut_dulint_minus(log_sys->lsn,
+                log_sys->last_checkpoint_lsn));
+
     current_time = time(NULL);
 
     time_elapsed = 0.001 + difftime(current_time,
os/os0file.c  (+26 -4)

@@ -2920,8 +2920,10 @@ os_aio_init(
 /*========*/
     ulint   n,              /* in: maximum number of pending aio operations
                             allowed; n must be divisible by n_segments */
-    ulint   n_segments,     /* in: combined number of segments in the four
-                            first aio arrays; must be >= 4 */
+//  ulint   n_segments,     /* in: combined number of segments in the four
+//                          first aio arrays; must be >= 4 */
+    ulint   n_read_threads, /* n_segments == 2 + n_read_threads + n_write_threads*/
+    ulint   n_write_threads, /**/
     ulint   n_slots_sync)   /* in: number of slots in the sync aio array */
 {
     ulint   n_read_segs;

@@ -2929,6 +2931,8 @@ os_aio_init(
     ulint   n_per_seg;
     ulint   i;
 
+    ulint   n_segments = 2 + n_read_threads + n_write_threads;
+
     ut_ad(n % n_segments == 0);
     ut_ad(n_segments >= 4);

@@ -2939,8 +2943,8 @@ os_aio_init(
     }
 
     n_per_seg = n / n_segments;
-    n_write_segs = (n_segments - 2) / 2;
-    n_read_segs = n_segments - 2 - n_write_segs;
+    n_write_segs = n_write_threads;
+    n_read_segs = n_read_threads;
 
     /* fprintf(stderr, "Array n per seg %lu\n", n_per_seg); */

@@ -3140,6 +3144,13 @@ os_aio_array_reserve_slot(
     OVERLAPPED* control;
 #endif
     ulint       i;
+    ulint       prim_segment;
+    ulint       n;
+
+    n = array->n_slots / array->n_segments;
+    /* 64 blocks' striping ( aligning max(BUF_READ_AHEAD_AREA) ) */
+    prim_segment = (offset >> (UNIV_PAGE_SIZE_SHIFT + 6)) % (array->n_segments);
+
 loop:
     os_mutex_enter(array->mutex);

@@ -3158,6 +3169,16 @@ os_aio_array_reserve_slot(
         goto loop;
     }
 
+    for (i = prim_segment * n; i < array->n_slots; i++) {
+        slot = os_aio_array_get_nth_slot(array, i);
+
+        if (slot->reserved == FALSE) {
+            break;
+        }
+    }
+
+    if (slot->reserved == TRUE){
+        /* Not found after the intended segment. So we should search before. */
     for (i = 0;; i++) {
         slot = os_aio_array_get_nth_slot(array, i);

@@ -3165,6 +3186,7 @@ os_aio_array_reserve_slot(
             break;
         }
     }
+    }
 
     array->n_reserved++;
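A worked example of the new slot-striping arithmetic in os_aio_array_reserve_slot(): each request is assigned a primary segment from its file offset, aligned to 64-page stripes (64 pages being the maximum read-ahead area). The sketch below uses the same formula; the page-size shift, thread counts and slot counts are assumed values, not taken from the patch.

#include <stdio.h>

#define UNIV_PAGE_SIZE_SHIFT 14     /* assumed: 16 KiB pages */

int main(void)
{
    unsigned long n_segments = 2 + 4 + 4;       /* assumed: 2 + 4 read + 4 write threads */
    unsigned long n_slots = 2560;               /* assumed total slots in the array */
    unsigned long n = n_slots / n_segments;     /* slots per segment */
    unsigned long long offsets[] = { 0, 1ULL << 20, 64ULL << 20, 65ULL << 20 };
    int i;

    for (i = 0; i < 4; i++) {
        /* same striping formula as the patch: 64-page stripes, modulo segments */
        unsigned long prim_segment = (unsigned long)
            ((offsets[i] >> (UNIV_PAGE_SIZE_SHIFT + 6)) % n_segments);
        printf("offset %4llu MiB -> primary segment %lu, first slot %lu\n",
               offsets[i] >> 20, prim_segment, prim_segment * n);
    }
    return 0;
}

With 16 KiB pages a 64-page stripe is 1 MiB, so adjacent 1 MiB ranges of a file land in different segments and therefore tend to be serviced by different I/O threads.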
srv/srv0srv.c  (+62 -11)

@@ -147,6 +147,8 @@ UNIV_INTERN ulint srv_mem_pool_size = ULINT_MAX;
 UNIV_INTERN ulint   srv_lock_table_size = ULINT_MAX;
 
 UNIV_INTERN ulint   srv_n_file_io_threads   = ULINT_MAX;
+ulint   srv_n_read_io_threads   = 1;
+ulint   srv_n_write_io_threads  = 1;
 
 #ifdef UNIV_LOG_ARCHIVE
 UNIV_INTERN ibool   srv_log_archive_on  = FALSE;

@@ -311,6 +313,15 @@ UNIV_INTERN int srv_query_thread_priority = 0;
 UNIV_INTERN ulong   srv_replication_delay       = 0;
 
+ulint   srv_io_capacity = 100;
+
+/* Returns the number of IO operations that is X percent of the capacity.
+PCT_IO(5) -> returns the number of IO operations that is 5% of the max
+where max is srv_io_capacity.  */
+#define PCT_IO(pct) ((ulint) (srv_io_capacity * ((double) pct / 100.0)))
+
+ulint   srv_read_ahead = 3; /* 1: random  2: linear  3: Both */
+ulint   srv_adaptive_checkpoint = 0; /* 0:disable 1:enable */
+
 /*-------------------------------------------*/
 UNIV_INTERN ulong   srv_n_spin_wait_rounds  = 20;
 UNIV_INTERN ulong   srv_n_free_tickets_to_enter = 500;

@@ -2203,6 +2214,8 @@ srv_master_thread(
     ibool       skip_sleep  = FALSE;
     ulint       i;
 
+    dulint      oldest_lsn;
+
 #ifdef UNIV_DEBUG_THREAD_CREATION
     fprintf(stderr, "Master thread starts, id %lu\n",
         os_thread_pf(os_thread_get_curr_id()));

@@ -2293,7 +2306,7 @@ srv_master_thread(
         if (n_pend_ios < 3 && (n_ios - n_ios_old < 5)) {
             srv_main_thread_op_info = "doing insert buffer merge";
             ibuf_contract_for_n_pages(
-                TRUE, srv_insert_buffer_batch_size / 4);
+                TRUE, PCT_IO((srv_insert_buffer_batch_size / 4)));
 
             srv_main_thread_op_info = "flushing log";

@@ -2306,7 +2319,7 @@ srv_master_thread(
             /* Try to keep the number of modified pages in the
             buffer pool under the limit wished by the user */
 
-            n_pages_flushed = buf_flush_batch(BUF_FLUSH_LIST, 100,
+            n_pages_flushed = buf_flush_batch(BUF_FLUSH_LIST, PCT_IO(100),
                               IB_ULONGLONG_MAX);
 
             /* If we had to do the flush, it may have taken

@@ -2315,6 +2328,44 @@ srv_master_thread(
             iteration of this loop. */
 
             skip_sleep = TRUE;
+        } else if (srv_adaptive_checkpoint) {
+
+            /* Try to keep modified age not to exceed
+            max_checkpoint_age * 7/8 line */
+
+            mutex_enter(&(log_sys->mutex));
+
+            oldest_lsn = buf_pool_get_oldest_modification();
+            if (ut_dulint_is_zero(oldest_lsn)) {
+
+                mutex_exit(&(log_sys->mutex));
+
+            } else {
+                if (ut_dulint_minus(log_sys->lsn, oldest_lsn)
+                    > (log_sys->max_checkpoint_age)
+                      - ((log_sys->max_checkpoint_age) / 4)) {
+
+                    /* 2nd defence line (max_checkpoint_age * 3/4) */
+
+                    mutex_exit(&(log_sys->mutex));
+
+                    n_pages_flushed = buf_flush_batch(
+                        BUF_FLUSH_LIST, PCT_IO(100), ut_dulint_max);
+
+                    skip_sleep = TRUE;
+                } else if (ut_dulint_minus(log_sys->lsn, oldest_lsn)
+                       > (log_sys->max_checkpoint_age) / 2) {
+
+                    /* 1st defence line (max_checkpoint_age * 1/2) */
+
+                    mutex_exit(&(log_sys->mutex));
+
+                    n_pages_flushed = buf_flush_batch(
+                        BUF_FLUSH_LIST, PCT_IO(10), ut_dulint_max);
+
+                    skip_sleep = TRUE;
+                } else {
+                    mutex_exit(&(log_sys->mutex));
+                }
+            }
         }
 
         if (srv_activity_count == old_activity_count) {

@@ -2341,10 +2392,10 @@ srv_master_thread(
         n_pend_ios = buf_get_n_pending_ios()
             + log_sys->n_pending_writes;
         n_ios = log_sys->n_log_ios + buf_pool->n_pages_read
             + buf_pool->n_pages_written;
-        if (n_pend_ios < 3 && (n_ios - n_ios_very_old < 200)) {
+        if (n_pend_ios < 3 && (n_ios - n_ios_very_old < PCT_IO(200))) {
             srv_main_thread_op_info = "flushing buffer pool pages";
-            buf_flush_batch(BUF_FLUSH_LIST, 100, IB_ULONGLONG_MAX);
+            buf_flush_batch(BUF_FLUSH_LIST, PCT_IO(100), IB_ULONGLONG_MAX);
 
             srv_main_thread_op_info = "flushing log";
             log_buffer_flush_to_disk();

@@ -2354,7 +2405,7 @@ srv_master_thread(
         even if the server were active */
 
         srv_main_thread_op_info = "doing insert buffer merge";
-        ibuf_contract_for_n_pages(TRUE, srv_insert_buffer_batch_size / 4);
+        ibuf_contract_for_n_pages(TRUE, PCT_IO((srv_insert_buffer_batch_size / 4)));
 
         srv_main_thread_op_info = "flushing log";
         log_buffer_flush_to_disk();

@@ -2394,14 +2445,14 @@ srv_master_thread(
         (> 70 %), we assume we can afford reserving the disk(s) for
         the time it requires to flush 100 pages */
 
-        n_pages_flushed = buf_flush_batch(BUF_FLUSH_LIST, 100,
+        n_pages_flushed = buf_flush_batch(BUF_FLUSH_LIST, PCT_IO(100),
                           IB_ULONGLONG_MAX);
     } else {
         /* Otherwise, we only flush a small number of pages so that
         we do not unnecessarily use much disk i/o capacity from
         other work */
 
-        n_pages_flushed = buf_flush_batch(BUF_FLUSH_LIST, 10,
+        n_pages_flushed = buf_flush_batch(BUF_FLUSH_LIST, PCT_IO(10),
                           IB_ULONGLONG_MAX);
     }

@@ -2489,7 +2540,7 @@ srv_master_thread(
         n_bytes_merged = 0;
     } else {
         n_bytes_merged = ibuf_contract_for_n_pages(
-            TRUE, srv_insert_buffer_batch_size);
+            TRUE, PCT_IO((srv_insert_buffer_batch_size * 5)));
     }
 
     srv_main_thread_op_info = "reserving kernel mutex";

@@ -2505,7 +2556,7 @@ srv_master_thread(
     srv_main_thread_op_info = "flushing buffer pool pages";
 
     if (srv_fast_shutdown < 2) {
-        n_pages_flushed = buf_flush_batch(BUF_FLUSH_LIST, 100,
+        n_pages_flushed = buf_flush_batch(BUF_FLUSH_LIST, PCT_IO(100),
                           IB_ULONGLONG_MAX);
     } else {
         /* In the fastest shutdown we do not flush the buffer pool
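The PCT_IO() macro introduced above scales every background-I/O batch by innodb_io_capacity instead of using hard-coded page counts. A small worked example follows; the macro body is adapted from the hunk above and the io_capacity settings shown are assumed values, not part of the patch.

#include <stdio.h>

/* Adapted from the PCT_IO macro in srv0srv.c, driven by a local variable. */
static unsigned long srv_io_capacity = 100;
#define PCT_IO(pct) ((unsigned long) (srv_io_capacity * ((double) (pct) / 100.0)))

int main(void)
{
    unsigned long caps[] = { 100, 200, 1000 };  /* assumed io_capacity settings */
    int i;

    for (i = 0; i < 3; i++) {
        srv_io_capacity = caps[i];
        printf("io_capacity=%4lu: PCT_IO(10)=%4lu  PCT_IO(100)=%4lu\n",
               srv_io_capacity, PCT_IO(10), PCT_IO(100));
    }
    /* With the default io_capacity=100 the old hard-coded batch of 100 pages
       is unchanged; raising io_capacity to 1000 lets the same call sites
       flush up to 1000 pages per batch. */
    return 0;
}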
srv/srv0start.c  (+8 -4)

@@ -1204,24 +1204,28 @@ innobase_start_or_create_for_mysql(void)
         return(DB_ERROR);
     }
 
+    /* over write innodb_file_io_threads */
+    srv_n_file_io_threads = 2 + srv_n_read_io_threads + srv_n_write_io_threads;
+
     /* Restrict the maximum number of file i/o threads */
     if (srv_n_file_io_threads > SRV_MAX_N_IO_THREADS) {
 
         srv_n_file_io_threads = SRV_MAX_N_IO_THREADS;
+        srv_n_read_io_threads = srv_n_write_io_threads =
+            (SRV_MAX_N_IO_THREADS - 2) / 2;
     }
 
     if (!os_aio_use_native_aio) {
         /* In simulated aio we currently have use only for 4 threads */
-        srv_n_file_io_threads = 4;
+        /*srv_n_file_io_threads = 4;*/
 
         os_aio_init(8 * SRV_N_PENDING_IOS_PER_THREAD
                 * srv_n_file_io_threads,
-                srv_n_file_io_threads,
-                SRV_MAX_N_PENDING_SYNC_IOS);
+                srv_n_read_io_threads, srv_n_write_io_threads,
+                SRV_MAX_N_PENDING_SYNC_IOS * 8);
     } else {
         os_aio_init(SRV_N_PENDING_IOS_PER_THREAD
                 * srv_n_file_io_threads,
-                srv_n_file_io_threads,
+                srv_n_read_io_threads, srv_n_write_io_threads,
                 SRV_MAX_N_PENDING_SYNC_IOS);
     }
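A small sketch of the thread-count arithmetic this hunk performs: the total file-I/O thread count is derived as two threads reserved for insert-buffer and log I/O plus the configured read and write threads, and is capped at SRV_MAX_N_IO_THREADS. The cap value and the settings below are assumptions for illustration; the real constant lives in the InnoDB headers.

#include <stdio.h>

#define SRV_MAX_N_IO_THREADS 100    /* assumed cap, not the real header value */

int main(void)
{
    unsigned long srv_n_read_io_threads = 64;   /* assumed settings */
    unsigned long srv_n_write_io_threads = 64;
    unsigned long srv_n_file_io_threads;

    /* same derivation as the patch */
    srv_n_file_io_threads = 2 + srv_n_read_io_threads + srv_n_write_io_threads;

    if (srv_n_file_io_threads > SRV_MAX_N_IO_THREADS) {
        srv_n_file_io_threads = SRV_MAX_N_IO_THREADS;
        srv_n_read_io_threads = srv_n_write_io_threads =
            (SRV_MAX_N_IO_THREADS - 2) / 2;
    }

    printf("file io threads %lu (read %lu, write %lu)\n",
           srv_n_file_io_threads, srv_n_read_io_threads,
           srv_n_write_io_threads);
    return 0;
}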
sync/sync0arr.c  (+21 -27)

@@ -307,13 +307,13 @@ sync_cell_event_reset(
 {
     if (type == SYNC_MUTEX) {
         return(os_event_reset(((mutex_t*) object)->event));
+#ifdef __WIN__
     } else if (type == RW_LOCK_WAIT_EX) {
         return(os_event_reset(
             ((rw_lock_t*) object)->wait_ex_event));
-    } else if (type == RW_LOCK_SHARED) {
-        return(os_event_reset(((rw_lock_t*) object)->s_event));
-    } else { /* RW_LOCK_EX */
-        return(os_event_reset(((rw_lock_t*) object)->x_event));
+#endif
+    } else {
+        return(os_event_reset(((rw_lock_t*) object)->event));
     }
 }

@@ -413,12 +413,15 @@ sync_array_wait_event(
     if (cell->request_type == SYNC_MUTEX) {
         event = ((mutex_t*) cell->wait_object)->event;
+#ifdef __WIN__
+    /* On windows if the thread about to wait is the one which
+    has set the state of the rw_lock to RW_LOCK_WAIT_EX, then
+    it waits on a special event i.e.: wait_ex_event. */
     } else if (cell->request_type == RW_LOCK_WAIT_EX) {
         event = ((rw_lock_t*) cell->wait_object)->wait_ex_event;
-    } else if (cell->request_type == RW_LOCK_SHARED) {
-        event = ((rw_lock_t*) cell->wait_object)->s_event;
+#endif
     } else {
-        event = ((rw_lock_t*) cell->wait_object)->x_event;
+        event = ((rw_lock_t*) cell->wait_object)->event;
     }
 
     cell->waiting = TRUE;

@@ -459,7 +462,6 @@ sync_array_cell_print(
     mutex_t*    mutex;
     rw_lock_t*  rwlock;
     ulint       type;
-    ulint       writer;
 
     type = cell->request_type;

@@ -489,10 +491,12 @@ sync_array_cell_print(
             (ulong) mutex->waiters);
 
     } else if (type == RW_LOCK_EX
+#ifdef __WIN__
            || type == RW_LOCK_WAIT_EX
+#endif
            || type == RW_LOCK_SHARED) {
 
-        fputs(type == RW_LOCK_SHARED ? "S-lock on" : "X-lock on", file);
+        fputs(type == RW_LOCK_EX ? "X-lock on" : "S-lock on", file);
 
         rwlock = cell->old_wait_rw_lock;

@@ -500,24 +504,22 @@ sync_array_cell_print(
             " RW-latch at %p created in file %s line %lu\n",
             (void*) rwlock, rwlock->cfile_name,
             (ulong) rwlock->cline);
-        writer = rw_lock_get_writer(rwlock);
-        if (writer != RW_LOCK_NOT_LOCKED) {
+        if (rwlock->writer != RW_LOCK_NOT_LOCKED) {
             fprintf(file,
                 "a writer (thread id %lu) has"
                 " reserved it in mode %s",
                 (ulong) os_thread_pf(rwlock->writer_thread),
-                writer == RW_LOCK_EX
+                rwlock->writer == RW_LOCK_EX
                 ? " exclusive\n"
                 : " wait exclusive\n");
         }
 
         fprintf(file,
-            "number of readers %lu, s_waiters flag %lu, x_waiters flag %lu\n"
+            "number of readers %lu, waiters flag %lu\n"
             "Last time read locked in file %s line %lu\n"
             "Last time write locked in file %s line %lu\n",
             (ulong) rwlock->reader_count,
-            (ulong) rwlock->s_waiters,
-            (ulong) (rwlock->x_waiters || rwlock->wait_ex_waiters),
+            (ulong) rwlock->waiters,
            rwlock->last_s_file_name,
            (ulong) rwlock->last_s_line,
            rwlock->last_x_file_name,

@@ -842,15 +844,11 @@ sync_array_object_signalled(
 /*========================*/
     sync_array_t*   arr)    /* in: wait array */
 {
-#ifdef HAVE_GCC_ATOMIC_BUILTINS
-    __sync_fetch_and_add(&(arr->sg_count),1);
-#else
     sync_array_enter(arr);
 
     arr->sg_count++;
 
     sync_array_exit(arr);
-#endif
 }
 /**************************************************************************

@@ -891,23 +889,19 @@ sync_arr_wake_threads_if_sema_free(void)
                 mutex = cell->wait_object;
                 os_event_set(mutex->event);
+#ifdef __WIN__
             } else if (cell->request_type == RW_LOCK_WAIT_EX) {
                 rw_lock_t*  lock;
 
                 lock = cell->wait_object;
                 os_event_set(lock->wait_ex_event);
-            } else if (cell->request_type == RW_LOCK_SHARED) {
-                rw_lock_t*  lock;
-
-                lock = cell->wait_object;
-                os_event_set(lock->s_event);
+#endif
             } else {
                 rw_lock_t*  lock;
 
                 lock = cell->wait_object;
-                os_event_set(lock->x_event);
+                os_event_set(lock->event);
             }
         }
     }
sync/sync0rw.c  (+35 -249)

@@ -119,7 +119,6 @@ rw_lock_create_func(
     /* If this is the very first time a synchronization object is
     created, then the following call initializes the sync system. */
 
-#ifndef HAVE_GCC_ATOMIC_BUILTINS
     mutex_create(rw_lock_get_mutex(lock), SYNC_NO_ORDER_CHECK);
 
     lock->mutex.cfile_name = cfile_name;

@@ -129,14 +128,8 @@ rw_lock_create_func(
     lock->mutex.cmutex_name = cmutex_name;
     lock->mutex.mutex_type = 1;
 #endif /* UNIV_DEBUG && !UNIV_HOTBACKUP */
-#endif /* !HAVE_GCC_ATOMIC_BUILTINS */
-
-#ifdef HAVE_GCC_ATOMIC_BUILTINS
-    lock->lock_word = RW_LOCK_BIAS;
-#endif
 
-    rw_lock_set_s_waiters(lock, 0);
-    rw_lock_set_x_waiters(lock, 0);
-    rw_lock_set_wx_waiters(lock, 0);
+    rw_lock_set_waiters(lock, 0);
     rw_lock_set_writer(lock, RW_LOCK_NOT_LOCKED);
     lock->writer_count = 0;
     rw_lock_set_reader_count(lock, 0);

@@ -158,9 +151,11 @@ rw_lock_create_func(
     lock->last_x_file_name = "not yet reserved";
     lock->last_s_line = 0;
     lock->last_x_line = 0;
-    lock->s_event = os_event_create(NULL);
-    lock->x_event = os_event_create(NULL);
+    lock->event = os_event_create(NULL);
+
+#ifdef __WIN__
     lock->wait_ex_event = os_event_create(NULL);
+#endif
 
     mutex_enter(&rw_lock_list_mutex);

@@ -186,21 +181,19 @@ rw_lock_free(
 {
     ut_ad(rw_lock_validate(lock));
     ut_a(rw_lock_get_writer(lock) == RW_LOCK_NOT_LOCKED);
-    ut_a(rw_lock_get_s_waiters(lock) == 0);
-    ut_a(rw_lock_get_x_waiters(lock) == 0);
-    ut_a(rw_lock_get_wx_waiters(lock) == 0);
+    ut_a(rw_lock_get_waiters(lock) == 0);
     ut_a(rw_lock_get_reader_count(lock) == 0);
 
     lock->magic_n = 0;
 
-#ifndef HAVE_GCC_ATOMIC_BUILTINS
     mutex_free(rw_lock_get_mutex(lock));
-#endif
 
     mutex_enter(&rw_lock_list_mutex);
-    os_event_free(lock->s_event);
-    os_event_free(lock->x_event);
+    os_event_free(lock->event);
+
+#ifdef __WIN__
     os_event_free(lock->wait_ex_event);
+#endif
 
     if (UT_LIST_GET_PREV(list, lock)) {
         ut_a(UT_LIST_GET_PREV(list, lock)->magic_n == RW_LOCK_MAGIC_N);

@@ -218,8 +211,6 @@ rw_lock_free(
 /**********************************************************************
 Checks that the rw-lock has been initialized and that there are no
 simultaneous shared and exclusive locks. */
-/* MEMO: If HAVE_GCC_ATOMIC_BUILTINS, we should use this function statically. */
-
 UNIV_INTERN
 ibool
 rw_lock_validate(

@@ -228,9 +219,7 @@ rw_lock_validate(
 {
     ut_a(lock);
 
-#ifndef HAVE_GCC_ATOMIC_BUILTINS
     mutex_enter(rw_lock_get_mutex(lock));
-#endif
 
     ut_a(lock->magic_n == RW_LOCK_MAGIC_N);
     ut_a((rw_lock_get_reader_count(lock) == 0)

@@ -238,17 +227,11 @@ rw_lock_validate(
     ut_a((rw_lock_get_writer(lock) == RW_LOCK_EX)
          || (rw_lock_get_writer(lock) == RW_LOCK_WAIT_EX)
         || (rw_lock_get_writer(lock) == RW_LOCK_NOT_LOCKED));
-    ut_a((rw_lock_get_s_waiters(lock) == 0)
-         || (rw_lock_get_s_waiters(lock) == 1));
-    ut_a((rw_lock_get_x_waiters(lock) == 0)
-         || (rw_lock_get_x_waiters(lock) == 1));
-    ut_a((rw_lock_get_wx_waiters(lock) == 0)
-         || (rw_lock_get_wx_waiters(lock) == 1));
+    ut_a((rw_lock_get_waiters(lock) == 0)
+         || (rw_lock_get_waiters(lock) == 1));
     ut_a((lock->writer != RW_LOCK_EX) || (lock->writer_count > 0));
 
-#ifndef HAVE_GCC_ATOMIC_BUILTINS
     mutex_exit(rw_lock_get_mutex(lock));
-#endif
 
     return(TRUE);
 }

@@ -275,14 +258,13 @@ rw_lock_s_lock_spin(
     ut_ad(rw_lock_validate(lock));
 
 lock_loop:
-    i = 0;
-spin_loop:
     rw_s_spin_wait_count++;
 
     /* Spin waiting for the writer field to become free */
+    i = 0;
 
-    while (i < SYNC_SPIN_ROUNDS
-           && rw_lock_get_writer(lock) != RW_LOCK_NOT_LOCKED) {
+    while (rw_lock_get_writer(lock) != RW_LOCK_NOT_LOCKED
+           && i < SYNC_SPIN_ROUNDS) {
         if (srv_spin_wait_delay) {
             ut_delay(ut_rnd_interval(0, srv_spin_wait_delay));
         }

@@ -303,27 +285,15 @@ rw_lock_s_lock_spin(
             lock->cfile_name, (ulong) lock->cline, (ulong) i);
     }
 
-#ifndef HAVE_GCC_ATOMIC_BUILTINS
     mutex_enter(rw_lock_get_mutex(lock));
-#endif
 
     /* We try once again to obtain the lock */
 
     if (TRUE == rw_lock_s_lock_low(lock, pass, file_name, line)) {
-#ifndef HAVE_GCC_ATOMIC_BUILTINS
         mutex_exit(rw_lock_get_mutex(lock));
-#endif
 
         return; /* Success */
     } else {
-#ifdef HAVE_GCC_ATOMIC_BUILTINS
-        /* like sync0sync.c doing */
-        i++;
-
-        if (i < SYNC_SPIN_ROUNDS) {
-            goto spin_loop;
-        }
-#endif
         /* If we get here, locking did not succeed, we may
         suspend the thread to wait in the wait array */

@@ -334,19 +304,9 @@ rw_lock_s_lock_spin(
             file_name, line, &index);
 
-        rw_lock_set_s_waiters(lock, 1);
+        rw_lock_set_waiters(lock, 1);
 
-#ifdef HAVE_GCC_ATOMIC_BUILTINS
-        /* like sync0sync.c doing */
-        for (i = 0; i < 4; i++) {
-            if (TRUE == rw_lock_s_lock_low(lock, pass, file_name, line)) {
-                sync_array_free_cell(sync_primary_wait_array, index);
-                return; /* Success */
-            }
-        }
-#else
         mutex_exit(rw_lock_get_mutex(lock));
-#endif
 
         if (srv_print_latch_waits) {
             fprintf(stderr,

@@ -383,19 +343,13 @@ rw_lock_x_lock_move_ownership(
 {
     ut_ad(rw_lock_is_locked(lock, RW_LOCK_EX));
 
-#ifndef HAVE_GCC_ATOMIC_BUILTINS
     mutex_enter(&(lock->mutex));
-#endif
 
     lock->writer_thread = os_thread_get_curr_id();
     lock->pass = 0;
 
-#ifndef HAVE_GCC_ATOMIC_BUILTINS
     mutex_exit(&(lock->mutex));
-#else
-    __sync_synchronize();
-#endif
 }
 /**********************************************************************

@@ -413,89 +367,6 @@ rw_lock_x_lock_low(
     const char* file_name,/* in: file name where lock requested */
     ulint       line)   /* in: line where requested */
 {
-#ifdef HAVE_GCC_ATOMIC_BUILTINS
-    os_thread_id_t  curr_thread = os_thread_get_curr_id();
-
-    /* try to lock writer */
-    if(__sync_lock_test_and_set(&(lock->writer),RW_LOCK_EX)
-            == RW_LOCK_NOT_LOCKED) {
-        /* success */
-        /* obtain RW_LOCK_WAIT_EX right */
-        lock->writer_thread = curr_thread;
-        lock->pass = pass;
-        lock->writer_is_wait_ex = TRUE;
-        /* atomic operation may be safer about memory order. */
-        __sync_synchronize();
-#ifdef UNIV_SYNC_DEBUG
-        rw_lock_add_debug_info(lock, pass, RW_LOCK_WAIT_EX,
-                       file_name, line);
-#endif
-    }
-
-    if (!os_thread_eq(lock->writer_thread, curr_thread)) {
-        return(RW_LOCK_NOT_LOCKED);
-    }
-
-    switch(rw_lock_get_writer(lock)) {
-        case RW_LOCK_WAIT_EX:
-        /* have right to try x-lock */
-        if (lock->lock_word == RW_LOCK_BIAS) {
-            /* try x-lock */
-            if(__sync_sub_and_fetch(&(lock->lock_word),
-                        RW_LOCK_BIAS) == 0) {
-                /* success */
-                lock->pass = pass;
-                lock->writer_is_wait_ex = FALSE;
-                __sync_fetch_and_add(&(lock->writer_count),1);
-
-#ifdef UNIV_SYNC_DEBUG
-                rw_lock_remove_debug_info(lock, pass, RW_LOCK_WAIT_EX);
-                rw_lock_add_debug_info(lock, pass, RW_LOCK_EX,
-                               file_name, line);
-#endif
-
-                lock->last_x_file_name = file_name;
-                lock->last_x_line = line;
-
-                /* Locking succeeded, we may return */
-                return(RW_LOCK_EX);
-            } else {
-                /* fail */
-                __sync_fetch_and_add(&(lock->lock_word),
-                             RW_LOCK_BIAS);
-            }
-        }
-        /* There are readers, we have to wait */
-        return(RW_LOCK_WAIT_EX);
-
-        break;
-
-        case RW_LOCK_EX:
-        /* already have x-lock */
-        if ((lock->pass == 0)&&(pass == 0)) {
-            __sync_fetch_and_add(&(lock->writer_count),1);
-
-#ifdef UNIV_SYNC_DEBUG
-            rw_lock_add_debug_info(lock, pass, RW_LOCK_EX,
-                           file_name, line);
-#endif
-
-            lock->last_x_file_name = file_name;
-            lock->last_x_line = line;
-
-            /* Locking succeeded, we may return */
-            return(RW_LOCK_EX);
-        }
-
-        return(RW_LOCK_NOT_LOCKED);
-
-        break;
-
-        default: /* ??? */
-        return(RW_LOCK_NOT_LOCKED);
-    }
-#else /* HAVE_GCC_ATOMIC_BUILTINS */
     ut_ad(mutex_own(rw_lock_get_mutex(lock)));
 
     if (rw_lock_get_writer(lock) == RW_LOCK_NOT_LOCKED) {

@@ -576,7 +447,6 @@ rw_lock_x_lock_low(
         /* Locking succeeded, we may return */
         return(RW_LOCK_EX);
     }
-#endif /* HAVE_GCC_ATOMIC_BUILTINS */
 
     /* Locking did not succeed */
     return(RW_LOCK_NOT_LOCKED);

@@ -602,33 +472,19 @@ rw_lock_x_lock_func(
     ulint   line)   /* in: line where requested */
 {
     ulint   index;  /* index of the reserved wait cell */
-    ulint   state   = RW_LOCK_NOT_LOCKED;   /* lock state acquired */
-#ifdef HAVE_GCC_ATOMIC_BUILTINS
-    ulint   prev_state = RW_LOCK_NOT_LOCKED;
-#endif
+    ulint   state;  /* lock state acquired */
     ulint   i;  /* spin round count */
 
     ut_ad(rw_lock_validate(lock));
 
 lock_loop:
-    i = 0;
-#ifdef HAVE_GCC_ATOMIC_BUILTINS
-    prev_state = state;
-#else
     /* Acquire the mutex protecting the rw-lock fields */
     mutex_enter_fast(&(lock->mutex));
-#endif
 
     state = rw_lock_x_lock_low(lock, pass, file_name, line);
 
-#ifdef HAVE_GCC_ATOMIC_BUILTINS
-    if (state != prev_state) i = 0; /* if progress, reset counter. */
-#else
     mutex_exit(&(lock->mutex));
-#endif
 
-spin_loop:
     if (state == RW_LOCK_EX) {
 
         return; /* Locking succeeded */

@@ -636,9 +492,10 @@ rw_lock_x_lock_func(
     } else if (state == RW_LOCK_NOT_LOCKED) {
 
         /* Spin waiting for the writer field to become free */
+        i = 0;
 
-        while (i < SYNC_SPIN_ROUNDS
-               && rw_lock_get_writer(lock) != RW_LOCK_NOT_LOCKED) {
+        while (rw_lock_get_writer(lock) != RW_LOCK_NOT_LOCKED
+               && i < SYNC_SPIN_ROUNDS) {
             if (srv_spin_wait_delay) {
                 ut_delay(ut_rnd_interval(0, srv_spin_wait_delay));

@@ -652,12 +509,9 @@ rw_lock_x_lock_func(
     } else if (state == RW_LOCK_WAIT_EX) {
 
         /* Spin waiting for the reader count field to become zero */
+        i = 0;
 
-#ifdef HAVE_GCC_ATOMIC_BUILTINS
-        while (lock->lock_word != RW_LOCK_BIAS
-#else
-        while (rw_lock_get_reader_count(lock) != 0
-#endif
+        while (rw_lock_get_reader_count(lock) != 0
                && i < SYNC_SPIN_ROUNDS) {
             if (srv_spin_wait_delay) {
                 ut_delay(ut_rnd_interval(0,

@@ -670,6 +524,7 @@ rw_lock_x_lock_func(
             os_thread_yield();
         }
+    }
     } else {
         i = 0;  /* Eliminate a compiler warning */
         ut_error;
     }

@@ -686,69 +541,34 @@ rw_lock_x_lock_func(
     /* We try once again to obtain the lock. Acquire the mutex protecting
     the rw-lock fields */
 
-#ifdef HAVE_GCC_ATOMIC_BUILTINS
-    prev_state = state;
-#else
     mutex_enter(rw_lock_get_mutex(lock));
-#endif
 
     state = rw_lock_x_lock_low(lock, pass, file_name, line);
 
-#ifdef HAVE_GCC_ATOMIC_BUILTINS
-    if (state != prev_state) i = 0; /* if progress, reset counter. */
-#endif
-
     if (state == RW_LOCK_EX) {
-#ifndef HAVE_GCC_ATOMIC_BUILTINS
         mutex_exit(rw_lock_get_mutex(lock));
-#endif
 
         return; /* Locking succeeded */
     }
 
-#ifdef HAVE_GCC_ATOMIC_BUILTINS
-    /* like sync0sync.c doing */
-    i++;
-
-    if (i < SYNC_SPIN_ROUNDS) {
-        goto spin_loop;
-    }
-#endif
-
     rw_x_system_call_count++;
 
     sync_array_reserve_cell(sync_primary_wait_array,
                 lock,
+#ifdef __WIN__
+                /* On windows RW_LOCK_WAIT_EX signifies
+                that this thread should wait on the
+                special wait_ex_event. */
                 (state == RW_LOCK_WAIT_EX)
                 ? RW_LOCK_WAIT_EX :
+#endif
                 RW_LOCK_EX,
                 file_name, line,
                 &index);
 
-    if (state == RW_LOCK_WAIT_EX) {
-        rw_lock_set_wx_waiters(lock, 1);
-    } else {
-        rw_lock_set_x_waiters(lock, 1);
-    }
+    rw_lock_set_waiters(lock, 1);
 
-#ifdef HAVE_GCC_ATOMIC_BUILTINS
-    /* like sync0sync.c doing */
-    for (i = 0; i < 4; i++) {
-        prev_state = state;
-        state = rw_lock_x_lock_low(lock, pass, file_name, line);
-
-        if (state == RW_LOCK_EX) {
-            sync_array_free_cell(sync_primary_wait_array, index);
-            return; /* Locking succeeded */
-        }
-
-        if (state != prev_state) {
-            /* retry! */
-            sync_array_free_cell(sync_primary_wait_array, index);
-            goto lock_loop;
-        }
-    }
-#else
     mutex_exit(rw_lock_get_mutex(lock));
-#endif
 
     if (srv_print_latch_waits) {
         fprintf(stderr,

@@ -910,9 +730,7 @@ rw_lock_own(
     ut_ad(lock);
     ut_ad(rw_lock_validate(lock));
 
-#ifndef HAVE_GCC_ATOMIC_BUILTINS
     mutex_enter(&(lock->mutex));
-#endif
 
     info = UT_LIST_GET_FIRST(lock->debug_list);

@@ -922,9 +740,7 @@ rw_lock_own(
         && (info->pass == 0)
         && (info->lock_type == lock_type)) {
 
-#ifndef HAVE_GCC_ATOMIC_BUILTINS
             mutex_exit(&(lock->mutex));
-#endif
             /* Found! */
 
             return(TRUE);

@@ -932,9 +748,7 @@ rw_lock_own(
         info = UT_LIST_GET_NEXT(list, info);
     }
 
-#ifndef HAVE_GCC_ATOMIC_BUILTINS
     mutex_exit(&(lock->mutex));
-#endif
 
     return(FALSE);
 }

@@ -956,25 +770,21 @@ rw_lock_is_locked(
     ut_ad(lock);
     ut_ad(rw_lock_validate(lock));
 
-#ifndef HAVE_GCC_ATOMIC_BUILTINS
     mutex_enter(&(lock->mutex));
-#endif
 
     if (lock_type == RW_LOCK_SHARED) {
         if (lock->reader_count > 0) {
             ret = TRUE;
         }
     } else if (lock_type == RW_LOCK_EX) {
-        if (rw_lock_get_writer(lock) == RW_LOCK_EX) {
+        if (lock->writer == RW_LOCK_EX) {
             ret = TRUE;
         }
     } else {
         ut_error;
     }
 
-#ifndef HAVE_GCC_ATOMIC_BUILTINS
     mutex_exit(&(lock->mutex));
-#endif
 
     return(ret);
 }

@@ -1004,26 +814,16 @@ rw_lock_list_print_info(
         count++;
 
-#ifndef HAVE_GCC_ATOMIC_BUILTINS
         mutex_enter(&(lock->mutex));
-#endif
         if ((rw_lock_get_writer(lock) != RW_LOCK_NOT_LOCKED)
             || (rw_lock_get_reader_count(lock) != 0)
-            || (rw_lock_get_s_waiters(lock) != 0)
-            || (rw_lock_get_x_waiters(lock) != 0)
-            || (rw_lock_get_wx_waiters(lock) != 0)) {
+            || (rw_lock_get_waiters(lock) != 0)) {
 
             fprintf(file, "RW-LOCK: %p ", (void*) lock);
 
-            if (rw_lock_get_s_waiters(lock)) {
-                fputs(" s_waiters for the lock exist,", file);
-            }
-            if (rw_lock_get_x_waiters(lock)) {
-                fputs(" x_waiters for the lock exist\n", file);
-            }
-            if (rw_lock_get_wx_waiters(lock)) {
-                fputs(" wait_ex_waiters for the lock exist\n", file);
+            if (rw_lock_get_waiters(lock)) {
+                fputs(" Waiters for the lock exist\n", file);
             } else {
                 putc('\n', file);
             }

@@ -1035,9 +835,7 @@ rw_lock_list_print_info(
             }
         }
 
-#ifndef HAVE_GCC_ATOMIC_BUILTINS
         mutex_exit(&(lock->mutex));
-#endif
         lock = UT_LIST_GET_NEXT(list, lock);
     }

@@ -1062,18 +860,10 @@ rw_lock_print(
     if ((rw_lock_get_writer(lock) != RW_LOCK_NOT_LOCKED)
         || (rw_lock_get_reader_count(lock) != 0)
-        || (rw_lock_get_s_waiters(lock) != 0)
-        || (rw_lock_get_x_waiters(lock) != 0)
-        || (rw_lock_get_wx_waiters(lock) != 0)) {
+        || (rw_lock_get_waiters(lock) != 0)) {
 
-        if (rw_lock_get_s_waiters(lock)) {
-            fputs(" s_waiters for the lock exist,", stderr);
-        }
-        if (rw_lock_get_x_waiters(lock)) {
-            fputs(" x_waiters for the lock exist\n", stderr);
-        }
-        if (rw_lock_get_wx_waiters(lock)) {
-            fputs(" wait_ex_waiters for the lock exist\n", stderr);
+        if (rw_lock_get_waiters(lock)) {
+            fputs(" Waiters for the lock exist\n", stderr);
         } else {
             putc('\n', stderr);
         }

@@ -1132,18 +922,14 @@ rw_lock_n_locked(void)
     lock = UT_LIST_GET_FIRST(rw_lock_list);
 
     while (lock != NULL) {
-#ifndef HAVE_GCC_ATOMIC_BUILTINS
        mutex_enter(rw_lock_get_mutex(lock));
-#endif
 
        if ((rw_lock_get_writer(lock) != RW_LOCK_NOT_LOCKED)
            || (rw_lock_get_reader_count(lock) != 0)) {
            count++;
        }
 
-#ifndef HAVE_GCC_ATOMIC_BUILTINS
        mutex_exit(rw_lock_get_mutex(lock));
-#endif
        lock = UT_LIST_GET_NEXT(list, lock);
     }