nexedi / MariaDB · Commit 59182189
authored Jan 28, 2008 by unknown

Merge trift2.:/MySQL/M41/mysql-4.1
into trift2.:/MySQL/M41/push-4.1

parents: b7b1bd62, 107d6251

Showing 14 changed files with 356 additions and 197 deletions (+356 -197)
innobase/buf/buf0buf.c          +115  -56
innobase/buf/buf0flu.c           +65  -27
innobase/buf/buf0lru.c           +24   -1
innobase/include/buf0buf.h       +28  -12
innobase/include/buf0buf.ic      +12   -7
innobase/include/sync0arr.h       +4   -9
innobase/include/sync0rw.h        +1   -0
innobase/include/sync0rw.ic       +4   -2
innobase/include/sync0sync.h      +1   -0
innobase/os/os0sync.c            +52   -3
innobase/sync/sync0arr.c         +41  -78
innobase/sync/sync0rw.c           +2   -0
innobase/sync/sync0sync.c         +6   -2
mysql-test/mysql-test-run.pl      +1   -0
innobase/buf/buf0buf.c

@@ -221,6 +221,9 @@ in the free list to the frames.
 5) When we have AWE enabled, we disable adaptive hash indexes.
 */

+/* Value in microseconds */
+static const int WAIT_FOR_READ = 20000;
+
 buf_pool_t*	buf_pool = NULL; /* The buffer buf_pool of the database */

 ulint		buf_dbg_counter	= 0; /* This is used to insert validation

@@ -466,6 +469,9 @@ buf_block_init(
 	block->n_pointers = 0;

+	mutex_create(&block->mutex);
+	mutex_set_level(&block->mutex, SYNC_BUF_BLOCK);
+
 	rw_lock_create(&(block->lock));
 	ut_ad(rw_lock_validate(&(block->lock)));

@@ -734,8 +740,15 @@ buf_awe_map_page_to_frame(
 	bck = UT_LIST_GET_LAST(buf_pool->awe_LRU_free_mapped);

 	while (bck) {
-		if (bck->state == BUF_BLOCK_FILE_PAGE
-		    && (bck->buf_fix_count != 0 || bck->io_fix != 0)) {
+		ibool	skip;
+
+		mutex_enter(&bck->mutex);
+		skip = (bck->state == BUF_BLOCK_FILE_PAGE
+			&& (bck->buf_fix_count != 0 || bck->io_fix != 0));
+
+		if (skip) {
+			mutex_exit(&bck->mutex);

 			/* We have to skip this */
 			bck = UT_LIST_GET_PREV(awe_LRU_free_mapped, bck);

@@ -768,6 +781,8 @@ buf_awe_map_page_to_frame(
 			buf_pool->n_pages_awe_remapped++;

+			mutex_exit(&bck->mutex);
+
 			return;
 		}
 	}

@@ -806,13 +821,22 @@ buf_block_make_young(
 /*=================*/
 	buf_block_t*	block)	/* in: block to make younger */
 {
+#ifdef UNIV_SYNC_DEBUG
+	ut_ad(!mutex_own(&(buf_pool->mutex)));
+#endif /* UNIV_SYNC_DEBUG */
+
+	/* Note that we read freed_page_clock's without holding any mutex:
+	this is allowed since the result is used only in heuristics */
+
 	if (buf_pool->freed_page_clock >= block->freed_page_clock
-				+ 1 + (buf_pool->curr_size / 1024)) {
+				+ 1 + (buf_pool->curr_size / 4)) {

+		mutex_enter(&buf_pool->mutex);
 		/* There has been freeing activity in the LRU list:
 		best to move to the head of the LRU list */

 		buf_LRU_make_block_young(block);
+
+		mutex_exit(&buf_pool->mutex);
 	}
 }

@@ -847,12 +871,16 @@ buf_block_free(
 /*===========*/
 	buf_block_t*	block)	/* in, own: block to be freed */
 {
-	ut_a(block->state != BUF_BLOCK_FILE_PAGE);
-
 	mutex_enter(&(buf_pool->mutex));

+	mutex_enter(&block->mutex);
+
+	ut_a(block->state != BUF_BLOCK_FILE_PAGE);
+
 	buf_LRU_block_free_non_file_page(block);

+	mutex_exit(&block->mutex);
+
 	mutex_exit(&(buf_pool->mutex));
 }

@@ -1071,9 +1099,8 @@ buf_page_get_gen(
 #endif
 	buf_pool->n_page_gets++;
 loop:
-	mutex_enter_fast(&(buf_pool->mutex));
-
 	block = NULL;
+	mutex_enter_fast(&(buf_pool->mutex));

 	if (guess) {
 		block = buf_block_align(guess);

@@ -1111,6 +1138,8 @@ buf_page_get_gen(
 		goto loop;
 	}

+	mutex_enter(&block->mutex);
+
 	ut_a(block->state == BUF_BLOCK_FILE_PAGE);

 	must_read = FALSE;

@@ -1120,9 +1149,9 @@ buf_page_get_gen(
 		must_read = TRUE;

 		if (mode == BUF_GET_IF_IN_POOL) {
 			/* The page is only being read to buffer */
-			mutex_exit(&(buf_pool->mutex));
+			mutex_exit(&buf_pool->mutex);
+			mutex_exit(&block->mutex);

 			return(NULL);
 		}

@@ -1146,7 +1175,7 @@ buf_page_get_gen(
 #else
 	buf_block_buf_fix_inc(block);
 #endif
-	buf_block_make_young(block);
+	mutex_exit(&buf_pool->mutex);

 	/* Check if this is the first access to the page */

@@ -1154,10 +1183,13 @@ buf_page_get_gen(
 	block->accessed = TRUE;

+	mutex_exit(&block->mutex);
+
+	buf_block_make_young(block);
+
 #ifdef UNIV_DEBUG_FILE_ACCESSES
 	ut_a(block->file_page_was_freed == FALSE);
 #endif

-	mutex_exit(&(buf_pool->mutex));
-
 #ifdef UNIV_DEBUG
 	buf_dbg_counter++;

@@ -1182,13 +1214,14 @@ buf_page_get_gen(
 		}

 		if (!success) {
-			mutex_enter(&(buf_pool->mutex));
+			mutex_enter(&block->mutex);

 			block->buf_fix_count--;
+
+			mutex_exit(&block->mutex);
 #ifdef UNIV_SYNC_DEBUG
 			rw_lock_s_unlock(&(block->debug_latch));
 #endif
-			mutex_exit(&(buf_pool->mutex));

 			return(NULL);
 		}

@@ -1199,18 +1232,16 @@ buf_page_get_gen(
 	completes */

 	for (;;) {
-		mutex_enter(&(buf_pool->mutex));
+		mutex_enter(&block->mutex);

 		if (block->io_fix == BUF_IO_READ) {

-			mutex_exit(&(buf_pool->mutex));
-
-			/* Sleep 20 milliseconds */
-			os_thread_sleep(20000);
+			mutex_exit(&block->mutex);
+
+			os_thread_sleep(WAIT_FOR_READ);
 		} else {

-			mutex_exit(&(buf_pool->mutex));
+			mutex_exit(&block->mutex);

 			break;
 		}

@@ -1269,13 +1300,13 @@ buf_page_optimistic_get_func(
 	ut_ad(mtr && block);
 	ut_ad((rw_latch == RW_S_LATCH) || (rw_latch == RW_X_LATCH));

-	mutex_enter(&(buf_pool->mutex));
-
 	/* If AWE is used, block may have a different frame now, e.g., NULL */

+	mutex_enter(&block->mutex);
+
 	if (block->state != BUF_BLOCK_FILE_PAGE || block->frame != guess) {

-		mutex_exit(&(buf_pool->mutex));
+		mutex_exit(&block->mutex);

 		return(FALSE);
 	}

@@ -1285,15 +1316,14 @@ buf_page_optimistic_get_func(
 #else
 	buf_block_buf_fix_inc(block);
 #endif
-	buf_block_make_young(block);
-
-	/* Check if this is the first access to the page */
-
 	accessed = block->accessed;
 	block->accessed = TRUE;

-	mutex_exit(&(buf_pool->mutex));
+	mutex_exit(&block->mutex);
+
+	buf_block_make_young(block);
+
+	/* Check if this is the first access to the page */

 	ut_ad(!ibuf_inside() || ibuf_page(block->space, block->offset));

@@ -1308,14 +1338,15 @@ buf_page_optimistic_get_func(
 	}

 	if (!success) {
-		mutex_enter(&(buf_pool->mutex));
+		mutex_enter(&block->mutex);

 		block->buf_fix_count--;
+
+		mutex_exit(&block->mutex);
 #ifdef UNIV_SYNC_DEBUG
 		rw_lock_s_unlock(&(block->debug_latch));
 #endif
-		mutex_exit(&(buf_pool->mutex));

 		return(FALSE);
 	}

@@ -1329,14 +1360,15 @@ buf_page_optimistic_get_func(
 		rw_lock_x_unlock(&(block->lock));
 	}

-	mutex_enter(&(buf_pool->mutex));
+	mutex_enter(&block->mutex);

 	block->buf_fix_count--;
+
+	mutex_exit(&block->mutex);
 #ifdef UNIV_SYNC_DEBUG
 	rw_lock_s_unlock(&(block->debug_latch));
 #endif
-	mutex_exit(&(buf_pool->mutex));

 	return(FALSE);
 }

@@ -1394,10 +1426,10 @@ buf_page_get_known_nowait(
 	ut_ad(mtr);
 	ut_ad((rw_latch == RW_S_LATCH) || (rw_latch == RW_X_LATCH));

-	mutex_enter(&(buf_pool->mutex));
-
 	block = buf_block_align(guess);

+	mutex_enter(&block->mutex);
+
 	if (block->state == BUF_BLOCK_REMOVE_HASH) {
 		/* Another thread is just freeing the block from the LRU list
 		of the buffer pool: do not try to access this page; this

@@ -1406,7 +1438,7 @@ buf_page_get_known_nowait(
 		we have already removed it from the page address hash table
 		of the buffer pool. */

-		mutex_exit(&(buf_pool->mutex));
+		mutex_exit(&block->mutex);

 		return(FALSE);
 	}

@@ -1418,12 +1450,12 @@ buf_page_get_known_nowait(
 #else
 	buf_block_buf_fix_inc(block);
 #endif
+	mutex_exit(&block->mutex);
+
 	if (mode == BUF_MAKE_YOUNG) {
 		buf_block_make_young(block);
 	}

-	mutex_exit(&(buf_pool->mutex));
-
 	ut_ad(!ibuf_inside() || (mode == BUF_KEEP_OLD));

 	if (rw_latch == RW_S_LATCH) {

@@ -1437,13 +1469,15 @@ buf_page_get_known_nowait(
 	}

 	if (!success) {
-		mutex_enter(&(buf_pool->mutex));
+		mutex_enter(&block->mutex);

 		block->buf_fix_count--;
+
+		mutex_exit(&block->mutex);
 #ifdef UNIV_SYNC_DEBUG
 		rw_lock_s_unlock(&(block->debug_latch));
 #endif
-		mutex_exit(&(buf_pool->mutex));

 		return(FALSE);
 	}

@@ -1491,7 +1525,6 @@ buf_page_init_for_backup_restore(
 	block->offset = offset;

 	block->lock_hash_val = 0;
-	block->lock_mutex = NULL;

 	block->freed_page_clock = 0;

@@ -1524,6 +1557,7 @@ buf_page_init(
 {
 #ifdef UNIV_SYNC_DEBUG
 	ut_ad(mutex_own(&(buf_pool->mutex)));
+	ut_ad(mutex_own(&(block->mutex)));
 #endif /* UNIV_SYNC_DEBUG */
 	ut_a(block->state != BUF_BLOCK_FILE_PAGE);

@@ -1537,7 +1571,6 @@ buf_page_init(
 	block->check_index_page_at_flush = FALSE;

 	block->lock_hash_val = lock_rec_hash(space, offset);
-	block->lock_mutex = NULL;

 	/* Insert into the hash table of file pages */

@@ -1630,6 +1663,7 @@ buf_page_init_for_read(
 	ut_a(block);

 	mutex_enter(&(buf_pool->mutex));
+	mutex_enter(&block->mutex);

 	if (fil_tablespace_deleted_or_being_deleted_in_mem(space,
 					tablespace_version)) {

@@ -1642,7 +1676,9 @@ buf_page_init_for_read(
 		/* The page belongs to a space which has been deleted or is
 		being deleted, or the page is already in buf_pool, return */

+		mutex_exit(&block->mutex);
 		mutex_exit(&(buf_pool->mutex));
+
 		buf_block_free(block);

 		if (mode == BUF_READ_IBUF_PAGES_ONLY) {

@@ -1662,6 +1698,7 @@ buf_page_init_for_read(
 	buf_LRU_add_block(block, TRUE);		/* TRUE == to old blocks */

 	block->io_fix = BUF_IO_READ;
+
 	buf_pool->n_pend_reads++;

 	/* We set a pass-type x-lock on the frame because then the same

@@ -1673,6 +1710,7 @@ buf_page_init_for_read(
 	rw_lock_x_lock_gen(&(block->lock), BUF_IO_READ);

+	mutex_exit(&block->mutex);
 	mutex_exit(&(buf_pool->mutex));

 	if (mode == BUF_READ_IBUF_PAGES_ONLY) {

@@ -1735,6 +1773,8 @@ buf_page_create(
 	block = free_block;

+	mutex_enter(&block->mutex);
+
 	buf_page_init(space, offset, block);

 	/* The block must be put to the LRU list */

@@ -1745,13 +1785,15 @@ buf_page_create(
 #else
 	buf_block_buf_fix_inc(block);
 #endif
-	buf_pool->n_pages_created++;
-
-	mutex_exit(&(buf_pool->mutex));
-
 	mtr_memo_push(mtr, block, MTR_MEMO_BUF_FIX);
+
 	block->accessed = TRUE;

+	buf_pool->n_pages_created++;
+
+	mutex_exit(&block->mutex);
+	mutex_exit(&(buf_pool->mutex));
+
 	/* Delete possible entries for the page from the insert buffer:
 	such can exist if the page belonged to an index which was dropped */

@@ -1800,6 +1842,12 @@ buf_page_io_complete(
 	ut_a(block->state == BUF_BLOCK_FILE_PAGE);

+	/* We do not need protect block->io_fix here by block->mutex to read
+	it because this is the only function where we can change the value
+	from BUF_IO_READ or BUF_IO_WRITE to some other value, and our code
+	ensures that this is the only thread that handles the i/o for this
+	block. */
+
 	io_type = block->io_fix;

 	if (io_type == BUF_IO_READ) {

@@ -1868,11 +1916,12 @@ buf_page_io_complete(
 		}
 	}

-	mutex_enter(&(buf_pool->mutex));
+	mutex_enter(&block->mutex);

 #ifdef UNIV_IBUF_DEBUG
 	ut_a(ibuf_count_get(block->space, block->offset) == 0);
 #endif
+	mutex_enter(&(buf_pool->mutex));
+
 	/* Because this thread which does the unlocking is not the same that
 	did the locking, we use a pass value != 0 in unlock, which simply
 	removes the newest lock debug record, without checking the thread

@@ -1911,6 +1960,7 @@ buf_page_io_complete(
 		}
 	}

+	mutex_exit(&block->mutex);
 	mutex_exit(&(buf_pool->mutex));

 	if (buf_debug_prints) {

@@ -1970,6 +2020,8 @@ buf_validate(void)
 		block = buf_pool_get_nth_block(buf_pool, i);

+		mutex_enter(&block->mutex);
+
 		if (block->state == BUF_BLOCK_FILE_PAGE) {

 			ut_a(buf_page_hash_get(block->space,

@@ -2013,6 +2065,8 @@ buf_validate(void)
 		} else if (block->state == BUF_BLOCK_NOT_USED) {
 			n_free++;
 		}
+
+		mutex_exit(&block->mutex);
 	}

 	if (n_lru + n_free > buf_pool->curr_size) {

@@ -2303,16 +2357,21 @@ buf_all_freed(void)
 		block = buf_pool_get_nth_block(buf_pool, i);

+		mutex_enter(&block->mutex);
+
 		if (block->state == BUF_BLOCK_FILE_PAGE) {

 			if (!buf_flush_ready_for_replace(block)) {

 				fprintf(stderr,
 			"Page %lu %lu still fixed or dirty\n",
 					(ulong) block->space,
 					(ulong) block->offset);
 				ut_error;
 			}
 		}
+
+		mutex_exit(&block->mutex);
 	}

 	mutex_exit(&(buf_pool->mutex));
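The buf0buf.c hunks above all apply one locking pattern: the buffer pool mutex keeps guarding the shared structures (LRU list, flush list, page hash), the new per-block mutex guards the fields of a single page (state, io_fix, buf_fix_count, accessed), and both are dropped before anything that can wait. A minimal standalone sketch of the usual acquisition order in these hunks, written with pthreads and invented names (buf_fix_block and these cut-down structs are illustrative only, not InnoDB's API):

#include <pthread.h>

/* Cut-down stand-ins for buf_pool_t and buf_block_t, just enough to show
   the acquisition order the patch uses in most call sites. */
struct buf_block {
	pthread_mutex_t	mutex;		/* the new per-block mutex */
	unsigned	buf_fix_count;	/* protected by block->mutex */
	unsigned	io_fix;		/* protected by block->mutex */
};

struct buf_pool {
	pthread_mutex_t	mutex;		/* the global buffer pool mutex */
};

/* Pin a block: pool mutex first, then the block mutex, and release both
   before doing anything that may block (file I/O, rw-lock waits). */
static void buf_fix_block(struct buf_pool *pool, struct buf_block *block)
{
	pthread_mutex_lock(&pool->mutex);
	pthread_mutex_lock(&block->mutex);

	block->buf_fix_count++;

	pthread_mutex_unlock(&block->mutex);
	pthread_mutex_unlock(&pool->mutex);
}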
innobase/buf/buf0flu.c

@@ -114,6 +114,7 @@ buf_flush_ready_for_replace(
 {
 #ifdef UNIV_SYNC_DEBUG
 	ut_ad(mutex_own(&(buf_pool->mutex)));
+	ut_ad(mutex_own(&block->mutex));
 #endif /* UNIV_SYNC_DEBUG */
 	if (block->state != BUF_BLOCK_FILE_PAGE) {
 		ut_print_timestamp(stderr);

@@ -148,6 +149,7 @@ buf_flush_ready_for_flush(
 {
 #ifdef UNIV_SYNC_DEBUG
 	ut_ad(mutex_own(&(buf_pool->mutex)));
+	ut_ad(mutex_own(&(block->mutex)));
 #endif /* UNIV_SYNC_DEBUG */
 	ut_a(block->state == BUF_BLOCK_FILE_PAGE);

@@ -533,8 +535,15 @@ buf_flush_try_page(
 	ut_a(!block || block->state == BUF_BLOCK_FILE_PAGE);

+	if (!block) {
+		mutex_exit(&(buf_pool->mutex));
+		return(0);
+	}
+
+	mutex_enter(&block->mutex);
+
 	if (flush_type == BUF_FLUSH_LIST
-		&& block && buf_flush_ready_for_flush(block, flush_type)) {
+		&& buf_flush_ready_for_flush(block, flush_type)) {

 		block->io_fix = BUF_IO_WRITE;

@@ -572,6 +581,7 @@ buf_flush_try_page(
 			locked = TRUE;
 		}

+		mutex_exit(&block->mutex);
 		mutex_exit(&(buf_pool->mutex));

 		if (!locked) {

@@ -590,7 +600,7 @@ buf_flush_try_page(
 		return(1);

-	} else if (flush_type == BUF_FLUSH_LRU && block
+	} else if (flush_type == BUF_FLUSH_LRU
 		&& buf_flush_ready_for_flush(block, flush_type)) {

 		/* VERY IMPORTANT:

@@ -631,13 +641,14 @@ buf_flush_try_page(
 		buf_pool mutex: this ensures that the latch is acquired
 		immediately. */

+		mutex_exit(&block->mutex);
 		mutex_exit(&(buf_pool->mutex));

 		buf_flush_write_block_low(block);

 		return(1);

-	} else if (flush_type == BUF_FLUSH_SINGLE_PAGE && block
+	} else if (flush_type == BUF_FLUSH_SINGLE_PAGE
 		&& buf_flush_ready_for_flush(block, flush_type)) {

 		block->io_fix = BUF_IO_WRITE;

@@ -664,6 +675,7 @@ buf_flush_try_page(
 		(buf_pool->n_flush[flush_type])++;

+		mutex_exit(&block->mutex);
 		mutex_exit(&(buf_pool->mutex));

 		rw_lock_s_lock_gen(&(block->lock), BUF_IO_WRITE);

@@ -678,11 +690,12 @@ buf_flush_try_page(
 		buf_flush_write_block_low(block);

 		return(1);
-	} else {
-		mutex_exit(&(buf_pool->mutex));
+	}

-		return(0);
-	}
+	mutex_exit(&block->mutex);
+	mutex_exit(&(buf_pool->mutex));
+
+	return(0);
 }

 /***************************************************************

@@ -727,34 +740,48 @@ buf_flush_try_neighbors(
 		block = buf_page_hash_get(space, i);
 		ut_a(!block || block->state == BUF_BLOCK_FILE_PAGE);

-		if (block && flush_type == BUF_FLUSH_LRU && i != offset
-						&& !block->old) {
+		if (!block) {
+
+			continue;
+
+		} else if (flush_type == BUF_FLUSH_LRU && i != offset
+			   && !block->old) {

 			/* We avoid flushing 'non-old' blocks in an LRU flush,
 			because the flushed blocks are soon freed */

 			continue;
-		}

-		if (block && buf_flush_ready_for_flush(block, flush_type)
-		    && (i == offset || block->buf_fix_count == 0)) {
-			/* We only try to flush those neighbors != offset
-			where the buf fix count is zero, as we then know that
-			we probably can latch the page without a semaphore
-			wait. Semaphore waits are expensive because we must
-			flush the doublewrite buffer before we start
-			waiting. */
+		} else {

-			mutex_exit(&(buf_pool->mutex));
+			mutex_enter(&block->mutex);

-			/* Note: as we release the buf_pool mutex above, in
-			buf_flush_try_page we cannot be sure the page is still
-			in a flushable state: therefore we check it again
-			inside that function. */
+			if (buf_flush_ready_for_flush(block, flush_type)
+			    && (i == offset || block->buf_fix_count == 0)) {

-			count += buf_flush_try_page(space, i, flush_type);
+				/* We only try to flush those
+				neighbors != offset where the buf fix count is
+				zero, as we then know that we probably can
+				latch the page without a semaphore wait.
+				Semaphore waits are expensive because we must
+				flush the doublewrite buffer before we start
+				waiting. */

-			mutex_enter(&(buf_pool->mutex));
+				mutex_exit(&block->mutex);
+				mutex_exit(&(buf_pool->mutex));
+
+				/* Note: as we release the buf_pool mutex
+				above, in buf_flush_try_page we cannot be sure
+				the page is still in a flushable state:
+				therefore we check it again inside that
+				function. */
+
+				count += buf_flush_try_page(space, i,
+								flush_type);
+
+				mutex_enter(&(buf_pool->mutex));
+			} else {
+				mutex_exit(&block->mutex);
+			}
 		}
 	}

@@ -848,12 +875,15 @@ buf_flush_batch(
 		while ((block != NULL) && !found) {
 			ut_a(block->state == BUF_BLOCK_FILE_PAGE);

+			mutex_enter(&block->mutex);
+
 			if (buf_flush_ready_for_flush(block, flush_type)) {

 				found = TRUE;
 				space = block->space;
 				offset = block->offset;

+				mutex_exit(&block->mutex);
 				mutex_exit(&(buf_pool->mutex));

 				old_page_count = page_count;

@@ -871,10 +901,14 @@ buf_flush_batch(
 			} else if (flush_type == BUF_FLUSH_LRU) {

+				mutex_exit(&block->mutex);
+
 				block = UT_LIST_GET_PREV(LRU, block);
 			} else {
 				ut_ad(flush_type == BUF_FLUSH_LIST);

+				mutex_exit(&block->mutex);
+
 				block = UT_LIST_GET_PREV(flush_list, block);
 			}
 		}

@@ -951,10 +985,14 @@ buf_flush_LRU_recommendation(void)
 			+ BUF_FLUSH_EXTRA_MARGIN)
 		&& (distance < BUF_LRU_FREE_SEARCH_LEN)) {

+		mutex_enter(&block->mutex);
+
 		if (buf_flush_ready_for_replace(block)) {
 			n_replaceable++;
 		}

+		mutex_exit(&block->mutex);
+
 		distance++;

 		block = UT_LIST_GET_PREV(LRU, block);
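buf_flush_try_neighbors now drops both mutexes before calling buf_flush_try_page, so the page can change state in between; the comment added in that hunk says the called function must re-check flushability under the locks it takes itself. A small self-contained sketch of that re-validation idiom (try_flush and struct page are invented for illustration, not the real functions):

#include <pthread.h>
#include <stdbool.h>

struct page {
	pthread_mutex_t	mutex;
	bool		dirty;		/* stands in for "ready for flush" */
};

/* The caller saw the page as flushable, then dropped its locks; this
   function must not trust that observation and checks again itself. */
static int try_flush(struct page *pg)
{
	int	flushed = 0;

	pthread_mutex_lock(&pg->mutex);
	if (pg->dirty) {		/* re-check now that we hold the mutex */
		pg->dirty = false;	/* stands in for the real write-out */
		flushed = 1;
	}
	pthread_mutex_unlock(&pg->mutex);

	return(flushed);
}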
innobase/buf/buf0lru.c

@@ -86,6 +86,9 @@ buf_LRU_invalidate_tablespace(
 	block = UT_LIST_GET_LAST(buf_pool->LRU);

 	while (block != NULL) {
+
+		mutex_enter(&block->mutex);
+
 		ut_a(block->state == BUF_BLOCK_FILE_PAGE);

 		if (block->space == id

@@ -112,6 +115,8 @@ buf_LRU_invalidate_tablespace(
 			if (block->is_hashed) {
 				page_no = block->offset;

+				mutex_exit(&block->mutex);
+
 				mutex_exit(&(buf_pool->mutex));

 				/* Note that the following call will acquire

@@ -138,6 +143,7 @@ buf_LRU_invalidate_tablespace(
 			buf_LRU_block_free_hashed_page(block);
 		}
 next_page:
+		mutex_exit(&block->mutex);
 		block = UT_LIST_GET_PREV(LRU, block);
 	}

@@ -211,6 +217,9 @@ buf_LRU_search_and_free_block(
 	while (block != NULL) {
 		ut_a(block->in_LRU_list);

+		mutex_enter(&block->mutex);
+
 		if (buf_flush_ready_for_replace(block)) {

 			if (buf_debug_prints) {

@@ -223,6 +232,7 @@ buf_LRU_search_and_free_block(
 			buf_LRU_block_remove_hashed_page(block);

 			mutex_exit(&(buf_pool->mutex));
+			mutex_exit(&block->mutex);

 			/* Remove possible adaptive hash index built on the
 			page; in the case of AWE the block may not have a

@@ -231,15 +241,21 @@ buf_LRU_search_and_free_block(
 			if (block->frame) {
 				btr_search_drop_page_hash_index(block->frame);
 			}

-			mutex_enter(&(buf_pool->mutex));
-
 			ut_a(block->buf_fix_count == 0);

+			mutex_enter(&(buf_pool->mutex));
+			mutex_enter(&block->mutex);
+
 			buf_LRU_block_free_hashed_page(block);

 			freed = TRUE;
+			mutex_exit(&block->mutex);

 			break;
 		}

+		mutex_exit(&block->mutex);
+
 		block = UT_LIST_GET_PREV(LRU, block);
 		distance++;

@@ -413,8 +429,12 @@ buf_LRU_get_free_block(void)
 		}
 	}

+	mutex_enter(&block->mutex);
+
 	block->state = BUF_BLOCK_READY_FOR_USE;

+	mutex_exit(&block->mutex);
+
 	mutex_exit(&(buf_pool->mutex));

 	if (started_monitor) {

@@ -815,6 +835,7 @@ buf_LRU_block_free_non_file_page(
 {
 #ifdef UNIV_SYNC_DEBUG
 	ut_ad(mutex_own(&(buf_pool->mutex)));
+	ut_ad(mutex_own(&block->mutex));
 #endif /* UNIV_SYNC_DEBUG */
 	ut_ad(block);

@@ -854,6 +875,7 @@ buf_LRU_block_remove_hashed_page(
 {
 #ifdef UNIV_SYNC_DEBUG
 	ut_ad(mutex_own(&(buf_pool->mutex)));
+	ut_ad(mutex_own(&block->mutex));
 #endif /* UNIV_SYNC_DEBUG */
 	ut_ad(block);

@@ -911,6 +933,7 @@ buf_LRU_block_free_hashed_page(
 {
 #ifdef UNIV_SYNC_DEBUG
 	ut_ad(mutex_own(&(buf_pool->mutex)));
+	ut_ad(mutex_own(&block->mutex));
 #endif /* UNIV_SYNC_DEBUG */
 	ut_a(block->state == BUF_BLOCK_REMOVE_HASH);
innobase/include/buf0buf.h

@@ -455,8 +455,8 @@ Gets the mutex number protecting the page record lock hash chain in the lock
 table. */
 UNIV_INLINE
 mutex_t*
-buf_frame_get_lock_mutex(
-/*=====================*/
+buf_frame_get_mutex(
+/*================*/
 			/* out: mutex */
 	byte*	ptr);	/* in: pointer to within a buffer frame */
 /***********************************************************************

@@ -699,7 +699,10 @@ struct buf_block_struct{
 	ulint		magic_n;	/* magic number to check */
 	ulint		state;		/* state of the control block:
-					BUF_BLOCK_NOT_USED, ... */
+					BUF_BLOCK_NOT_USED, ...; changing
+					this is only allowed when a thread
+					has BOTH the buffer pool mutex AND
+					block->mutex locked */
 	byte*		frame;		/* pointer to buffer frame which
 					is of size UNIV_PAGE_SIZE, and
 					aligned to an address divisible by

@@ -717,8 +720,12 @@ struct buf_block_struct{
 	ulint		offset;		/* page number within the space */
 	ulint		lock_hash_val;	/* hashed value of the page address
 					in the record lock hash table */
-	mutex_t*	lock_mutex;	/* mutex protecting the chain in the
-					record lock hash table */
+	mutex_t		mutex;		/* mutex protecting this block:
+					state (also protected by the buffer
+					pool mutex), io_fix, buf_fix_count,
+					and accessed; we introduce this new
+					mutex in InnoDB-5.1 to relieve
+					contention on the buffer pool mutex */
 	rw_lock_t	lock;		/* read-write lock of the buffer
 					frame */
 	buf_block_t*	hash;		/* node used in chaining to the page

@@ -774,20 +781,27 @@ struct buf_block_struct{
 					in heuristic algorithms, because of
 					the possibility of a wrap-around! */
 	ulint		freed_page_clock;/* the value of freed_page_clock
-					buffer pool when this block was
-					last time put to the head of the
-					LRU list */
+					of the buffer pool when this block was
+					the last time put to the head of the
+					LRU list; a thread is allowed to
+					read this for heuristic purposes
+					without holding any mutex or latch */
 	ibool		old;		/* TRUE if the block is in the old
 					blocks in the LRU list */
 	ibool		accessed;	/* TRUE if the page has been accessed
 					while in the buffer pool: read-ahead
 					may read in pages which have not been
-					accessed yet */
+					accessed yet; this is protected by
+					block->mutex; a thread is allowed to
+					read this for heuristic purposes
+					without holding any mutex or latch */
 	ulint		buf_fix_count;	/* count of how manyfold this block
-					is currently bufferfixed */
+					is currently bufferfixed; this is
+					protected by block->mutex */
 	ulint		io_fix;		/* if a read is pending to the frame,
 					io_fix is BUF_IO_READ, in the case
-					of a write BUF_IO_WRITE, otherwise 0 */
+					of a write BUF_IO_WRITE, otherwise 0;
+					this is protected by block->mutex */
 	/* 4. Optimistic search field */
 	dulint		modify_clock;	/* this clock is incremented every

@@ -940,7 +954,9 @@ struct buf_pool_struct{
 					number of buffer blocks removed from
 					the end of the LRU list; NOTE that
 					this counter may wrap around at 4
-					billion! */
+					billion! A thread is allowed to
+					read this for heuristic purposes
+					without holding any mutex or latch */
 	ulint		LRU_flush_ended;/* when an LRU flush ends for a page,
 					this is incremented by one; this is
 					set to zero when a buffer block is
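The new comments on freed_page_clock and accessed spell out a deliberate relaxation: a thread may read these fields without any mutex because they only feed heuristics, and the action taken afterwards is still performed under the proper mutex (as buf_block_make_young does in buf0buf.c above). A compilable sketch of that pattern, with simplified stand-in structs rather than the real InnoDB types; the unlocked comparison may see a stale value, which the pattern accepts by design:

#include <pthread.h>

struct pool {
	pthread_mutex_t	mutex;
	unsigned long	freed_page_clock;	/* read without latch, heuristically */
	unsigned long	curr_size;
};

struct block {
	unsigned long	freed_page_clock;
};

/* Unlocked read for the heuristic decision; the expensive LRU move is
   still done under the pool mutex. */
static void make_young(struct pool *pool, struct block *block)
{
	if (pool->freed_page_clock
	    >= block->freed_page_clock + 1 + (pool->curr_size / 4)) {

		pthread_mutex_lock(&pool->mutex);
		/* ... move the block to the head of the LRU list ... */
		pthread_mutex_unlock(&pool->mutex);
	}
}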
innobase/include/buf0buf.ic

@@ -333,8 +333,8 @@ Gets the mutex number protecting the page record lock hash chain in the lock
 table. */
 UNIV_INLINE
 mutex_t*
-buf_frame_get_lock_mutex(
-/*=====================*/
+buf_frame_get_mutex(
+/*================*/
 			/* out: mutex */
 	byte*	ptr)	/* in: pointer to within a buffer frame */
 {

@@ -342,7 +342,7 @@ buf_frame_get_mutex(
 	block = buf_block_align(ptr);

-	return(block->lock_mutex);
+	return(&block->mutex);
 }
 /*************************************************************************

@@ -521,6 +521,7 @@ buf_block_buf_fix_inc_debug(
 	ret = rw_lock_s_lock_func_nowait(&(block->debug_latch), file, line);

 	ut_ad(ret == TRUE);
+	ut_ad(mutex_own(&block->mutex));
 #endif
 	block->buf_fix_count++;
 }

@@ -533,6 +534,9 @@ buf_block_buf_fix_inc(
 /*==================*/
 	buf_block_t*	block)	/* in: block to bufferfix */
 {
+#ifdef UNIV_SYNC_DEBUG
+	ut_ad(mutex_own(&block->mutex));
+#endif
 	block->buf_fix_count++;
 }
 #endif /* UNIV_SYNC_DEBUG */

@@ -627,23 +631,24 @@ buf_page_release(
 	ut_ad(block);

-	mutex_enter_fast(&(buf_pool->mutex));
-
 	ut_a(block->state == BUF_BLOCK_FILE_PAGE);
 	ut_a(block->buf_fix_count > 0);

 	if (rw_latch == RW_X_LATCH && mtr->modifications) {
+		mutex_enter(&buf_pool->mutex);
 		buf_flush_note_modification(block, mtr);
+		mutex_exit(&buf_pool->mutex);
 	}

+	mutex_enter(&block->mutex);
+
 #ifdef UNIV_SYNC_DEBUG
 	rw_lock_s_unlock(&(block->debug_latch));
 #endif
 	buf_fix_count = block->buf_fix_count;
 	block->buf_fix_count = buf_fix_count - 1;

-	mutex_exit(&(buf_pool->mutex));
+	mutex_exit(&block->mutex);

 	if (rw_latch == RW_S_LATCH) {
 		rw_lock_s_unlock(&(block->lock));
innobase/include/sync0arr.h

@@ -75,17 +75,12 @@ sync_array_free_cell(
 	sync_array_t*	arr,	/* in: wait array */
 	ulint		index);	/* in: index of the cell in array */
 /**************************************************************************
-Looks for the cells in the wait array which refer to the wait object
-specified, and sets their corresponding events to the signaled state. In this
-way releases the threads waiting for the object to contend for the object.
-It is possible that no such cell is found, in which case does nothing. */
+Note that one of the wait objects was signalled. */

 void
-sync_array_signal_object(
-/*=====================*/
-	sync_array_t*	arr,	/* in: wait array */
-	void*		object);/* in: wait object */
+sync_array_object_signalled(
+/*========================*/
+	sync_array_t*	arr);	/* in: wait array */
 /**************************************************************************
 If the wakeup algorithm does not work perfectly at semaphore relases,
 this function will do the waking (see the comment in mutex_exit). This
innobase/include/sync0rw.h

@@ -410,6 +410,7 @@ blocked by readers, a writer may queue for the lock by setting the writer
 field. Then no new readers are allowed in. */

 struct rw_lock_struct {
+	os_event_t	event;	/* Used by sync0arr.c for thread queueing */
 	ulint	reader_count;	/* Number of readers who have locked this
 				lock in the shared mode */
 	ulint	writer;		/* This field is set to RW_LOCK_EX if there
innobase/include/sync0rw.ic

@@ -380,7 +380,8 @@ rw_lock_s_unlock_func(
 	mutex_exit(mutex);

 	if (sg == TRUE) {
-		sync_array_signal_object(sync_primary_wait_array, lock);
+		os_event_set(lock->event);
+		sync_array_object_signalled(sync_primary_wait_array);
 	}

 	ut_ad(rw_lock_validate(lock));

@@ -459,7 +460,8 @@ rw_lock_x_unlock_func(
 	mutex_exit(&(lock->mutex));

 	if (sg == TRUE) {
-		sync_array_signal_object(sync_primary_wait_array, lock);
+		os_event_set(lock->event);
+		sync_array_object_signalled(sync_primary_wait_array);
 	}

 	ut_ad(rw_lock_validate(lock));
...
innobase/include/sync0sync.h
View file @
59182189
...
@@ -447,6 +447,7 @@ Do not use its fields directly! The structure used in the spin lock
...
@@ -447,6 +447,7 @@ Do not use its fields directly! The structure used in the spin lock
implementation of a mutual exclusion semaphore. */
implementation of a mutual exclusion semaphore. */
struct
mutex_struct
{
struct
mutex_struct
{
os_event_t
event
;
/* Used by sync0arr.c for the wait queue */
ulint
lock_word
;
/* This ulint is the target of the atomic
ulint
lock_word
;
/* This ulint is the target of the atomic
test-and-set instruction in Win32 */
test-and-set instruction in Win32 */
#if !defined(_WIN32) || !defined(UNIV_CAN_USE_X86_ASSEMBLER)
#if !defined(_WIN32) || !defined(UNIV_CAN_USE_X86_ASSEMBLER)
...
...
innobase/os/os0sync.c

@@ -21,6 +21,7 @@ Created 9/6/1995 Heikki Tuuri
 /* Type definition for an operating system mutex struct */
 struct os_mutex_struct{
+	os_event_t	event;	/* Used by sync0arr.c for queing threads */
 	void*		handle;	/* OS handle to mutex */
 	ulint		count;	/* we use this counter to check
 				that the same thread does not

@@ -35,6 +36,7 @@ struct os_mutex_struct{
 /* Mutex protecting counts and the lists of OS mutexes and events */
 os_mutex_t	os_sync_mutex;
 ibool		os_sync_mutex_inited	= FALSE;
+ibool		os_sync_free_called	= FALSE;

 /* This is incremented by 1 in os_thread_create and decremented by 1 in
 os_thread_exit */

@@ -50,6 +52,10 @@ ulint os_event_count = 0;
 ulint	os_mutex_count		= 0;
 ulint	os_fast_mutex_count	= 0;

+/* Because a mutex is embedded inside an event and there is an
+event embedded inside a mutex, on free, this generates a recursive call.
+This version of the free event function doesn't acquire the global lock */
+static void os_event_free_internal(os_event_t event);
+
 /*************************************************************
 Initializes global event and OS 'slow' mutex lists. */

@@ -76,6 +82,7 @@ os_sync_free(void)
 	os_event_t	event;
 	os_mutex_t	mutex;

+	os_sync_free_called = TRUE;
 	event = UT_LIST_GET_FIRST(os_event_list);

 	while (event) {

@@ -99,6 +106,7 @@ os_sync_free(void)
 		mutex = UT_LIST_GET_FIRST(os_mutex_list);
 	}
+	os_sync_free_called = FALSE;
 }
 /*************************************************************

@@ -146,14 +154,21 @@ os_event_create(
 	event->signal_count = 0;
 #endif /* __WIN__ */

-	/* Put to the list of events */
-	os_mutex_enter(os_sync_mutex);
+	/* The os_sync_mutex can be NULL because during startup an event
+	can be created [ because it's embedded in the mutex/rwlock ] before
+	this module has been initialized */
+
+	if (os_sync_mutex != NULL) {
+		os_mutex_enter(os_sync_mutex);
+	}

+	/* Put to the list of events */
 	UT_LIST_ADD_FIRST(os_event_list, os_event_list, event);

 	os_event_count++;

-	os_mutex_exit(os_sync_mutex);
+	if (os_sync_mutex != NULL) {
+		os_mutex_exit(os_sync_mutex);
+	}

 	return(event);
 }

@@ -255,6 +270,35 @@ os_event_reset(
 #endif
 }

+/**************************************************************
+Frees an event object, without acquiring the global lock. */
+static
+void
+os_event_free_internal(
+/*===================*/
+	os_event_t	event)	/* in: event to free */
+{
+#ifdef __WIN__
+	ut_a(event);
+
+	ut_a(CloseHandle(event->handle));
+#else
+	ut_a(event);
+
+	/* This is to avoid freeing the mutex twice */
+	os_fast_mutex_free(&(event->os_mutex));
+
+	ut_a(0 == pthread_cond_destroy(&(event->cond_var)));
+#endif
+	/* Remove from the list of events */
+
+	UT_LIST_REMOVE(os_event_list, os_event_list, event);
+
+	os_event_count--;
+
+	ut_free(event);
+}
+
 /**************************************************************
 Frees an event object. */

@@ -456,6 +500,7 @@ os_mutex_create(
 	mutex_str->handle = mutex;
 	mutex_str->count = 0;
+	mutex_str->event = os_event_create(NULL);

 	if (os_sync_mutex_inited) {
 		/* When creating os_sync_mutex itself we cannot reserve it */

@@ -532,6 +577,10 @@ os_mutex_free(
 {
 	ut_a(mutex);

+	if (!os_sync_free_called) {
+		os_event_free_internal(mutex->event);
+	}
+
 	if (os_sync_mutex_inited) {
 		os_mutex_enter(os_sync_mutex);
 	}
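Every mutex (and rw-lock) now embeds an os_event_t, and an os_event in turn contains a mutex, so freeing one from the other through the normal path would re-take the global os_sync_mutex; the new os_event_free_internal and the os_sync_free_called flag break that cycle. A rough sketch of the shape of that guard, with stub types standing in for the real ones (nothing below is the actual InnoDB implementation):

#include <stdbool.h>
#include <stddef.h>

static bool os_sync_free_called = false;	/* set while the global free runs */

struct os_event { int placeholder; };
struct os_mutex { struct os_event *event; };

/* Release the event's OS resources without touching the global lock;
   body elided, kept as a stub for illustration. */
static void os_event_free_internal(struct os_event *event)
{
	(void) event;
}

static void os_mutex_free(struct os_mutex *mutex)
{
	if (!os_sync_free_called) {
		/* normal path: free the embedded event ourselves */
		os_event_free_internal(mutex->event);
	}
	/* ... then free the OS mutex itself; during os_sync_free() the
	event list is walked separately, so it is skipped here ... */
}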
innobase/sync/sync0arr.c

@@ -62,9 +62,6 @@ struct sync_cell_struct {
 	ibool		waiting;	/* TRUE if the thread has already
 					called sync_array_event_wait
 					on this cell */
-	ibool		event_set;	/* TRUE if the event is set */
-	os_event_t	event;		/* operating system event
-					semaphore handle */
 	time_t		reservation_time;/* time when the thread reserved
 					the wait cell */
 };

@@ -218,10 +215,7 @@ sync_array_create(
 	for (i = 0; i < n_cells; i++) {
 		cell = sync_array_get_nth_cell(arr, i);
 		cell->wait_object = NULL;
-
-		/* Create an operating system event semaphore with no name */
-		cell->event = os_event_create(NULL);
-		cell->event_set = FALSE; /* it is created in reset state */
+		cell->waiting = FALSE;
 	}

 	return(arr);

@@ -235,19 +229,12 @@ sync_array_free(
 /*============*/
 	sync_array_t*	arr)	/* in, own: sync wait array */
 {
-	ulint		i;
-	sync_cell_t*	cell;
 	ulint		protection;

 	ut_a(arr->n_reserved == 0);

 	sync_array_validate(arr);

-	for (i = 0; i < arr->n_cells; i++) {
-		cell = sync_array_get_nth_cell(arr, i);
-		os_event_free(cell->event);
-	}
-
 	protection = arr->protection;

 	/* Release the mutex protecting the wait array complex */

@@ -292,28 +279,20 @@ sync_array_validate(
 	sync_array_exit(arr);
 }

-/***********************************************************************
-Puts the cell event in set state. */
-static
-void
-sync_cell_event_set(
-/*================*/
-	sync_cell_t*	cell)	/* in: array cell */
-{
-	os_event_set(cell->event);
-	cell->event_set = TRUE;
-}
-
 /***********************************************************************
 Puts the cell event in reset state. */
 static
 void
 sync_cell_event_reset(
 /*==================*/
-	sync_cell_t*	cell)	/* in: array cell */
+	ulint		type,	/* in: lock type mutex/rw_lock */
+	void*		object)	/* in: the rw_lock/mutex object */
 {
-	os_event_reset(cell->event);
-	cell->event_set = FALSE;
+	if (type == SYNC_MUTEX) {
+		os_event_reset(((mutex_t *) object)->event);
+	} else {
+		os_event_reset(((rw_lock_t *) object)->event);
+	}
 }

 /**********************************************************************

@@ -346,14 +325,7 @@ sync_array_reserve_cell(
 		if (cell->wait_object == NULL) {

-			/* Make sure the event is reset */
-			if (cell->event_set) {
-				sync_cell_event_reset(cell);
-			}
-
-			cell->reservation_time = time(NULL);
-			cell->thread = os_thread_get_curr_id();
-
+			cell->waiting = FALSE;
 			cell->wait_object = object;

 			if (type == SYNC_MUTEX) {

@@ -363,7 +335,6 @@ sync_array_reserve_cell(
 			}

 			cell->request_type = type;
-			cell->waiting = FALSE;

 			cell->file = file;
 			cell->line = line;

@@ -374,6 +345,13 @@ sync_array_reserve_cell(
 			sync_array_exit(arr);

+			/* Make sure the event is reset */
+			sync_cell_event_reset(type, object);
+
+			cell->reservation_time = time(NULL);
+
+			cell->thread = os_thread_get_curr_id();
+
 			return;
 		}
 	}

@@ -408,7 +386,12 @@ sync_array_wait_event(
 	ut_a(!cell->waiting);
 	ut_ad(os_thread_get_curr_id() == cell->thread);

-	event = cell->event;
+	if (cell->request_type == SYNC_MUTEX) {
+		event = ((mutex_t*) cell->wait_object)->event;
+	} else {
+		event = ((rw_lock_t*) cell->wait_object)->event;
+	}
+
 	cell->waiting = TRUE;

 #ifdef UNIV_SYNC_DEBUG

@@ -510,10 +493,6 @@ sync_array_cell_print(
 	if (!cell->waiting) {
 		fputs("wait has ended\n", file);
 	}

-	if (cell->event_set) {
-		fputs("wait is ending\n", file);
-	}
-
 }

 #ifdef UNIV_SYNC_DEBUG

@@ -623,7 +602,7 @@ sync_array_detect_deadlock(
 	depth++;

-	if (cell->event_set || !cell->waiting) {
+	if (!cell->waiting) {

 		return(FALSE); /* No deadlock here */
 	}

@@ -802,6 +781,7 @@ sync_array_free_cell(
 	ut_a(cell->wait_object != NULL);

+	cell->waiting = FALSE;
 	cell->wait_object = NULL;

 	ut_a(arr->n_reserved > 0);

@@ -811,44 +791,17 @@ sync_array_free_cell(
 }

 /**************************************************************************
-Looks for the cells in the wait array which refer to the wait object
-specified, and sets their corresponding events to the signaled state. In this
-way releases the threads waiting for the object to contend for the object.
-It is possible that no such cell is found, in which case does nothing. */
+Increments the signalled count. */

 void
-sync_array_signal_object(
-/*=====================*/
-	sync_array_t*	arr,	/* in: wait array */
-	void*		object)	/* in: wait object */
+sync_array_object_signalled(
+/*========================*/
+	sync_array_t*	arr)	/* in: wait array */
 {
-	sync_cell_t*	cell;
-	ulint		count;
-	ulint		i;
-
 	sync_array_enter(arr);

 	arr->sg_count++;

-	i = 0;
-	count = 0;
-
-	while (count < arr->n_reserved) {
-
-		cell = sync_array_get_nth_cell(arr, i);
-
-		if (cell->wait_object != NULL) {
-
-			count++;
-			if (cell->wait_object == object) {
-
-				sync_cell_event_set(cell);
-			}
-		}
-
-		i++;
-	}
-
 	sync_array_exit(arr);
 }

@@ -881,7 +834,17 @@ sync_arr_wake_threads_if_sema_free(void)
 			if (sync_arr_cell_can_wake_up(cell)) {

-				sync_cell_event_set(cell);
+				if (cell->request_type == SYNC_MUTEX) {
+					mutex_t*	mutex;
+
+					mutex = cell->wait_object;
+					os_event_set(mutex->event);
+				} else {
+					rw_lock_t*	lock;
+
+					lock = cell->wait_object;
+					os_event_set(lock->event);
+				}
 			}
 		}

@@ -911,7 +874,7 @@ sync_array_print_long_waits(void)
 		cell = sync_array_get_nth_cell(sync_primary_wait_array, i);

-		if (cell->wait_object != NULL
+		if (cell->wait_object != NULL && cell->waiting
 		    && difftime(time(NULL), cell->reservation_time) > 240) {
 			fputs("InnoDB: Warning: a long semaphore wait:\n",
 				stderr);

@@ -919,7 +882,7 @@ sync_array_print_long_waits(void)
 			noticed = TRUE;
 		}

-		if (cell->wait_object != NULL
+		if (cell->wait_object != NULL && cell->waiting
 		    && difftime(time(NULL), cell->reservation_time)
 		    > fatal_timeout) {
 			fatal = TRUE;
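The wait array no longer owns one event per cell: the event now lives in the mutex or rw-lock being waited for, the waiter resets that object's event before queueing and then blocks on it, and a releaser wakes all of that object's waiters with a single os_event_set while sync_array_object_signalled only bumps a counter. The following self-contained sketch shows a portable event with that set/reset/wait behaviour; struct event and these helpers are stand-ins, not the real os_event_t:

#include <pthread.h>
#include <stdbool.h>

struct event {
	pthread_mutex_t	m;
	pthread_cond_t	c;
	bool		is_set;
};

static void event_reset(struct event *e)	/* waiter, before queueing */
{
	pthread_mutex_lock(&e->m);
	e->is_set = false;
	pthread_mutex_unlock(&e->m);
}

static void event_set(struct event *e)		/* lock releaser */
{
	pthread_mutex_lock(&e->m);
	e->is_set = true;
	pthread_cond_broadcast(&e->c);		/* wakes every waiter at once */
	pthread_mutex_unlock(&e->m);
}

static void event_wait(struct event *e)	/* queued thread */
{
	pthread_mutex_lock(&e->m);
	while (!e->is_set) {
		pthread_cond_wait(&e->c, &e->m);
	}
	pthread_mutex_unlock(&e->m);
}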
innobase/sync/sync0rw.c

@@ -123,6 +123,7 @@ rw_lock_create_func(
 	lock->last_x_file_name = "not yet reserved";
 	lock->last_s_line = 0;
 	lock->last_x_line = 0;
+	lock->event = os_event_create(NULL);

 	mutex_enter(&rw_lock_list_mutex);

@@ -158,6 +159,7 @@ rw_lock_free(
 	mutex_free(rw_lock_get_mutex(lock));

 	mutex_enter(&rw_lock_list_mutex);
+	os_event_free(lock->event);

 	if (UT_LIST_GET_PREV(list, lock)) {
 		ut_a(UT_LIST_GET_PREV(list, lock)->magic_n == RW_LOCK_MAGIC_N);
innobase/sync/sync0sync.c

@@ -210,6 +210,7 @@ mutex_create_func(
 	os_fast_mutex_init(&(mutex->os_fast_mutex));
 	mutex->lock_word = 0;
 #endif
+	mutex->event = os_event_create(NULL);
 	mutex_set_waiters(mutex, 0);
 	mutex->magic_n = MUTEX_MAGIC_N;
 #ifdef UNIV_SYNC_DEBUG

@@ -275,6 +276,8 @@ mutex_free(
 		mutex_exit(&mutex_list_mutex);
 	}

+	os_event_free(mutex->event);
+
 #if !defined(_WIN32) || !defined(UNIV_CAN_USE_X86_ASSEMBLER)
 	os_fast_mutex_free(&(mutex->os_fast_mutex));
 #endif

@@ -498,8 +501,8 @@ mutex_signal_object(
 	/* The memory order of resetting the waiters field and
 	signaling the object is important. See LEMMA 1 above. */

-	sync_array_signal_object(sync_primary_wait_array, mutex);
+	os_event_set(mutex->event);
+	sync_array_object_signalled(sync_primary_wait_array);
 }

 #ifdef UNIV_SYNC_DEBUG

@@ -1047,6 +1050,7 @@ sync_thread_add_level(
 		ut_a(sync_thread_levels_g(array, SYNC_PURGE_SYS));
 	} else if (level == SYNC_TREE_NODE) {
 		ut_a(sync_thread_levels_contain(array, SYNC_INDEX_TREE)
+		     || sync_thread_levels_contain(array, SYNC_DICT_OPERATION)
 		     || sync_thread_levels_g(array, SYNC_TREE_NODE - 1));
 	} else if (level == SYNC_TREE_NODE_FROM_HASH) {
 		ut_a(1);
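The comment kept in mutex_signal_object stresses that the waiters field must be reset before the mutex's event is set and the wait array is told about the signal; the LEMMA 1 discussion in sync0sync.c is what justifies that ordering. A sketch of just that ordering, with simplified types (struct my_event and signal_mutex_object are illustrative, and the wake-up primitive is only declared here):

#include <stdatomic.h>

struct my_event;			/* opaque; stands in for os_event_t */
void my_event_set(struct my_event *e);	/* assumed wake-up primitive */

struct simple_mutex {
	atomic_uint	waiters;	/* nonzero if some thread has queued itself */
	struct my_event	*event;		/* per-mutex event, as in the patch */
};

/* Clear the waiters flag first, then set the event, mirroring
   mutex_signal_object(); see sync0sync.c's LEMMA 1 for why the order
   matters for avoiding lost wake-ups. */
static void signal_mutex_object(struct simple_mutex *m)
{
	atomic_store(&m->waiters, 0u);
	my_event_set(m->event);
}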
mysql-test/mysql-test-run.pl

@@ -188,6 +188,7 @@ our $opt_force;
 our $opt_reorder= 0;
 our $opt_enable_disabled;
 our $opt_mem= $ENV{'MTR_MEM'};
+our $opt_report_features;

 our $opt_gcov;
 our $opt_gcov_err;