Commit ca0bdc4d authored by Jan Lindström

MDEV-6937: buf_flush_LRU() does not return correct number in case of compressed pages

buf_flush_LRU() returns the number of pages processed. There are
two types of processing that can happen: a page can get evicted, or
a page can get flushed. These two numbers are quite distinct and
should not be mixed.
parent 78e31f0a
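
The fix introduces a small out-parameter struct, flush_counters_t, so that each LRU batch reports dirty pages flushed and clean pages evicted as separate counts instead of collapsing both into a single return value. The standalone sketch below (plain C++; ulint is stubbed as unsigned long and dummy_flush_LRU() is a made-up stand-in for buf_flush_LRU(), not InnoDB code) illustrates the calling pattern the patch moves to, where the caller aggregates only the flushed count for flush statistics:

#include <cstdio>

typedef unsigned long ulint;            /* stand-in for InnoDB's ulint */

/* Handled page counters for a single flush (same fields as the struct
   added by this commit). */
struct flush_counters_t {
        ulint   flushed;                /* dirty pages flushed */
        ulint   evicted;                /* clean pages evicted */
        ulint   unzip_LRU_evicted;      /* uncompressed page images evicted */
};

/* Hypothetical stand-in for buf_flush_LRU(): pretend a quarter of the
   requested chunk was satisfied by evicting clean pages and the rest
   by flushing dirty ones. */
static bool
dummy_flush_LRU(ulint chunk, flush_counters_t* n)
{
        n->unzip_LRU_evicted = 0;
        n->evicted = chunk / 4;
        n->flushed = chunk - n->evicted;
        return(true);
}

int main()
{
        const ulint     chunk = 100;
        ulint           total_flushed = 0;
        ulint           total_evicted = 0;

        for (int i = 0; i < 3; i++) {
                flush_counters_t        n;

                if (!dummy_flush_LRU(chunk, &n)) {
                        break;
                }

                /* Only flushed pages feed the flush statistics; the
                   eviction counts are kept separate. */
                total_flushed += n.flushed;
                total_evicted += n.evicted + n.unzip_LRU_evicted;
        }

        printf("flushed=%lu evicted=%lu\n", total_flushed, total_evicted);
        return(0);
}

With the counts separated, a batch that mostly evicted clean (e.g. compressed) pages no longer inflates the flushed-page statistics that feed the flush-rate estimate. The diff below shows the InnoDB flush functions being converted to this out-parameter style.
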
@@ -75,6 +75,15 @@ in thrashing. */
 /* @} */

+/** Handled page counters for a single flush */
+struct flush_counters_t {
+	ulint	flushed;	/*!< number of dirty pages flushed */
+	ulint	evicted;	/*!< number of clean pages evicted, including
+				evicted uncompressed page images */
+	ulint	unzip_LRU_evicted;/*!< number of uncompressed page images
+				evicted */
+};
+
 /******************************************************************//**
 Increases flush_list size in bytes with zip_size for compressed page,
 UNIV_PAGE_SIZE for uncompressed page in inline function */
@@ -1434,12 +1443,14 @@ it is a best effort attempt and it is not guaranteed that after a call
 to this function there will be 'max' blocks in the free list.
 @return number of blocks for which the write request was queued. */
 static
-ulint
+void
 buf_flush_LRU_list_batch(
 /*=====================*/
 	buf_pool_t*	buf_pool,	/*!< in: buffer pool instance */
-	ulint		max)		/*!< in: desired number of
+	ulint		max,		/*!< in: desired number of
 					blocks in the free_list */
+	flush_counters_t*	n)	/*!< out: flushed/evicted page
+					counts */
 {
 	buf_page_t*	bpage;
 	ulint		count = 0;
@@ -1449,8 +1460,13 @@ buf_flush_LRU_list_batch(
 	ut_ad(buf_pool_mutex_own(buf_pool));

+	n->flushed = 0;
+	n->evicted = 0;
+	n->unzip_LRU_evicted = 0;
+
 	bpage = UT_LIST_GET_LAST(buf_pool->LRU);
 	while (bpage != NULL && count < max
+	       && (n->flushed + n->evicted) < max
 	       && free_len < srv_LRU_scan_depth
 	       && lru_len > BUF_LRU_MIN_LEN) {
@@ -1478,6 +1494,7 @@ buf_flush_LRU_list_batch(
 		if (buf_LRU_free_page(bpage, true)) {
 			/* buf_pool->mutex was potentially
 			released and reacquired. */
+			n->evicted++;
 			bpage = UT_LIST_GET_LAST(buf_pool->LRU);
 		} else {
 			bpage = UT_LIST_GET_PREV(LRU, bpage);
@@ -1500,7 +1517,7 @@ buf_flush_LRU_list_batch(
 			}

 			if (!buf_flush_page_and_try_neighbors(
-				bpage, BUF_FLUSH_LRU, max, &count)) {
+				bpage, BUF_FLUSH_LRU, max, &n->flushed)) {
 				bpage = prev_bpage;
 			} else {
@@ -1529,7 +1546,7 @@ buf_flush_LRU_list_batch(
 	/* We keep track of all flushes happening as part of LRU
 	flush. When estimating the desired rate at which flush_list
 	should be flushed, we factor in this value. */
-	buf_lru_flush_page_count += count;
+	buf_lru_flush_page_count += n->flushed;

 	ut_ad(buf_pool_mutex_own(buf_pool));
@@ -1540,8 +1557,6 @@ buf_flush_LRU_list_batch(
 			MONITOR_LRU_BATCH_SCANNED_PER_CALL,
 			scanned);
 	}
-
-	return(count);
 }

 /*******************************************************************//**
@@ -1551,24 +1566,28 @@ Whether LRU or unzip_LRU is used depends on the state of the system.
 or in case of unzip_LRU the number of blocks actually moved to the
 free list */
 static
-ulint
+void
 buf_do_LRU_batch(
 /*=============*/
 	buf_pool_t*	buf_pool,	/*!< in: buffer pool instance */
-	ulint		max)		/*!< in: desired number of
+	ulint		max,		/*!< in: desired number of
 					blocks in the free_list */
+	flush_counters_t*	n)	/*!< out: flushed/evicted page
+					counts */
 {
-	ulint	count = 0;
-
 	if (buf_LRU_evict_from_unzip_LRU(buf_pool)) {
-		count += buf_free_from_unzip_LRU_list_batch(buf_pool, max);
+		n->unzip_LRU_evicted = buf_free_from_unzip_LRU_list_batch(buf_pool, max);
+	} else {
+		n->unzip_LRU_evicted = 0;
 	}

-	if (max > count) {
-		count += buf_flush_LRU_list_batch(buf_pool, max - count);
+	if (max > n->unzip_LRU_evicted) {
+		buf_flush_LRU_list_batch(buf_pool, max - n->unzip_LRU_evicted, n);
+	} else {
+		n->evicted = 0;
+		n->flushed = 0;
 	}
-
-	return(count);
 }

 /*******************************************************************//**
@@ -1667,7 +1686,7 @@ end up waiting for these latches! NOTE 2: in the case of a flush list flush,
 the calling thread is not allowed to own any latches on pages!
 @return number of blocks for which the write request was queued */
 static
-ulint
+void
 buf_flush_batch(
 /*============*/
 	buf_pool_t*	buf_pool,	/*!< in: buffer pool instance */
@@ -1678,13 +1697,14 @@ buf_flush_batch(
 	ulint		min_n,		/*!< in: wished minimum mumber of blocks
 					flushed (it is not guaranteed that the
 					actual number is that big, though) */
-	lsn_t		lsn_limit)	/*!< in: in the case of BUF_FLUSH_LIST
+	lsn_t		lsn_limit,	/*!< in: in the case of BUF_FLUSH_LIST
 					all blocks whose oldest_modification is
 					smaller than this should be flushed
 					(if their number does not exceed
 					min_n), otherwise ignored */
+	flush_counters_t*	n)	/*!< out: flushed/evicted page
+					counts */
 {
-	ulint		count	= 0;
-
 	ut_ad(flush_type == BUF_FLUSH_LRU || flush_type == BUF_FLUSH_LIST);
 #ifdef UNIV_SYNC_DEBUG
@@ -1698,10 +1718,11 @@ buf_flush_batch(
 	the flush functions. */
 	switch (flush_type) {
 	case BUF_FLUSH_LRU:
-		count = buf_do_LRU_batch(buf_pool, min_n);
+		buf_do_LRU_batch(buf_pool, min_n, n);
 		break;
 	case BUF_FLUSH_LIST:
-		count = buf_do_flush_list_batch(buf_pool, min_n, lsn_limit);
+		n->flushed = buf_do_flush_list_batch(buf_pool, min_n, lsn_limit);
+		n->evicted = 0;
 		break;
 	default:
 		ut_error;
@@ -1710,15 +1731,13 @@ buf_flush_batch(
 	buf_pool_mutex_exit(buf_pool);

 #ifdef UNIV_DEBUG
-	if (buf_debug_prints && count > 0) {
+	if (buf_debug_prints && n->flushed > 0) {
 		fprintf(stderr, flush_type == BUF_FLUSH_LRU
 			? "Flushed %lu pages in LRU flush\n"
 			: "Flushed %lu pages in flush list flush\n",
-			(ulong) count);
+			(ulong) n->flushed);
 	}
 #endif /* UNIV_DEBUG */
-
-	return(count);
 }

 /******************************************************************//**
@@ -1847,29 +1866,21 @@ buf_flush_LRU(
 	ulint		min_n,		/*!< in: wished minimum mumber of blocks
 					flushed (it is not guaranteed that the
 					actual number is that big, though) */
-	ulint*		n_processed)	/*!< out: the number of pages
-					which were processed is passed
-					back to caller. Ignored if NULL */
+	flush_counters_t	*n)	/*!< out: flushed/evicted page
+					counts */
 {
-	ulint		page_count;
-
-	if (n_processed) {
-		*n_processed = 0;
-	}
-
 	if (!buf_flush_start(buf_pool, BUF_FLUSH_LRU)) {
+		n->flushed = 0;
+		n->evicted = 0;
+		n->unzip_LRU_evicted = 0;
 		return(false);
 	}

-	page_count = buf_flush_batch(buf_pool, BUF_FLUSH_LRU, min_n, 0);
+	buf_flush_batch(buf_pool, BUF_FLUSH_LRU, min_n, 0, n);

 	buf_flush_end(buf_pool, BUF_FLUSH_LRU);

-	buf_flush_common(BUF_FLUSH_LRU, page_count);
-
-	if (n_processed) {
-		*n_processed = page_count;
-	}
+	buf_flush_common(BUF_FLUSH_LRU, n->flushed);

 	return(true);
 }
@@ -1917,7 +1928,7 @@ buf_flush_list(
 	/* Flush to lsn_limit in all buffer pool instances */
 	for (i = 0; i < srv_buf_pool_instances; i++) {
 		buf_pool_t*	buf_pool;
-		ulint		page_count = 0;
+		flush_counters_t	n;

 		buf_pool = buf_pool_from_array(i);
@@ -1937,23 +1948,23 @@ buf_flush_list(
 			continue;
 		}

-		page_count = buf_flush_batch(
-			buf_pool, BUF_FLUSH_LIST, min_n, lsn_limit);
+		buf_flush_batch(
+			buf_pool, BUF_FLUSH_LIST, min_n, lsn_limit, &n);

 		buf_flush_end(buf_pool, BUF_FLUSH_LIST);

-		buf_flush_common(BUF_FLUSH_LIST, page_count);
+		buf_flush_common(BUF_FLUSH_LIST, n.flushed);

 		if (n_processed) {
-			*n_processed += page_count;
+			*n_processed += n.flushed;
 		}

-		if (page_count) {
+		if (n.flushed) {
 			MONITOR_INC_VALUE_CUMULATIVE(
 				MONITOR_FLUSH_BATCH_TOTAL_PAGE,
 				MONITOR_FLUSH_BATCH_COUNT,
 				MONITOR_FLUSH_BATCH_PAGES,
-				page_count);
+				n.flushed);
 		}
 	}
@@ -2091,7 +2102,7 @@ buf_flush_LRU_tail(void)
 		     j < scan_depth;
 		     j += PAGE_CLEANER_LRU_BATCH_CHUNK_SIZE) {

-			ulint	n_flushed = 0;
+			flush_counters_t	n;

 			/* Currently page_cleaner is the only thread
 			that can trigger an LRU flush. It is possible
@@ -2099,7 +2110,7 @@ buf_flush_LRU_tail(void)
 			still running, */
 			if (buf_flush_LRU(buf_pool,
 					  PAGE_CLEANER_LRU_BATCH_CHUNK_SIZE,
-					  &n_flushed)) {
+					  &n)) {
 				/* Allowed only one batch per
 				buffer pool instance. */
@@ -2107,8 +2118,8 @@ buf_flush_LRU_tail(void)
 						buf_pool, BUF_FLUSH_LRU);
 			}

-			if (n_flushed) {
-				total_flushed += n_flushed;
+			if (n.flushed) {
+				total_flushed += n.flushed;
 			} else {
 				/* Nothing to flush */
 				break;
...