Commit b0fc27dc authored by Marko Mäkelä

Bug #61341 buf_LRU_insert_zip_clean can be O(N) on LRU length

The buf_pool->zip_clean list is only needed for debugging, or for
recomputing buf_pool->page_hash when resizing the buffer pool. Buffer
pool resizing was never fully implemented. Remove the resizing code,
and define buf_pool->zip_clean only in debug builds.

buf_pool->zip_clean, buf_LRU_insert_zip_clean(): Enclose in
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG.
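To make the guard concrete, here is a minimal, self-contained C sketch of the pattern (not the InnoDB source: the pool_t/page_t types and page_becomes_clean() below are simplified stand-ins for buf_pool_t, buf_page_t and buf_LRU_insert_zip_clean()). The zip_clean bookkeeping compiles only when UNIV_DEBUG or UNIV_BUF_DEBUG is defined, so release builds contain neither the list nor the O(N) ordered insertion.

/* Sketch only: the conditional-compilation pattern applied to
buf_pool->zip_clean, with simplified stand-in types. */
#include <stdio.h>

typedef struct page_struct {
	int			id;
	struct page_struct*	next;
} page_t;

typedef struct {
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
	page_t*	zip_clean;	/* debug-only list of clean compressed pages */
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
	int	n_pages;	/* always-present bookkeeping */
} pool_t;

/* Called when a compressed-only page becomes clean. */
static void
page_becomes_clean(pool_t* pool, page_t* bpage)
{
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
	/* Debug builds track the page on zip_clean; the real
	buf_LRU_insert_zip_clean() inserts in LRU order, which is the
	O(N) walk of Bug #61341.  Here we simply prepend. */
	bpage->next = pool->zip_clean;
	pool->zip_clean = bpage;
#else
	(void) bpage;	/* release builds: no zip_clean bookkeeping */
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
	pool->n_pages++;
}

int
main(void)
{
	pool_t	pool = {0};
	page_t	bpage = {1, NULL};

	page_becomes_clean(&pool, &bpage);
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
	printf("zip_clean head: page %d\n", pool.zip_clean->id);
#else
	printf("zip_clean not compiled in; %d page(s) tracked\n",
	       pool.n_pages);
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
	return(0);
}

Built with -DUNIV_DEBUG the sketch prints the zip_clean head; built without either macro, the list member and the insertion code are absent from the binary.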

buf_chunk_free(), buf_chunk_all_free(), buf_pool_shrink(),
buf_pool_page_hash_rebuild(), buf_pool_resize(): Remove (unreachable code).

rb:671 approved by Inaam Rana
parent e4aa6667
ChangeLog:
2011-06-16 The InnoDB Team
* buf/buf0buddy.c, buf/buf0buf.c, buf/buf0flu.c, buf/buf0lru.c,
include/buf0buf.h, include/buf0lru.h:
Fix Bug#61341 buf_LRU_insert_zip_clean can be O(N) on LRU length
2011-06-16 The InnoDB Team
* page/page0zip.c, rem/rem0rec.c:
buf/buf0buddy.c:
/*****************************************************************************
Copyright (c) 2006, 2010, Innobase Oy. All Rights Reserved.
Copyright (c) 2006, 2011, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -351,7 +351,9 @@ buf_buddy_relocate_block(
buf_page_t* bpage, /*!< in: block to relocate */
buf_page_t* dpage) /*!< in: free block to relocate to */
{
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
buf_page_t* b;
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
ut_ad(buf_pool_mutex_own());
@@ -380,7 +382,7 @@ buf_buddy_relocate_block(
buf_relocate(bpage, dpage);
ut_d(bpage->state = BUF_BLOCK_ZIP_FREE);
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
/* relocate buf_pool->zip_clean */
b = UT_LIST_GET_PREV(list, dpage);
UT_LIST_REMOVE(list, buf_pool->zip_clean, dpage);
@@ -390,6 +392,7 @@ buf_buddy_relocate_block(
} else {
UT_LIST_ADD_FIRST(list, buf_pool->zip_clean, dpage);
}
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
UNIV_MEM_INVALID(bpage, sizeof *bpage);
buf/buf0buf.c:
/*****************************************************************************
Copyright (c) 1995, 2010, Innobase Oy. All Rights Reserved.
Copyright (c) 1995, 2011, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2008, Google Inc.
Portions of this file contain modifications contributed and copyrighted by
@@ -873,72 +873,6 @@ buf_chunk_not_freed(
return(NULL);
}
/*********************************************************************//**
Checks that all blocks in the buffer chunk are in BUF_BLOCK_NOT_USED state.
@return TRUE if all freed */
static
ibool
buf_chunk_all_free(
/*===============*/
const buf_chunk_t* chunk) /*!< in: chunk being checked */
{
const buf_block_t* block;
ulint i;
ut_ad(buf_pool);
ut_ad(buf_pool_mutex_own());
block = chunk->blocks;
for (i = chunk->size; i--; block++) {
if (buf_block_get_state(block) != BUF_BLOCK_NOT_USED) {
return(FALSE);
}
}
return(TRUE);
}
/********************************************************************//**
Frees a chunk of buffer frames. */
static
void
buf_chunk_free(
/*===========*/
buf_chunk_t* chunk) /*!< out: chunk of buffers */
{
buf_block_t* block;
const buf_block_t* block_end;
ut_ad(buf_pool_mutex_own());
block_end = chunk->blocks + chunk->size;
for (block = chunk->blocks; block < block_end; block++) {
ut_a(buf_block_get_state(block) == BUF_BLOCK_NOT_USED);
ut_a(!block->page.zip.data);
ut_ad(!block->page.in_LRU_list);
ut_ad(!block->in_unzip_LRU_list);
ut_ad(!block->page.in_flush_list);
/* Remove the block from the free list. */
ut_ad(block->page.in_free_list);
UT_LIST_REMOVE(list, buf_pool->free, (&block->page));
/* Free the latches. */
mutex_free(&block->mutex);
rw_lock_free(&block->lock);
#ifdef UNIV_SYNC_DEBUG
rw_lock_free(&block->debug_latch);
#endif /* UNIV_SYNC_DEBUG */
UNIV_MEM_UNDESC(block);
}
os_mem_free_large(chunk->mem, chunk->mem_size);
}
/********************************************************************//**
Creates the buffer pool.
@return own: buf_pool object, NULL if not enough memory or error */
@@ -1017,8 +951,6 @@ buf_pool_free(void)
chunk = chunks + buf_pool->n_chunks;
while (--chunk >= chunks) {
/* Bypass the checks of buf_chunk_free(), since they
would fail at shutdown. */
os_mem_free_large(chunk->mem, chunk->mem_size);
}
@@ -1193,311 +1125,6 @@ buf_relocate(
HASH_INSERT(buf_page_t, hash, buf_pool->page_hash, fold, dpage);
}
/********************************************************************//**
Shrinks the buffer pool. */
static
void
buf_pool_shrink(
/*============*/
ulint chunk_size) /*!< in: number of pages to remove */
{
buf_chunk_t* chunks;
buf_chunk_t* chunk;
ulint max_size;
ulint max_free_size;
buf_chunk_t* max_chunk;
buf_chunk_t* max_free_chunk;
ut_ad(!buf_pool_mutex_own());
try_again:
btr_search_disable(); /* Empty the adaptive hash index again */
buf_pool_mutex_enter();
shrink_again:
if (buf_pool->n_chunks <= 1) {
/* Cannot shrink if there is only one chunk */
goto func_done;
}
/* Search for the largest free chunk
not larger than the size difference */
chunks = buf_pool->chunks;
chunk = chunks + buf_pool->n_chunks;
max_size = max_free_size = 0;
max_chunk = max_free_chunk = NULL;
while (--chunk >= chunks) {
if (chunk->size <= chunk_size
&& chunk->size > max_free_size) {
if (chunk->size > max_size) {
max_size = chunk->size;
max_chunk = chunk;
}
if (buf_chunk_all_free(chunk)) {
max_free_size = chunk->size;
max_free_chunk = chunk;
}
}
}
if (!max_free_size) {
ulint dirty = 0;
ulint nonfree = 0;
buf_block_t* block;
buf_block_t* bend;
/* Cannot shrink: try again later
(do not assign srv_buf_pool_old_size) */
if (!max_chunk) {
goto func_exit;
}
block = max_chunk->blocks;
bend = block + max_chunk->size;
/* Move the blocks of chunk to the end of the
LRU list and try to flush them. */
for (; block < bend; block++) {
switch (buf_block_get_state(block)) {
case BUF_BLOCK_NOT_USED:
continue;
case BUF_BLOCK_FILE_PAGE:
break;
default:
nonfree++;
continue;
}
mutex_enter(&block->mutex);
/* The following calls will temporarily
release block->mutex and buf_pool_mutex.
Therefore, we have to always retry,
even if !dirty && !nonfree. */
if (!buf_flush_ready_for_replace(&block->page)) {
buf_LRU_make_block_old(&block->page);
dirty++;
} else if (buf_LRU_free_block(&block->page, TRUE)
!= BUF_LRU_FREED) {
nonfree++;
}
mutex_exit(&block->mutex);
}
buf_pool_mutex_exit();
/* Request for a flush of the chunk if it helps.
Do not flush if there are non-free blocks, since
flushing will not make the chunk freeable. */
if (nonfree) {
/* Avoid busy-waiting. */
os_thread_sleep(100000);
} else if (dirty
&& buf_flush_batch(BUF_FLUSH_LRU, dirty, 0)
== ULINT_UNDEFINED) {
buf_flush_wait_batch_end(BUF_FLUSH_LRU);
}
goto try_again;
}
max_size = max_free_size;
max_chunk = max_free_chunk;
srv_buf_pool_old_size = srv_buf_pool_size;
/* Rewrite buf_pool->chunks. Copy everything but max_chunk. */
chunks = mem_alloc((buf_pool->n_chunks - 1) * sizeof *chunks);
memcpy(chunks, buf_pool->chunks,
(max_chunk - buf_pool->chunks) * sizeof *chunks);
memcpy(chunks + (max_chunk - buf_pool->chunks),
max_chunk + 1,
buf_pool->chunks + buf_pool->n_chunks
- (max_chunk + 1));
ut_a(buf_pool->curr_size > max_chunk->size);
buf_pool->curr_size -= max_chunk->size;
srv_buf_pool_curr_size = buf_pool->curr_size * UNIV_PAGE_SIZE;
chunk_size -= max_chunk->size;
buf_chunk_free(max_chunk);
mem_free(buf_pool->chunks);
buf_pool->chunks = chunks;
buf_pool->n_chunks--;
/* Allow a slack of one megabyte. */
if (chunk_size > 1048576 / UNIV_PAGE_SIZE) {
goto shrink_again;
}
func_done:
srv_buf_pool_old_size = srv_buf_pool_size;
func_exit:
buf_pool_mutex_exit();
btr_search_enable();
}
/********************************************************************//**
Rebuild buf_pool->page_hash. */
static
void
buf_pool_page_hash_rebuild(void)
/*============================*/
{
ulint i;
ulint n_chunks;
buf_chunk_t* chunk;
hash_table_t* page_hash;
hash_table_t* zip_hash;
buf_page_t* b;
buf_pool_mutex_enter();
/* Free, create, and populate the hash table. */
hash_table_free(buf_pool->page_hash);
buf_pool->page_hash = page_hash = hash_create(2 * buf_pool->curr_size);
zip_hash = hash_create(2 * buf_pool->curr_size);
HASH_MIGRATE(buf_pool->zip_hash, zip_hash, buf_page_t, hash,
BUF_POOL_ZIP_FOLD_BPAGE);
hash_table_free(buf_pool->zip_hash);
buf_pool->zip_hash = zip_hash;
/* Insert the uncompressed file pages to buf_pool->page_hash. */
chunk = buf_pool->chunks;
n_chunks = buf_pool->n_chunks;
for (i = 0; i < n_chunks; i++, chunk++) {
ulint j;
buf_block_t* block = chunk->blocks;
for (j = 0; j < chunk->size; j++, block++) {
if (buf_block_get_state(block)
== BUF_BLOCK_FILE_PAGE) {
ut_ad(!block->page.in_zip_hash);
ut_ad(block->page.in_page_hash);
HASH_INSERT(buf_page_t, hash, page_hash,
buf_page_address_fold(
block->page.space,
block->page.offset),
&block->page);
}
}
}
/* Insert the compressed-only pages to buf_pool->page_hash.
All such blocks are either in buf_pool->zip_clean or
in buf_pool->flush_list. */
for (b = UT_LIST_GET_FIRST(buf_pool->zip_clean); b;
b = UT_LIST_GET_NEXT(list, b)) {
ut_a(buf_page_get_state(b) == BUF_BLOCK_ZIP_PAGE);
ut_ad(!b->in_flush_list);
ut_ad(b->in_LRU_list);
ut_ad(b->in_page_hash);
ut_ad(!b->in_zip_hash);
HASH_INSERT(buf_page_t, hash, page_hash,
buf_page_address_fold(b->space, b->offset), b);
}
for (b = UT_LIST_GET_FIRST(buf_pool->flush_list); b;
b = UT_LIST_GET_NEXT(list, b)) {
ut_ad(b->in_flush_list);
ut_ad(b->in_LRU_list);
ut_ad(b->in_page_hash);
ut_ad(!b->in_zip_hash);
switch (buf_page_get_state(b)) {
case BUF_BLOCK_ZIP_DIRTY:
HASH_INSERT(buf_page_t, hash, page_hash,
buf_page_address_fold(b->space,
b->offset), b);
break;
case BUF_BLOCK_FILE_PAGE:
/* uncompressed page */
break;
case BUF_BLOCK_ZIP_FREE:
case BUF_BLOCK_ZIP_PAGE:
case BUF_BLOCK_NOT_USED:
case BUF_BLOCK_READY_FOR_USE:
case BUF_BLOCK_MEMORY:
case BUF_BLOCK_REMOVE_HASH:
ut_error;
break;
}
}
buf_pool_mutex_exit();
}
/********************************************************************//**
Resizes the buffer pool. */
UNIV_INTERN
void
buf_pool_resize(void)
/*=================*/
{
buf_pool_mutex_enter();
if (srv_buf_pool_old_size == srv_buf_pool_size) {
buf_pool_mutex_exit();
return;
}
if (srv_buf_pool_curr_size + 1048576 > srv_buf_pool_size) {
buf_pool_mutex_exit();
/* Disable adaptive hash indexes and empty the index
in order to free up memory in the buffer pool chunks. */
buf_pool_shrink((srv_buf_pool_curr_size - srv_buf_pool_size)
/ UNIV_PAGE_SIZE);
} else if (srv_buf_pool_curr_size + 1048576 < srv_buf_pool_size) {
/* Enlarge the buffer pool by at least one megabyte */
ulint mem_size
= srv_buf_pool_size - srv_buf_pool_curr_size;
buf_chunk_t* chunks;
buf_chunk_t* chunk;
chunks = mem_alloc((buf_pool->n_chunks + 1) * sizeof *chunks);
memcpy(chunks, buf_pool->chunks, buf_pool->n_chunks
* sizeof *chunks);
chunk = &chunks[buf_pool->n_chunks];
if (!buf_chunk_init(chunk, mem_size)) {
mem_free(chunks);
} else {
buf_pool->curr_size += chunk->size;
srv_buf_pool_curr_size = buf_pool->curr_size
* UNIV_PAGE_SIZE;
mem_free(buf_pool->chunks);
buf_pool->chunks = chunks;
buf_pool->n_chunks++;
}
srv_buf_pool_old_size = srv_buf_pool_size;
buf_pool_mutex_exit();
}
buf_pool_page_hash_rebuild();
}
/********************************************************************//**
Moves a page to the start of the buffer pool LRU list. This high-level
function can be used to prevent an important page from slipping out of
@@ -2233,8 +1860,10 @@ buf_page_get_gen(
if (buf_page_get_state(&block->page)
== BUF_BLOCK_ZIP_PAGE) {
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
UT_LIST_REMOVE(list, buf_pool->zip_clean,
&block->page);
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
ut_ad(!block->page.in_flush_list);
} else {
/* Relocate buf_pool->flush_list. */
@@ -2975,7 +2604,9 @@ buf_page_init_for_read(
/* The block must be put to the LRU list, to the old blocks */
buf_LRU_add_block(bpage, TRUE/* to old blocks */);
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
buf_LRU_insert_zip_clean(bpage);
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
buf_page_set_io_fix(bpage, BUF_IO_READ);
buf/buf0flu.c:
/*****************************************************************************
Copyright (c) 1995, 2010, Innobase Oy. All Rights Reserved.
Copyright (c) 1995, 2011, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -435,7 +435,9 @@ buf_flush_remove(
case BUF_BLOCK_ZIP_DIRTY:
buf_page_set_state(bpage, BUF_BLOCK_ZIP_PAGE);
UT_LIST_REMOVE(list, buf_pool->flush_list, bpage);
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
buf_LRU_insert_zip_clean(bpage);
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
break;
case BUF_BLOCK_FILE_PAGE:
UT_LIST_REMOVE(list, buf_pool->flush_list, bpage);
buf/buf0lru.c:
/*****************************************************************************
Copyright (c) 1995, 2010, Innobase Oy. All Rights Reserved.
Copyright (c) 1995, 2011, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -501,6 +501,7 @@ buf_LRU_invalidate_tablespace(
}
}
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
/********************************************************************//**
Insert a compressed block into buf_pool->zip_clean in the LRU order. */
UNIV_INTERN
@@ -532,6 +533,7 @@ buf_LRU_insert_zip_clean(
UT_LIST_ADD_FIRST(list, buf_pool->zip_clean, bpage);
}
}
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
/******************************************************************//**
Try to free an uncompressed page of a compressed block from the unzip
@@ -1518,7 +1520,9 @@ buf_LRU_free_block(
}
if (b->state == BUF_BLOCK_ZIP_PAGE) {
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
buf_LRU_insert_zip_clean(b);
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
} else {
/* Relocate on buf_pool->flush_list. */
buf_flush_relocate_on_flush_list(bpage, b);
@@ -1797,7 +1801,9 @@ buf_LRU_block_remove_hashed_page(
ut_a(bpage->zip.data);
ut_a(buf_page_get_zip_size(bpage));
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
UT_LIST_REMOVE(list, buf_pool->zip_clean, bpage);
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
mutex_exit(&buf_pool_zip_mutex);
buf_pool_mutex_exit_forbid();
include/buf0buf.h:
@@ -141,12 +141,6 @@ buf_relocate(
BUF_BLOCK_ZIP_DIRTY or BUF_BLOCK_ZIP_PAGE */
buf_page_t* dpage) /*!< in/out: destination control block */
__attribute__((nonnull));
/********************************************************************//**
Resizes the buffer pool. */
UNIV_INTERN
void
buf_pool_resize(void);
/*=================*/
/*********************************************************************//**
Gets the current size of buffer buf_pool in bytes.
@return size in bytes */
@@ -1446,8 +1440,10 @@ struct buf_pool_struct{
frames and buf_page_t descriptors of blocks that exist
in the buffer pool only in compressed form. */
/* @{ */
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
UT_LIST_BASE_NODE_T(buf_page_t) zip_clean;
/*!< unmodified compressed pages */
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
UT_LIST_BASE_NODE_T(buf_page_t) zip_free[BUF_BUDDY_SIZES];
/*!< buddy free lists */
#if BUF_BUDDY_HIGH != UNIV_PAGE_SIZE
include/buf0lru.h:
/*****************************************************************************
Copyright (c) 1995, 2009, Innobase Oy. All Rights Reserved.
Copyright (c) 1995, 2011, Oracle and/or its affiliates. All Rights Reserved.
This program is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
@@ -84,6 +84,7 @@ void
buf_LRU_invalidate_tablespace(
/*==========================*/
ulint id); /*!< in: space id */
#if defined UNIV_DEBUG || defined UNIV_BUF_DEBUG
/********************************************************************//**
Insert a compressed block into buf_pool->zip_clean in the LRU order. */
UNIV_INTERN
@@ -91,6 +92,7 @@ void
buf_LRU_insert_zip_clean(
/*=====================*/
buf_page_t* bpage); /*!< in: pointer to the block in question */
#endif /* UNIV_DEBUG || UNIV_BUF_DEBUG */
/******************************************************************//**
Try to free a block. If bpage is a descriptor of a compressed-only