Commit 7a8cc852 authored by Marko Mäkelä

MDEV-35049: btr_search_check_free_space_in_heap() is a bottleneck

MEM_HEAP_BTR_SEARCH: Remove. Let us handle this special type of
mem_heap_t allocation in btr0sea.cc, the only compilation unit that
needs it.

mem_block_info_t::ahi_block: Replaces free_block. This caches one
buffer page for use in adaptive hash index allocations. It is
protected by btr_search_sys_t::partition::latch. It is Atomic_relaxed
only because btr_search_check_free_space_in_heap() follows a pattern
of test, lock, and re-test.

btr_search_check_free_space_in_heap(): Protect the ahi_block with a
shared AHI partition latch. We must recheck btr_search_enabled after
acquiring the latch in order to avoid a race condition with
btr_search_disable(). Using a shared latch instead of an exclusive one
should reduce contention with btr_search_guess_on_hash() and other
operations when running with innodb_adaptive_hash_index=ON.

This has been tested by running the regression test suite
with the adaptive hash index enabled:
./mtr --mysqld=--loose-innodb-adaptive-hash-index=ON
parent cc70ca7e
@@ -55,6 +55,81 @@ mysql_pfs_key_t btr_search_latch_key;
/** The adaptive hash index */
btr_search_sys_t btr_search_sys;
inline void btr_search_sys_t::partition::init() noexcept
{
memset((void*) this, 0, sizeof *this);
latch.SRW_LOCK_INIT(btr_search_latch_key);
}
inline void btr_search_sys_t::partition::clear() noexcept
{
#ifndef SUX_LOCK_GENERIC
ut_ad(latch.is_write_locked());
#endif
ut_ad(heap->type == MEM_HEAP_BUFFER);
if (buf_block_t *b= heap->ahi_block)
buf_pool.free_block(b);
ut_d(heap->ahi_block= nullptr);
mem_heap_free(heap);
heap= nullptr;
ut_free(table.array);
}
inline void btr_search_sys_t::partition::free() noexcept
{
if (heap)
{
ut_d(latch.wr_lock(SRW_LOCK_CALL));
clear();
ut_d(latch.wr_unlock());
}
latch.destroy();
}
inline void btr_search_sys_t::partition::alloc(ulint hash_size) noexcept
{
table.create(hash_size);
heap= mem_heap_create_typed(std::min<ulong>(4096,
MEM_MAX_ALLOC_IN_BUF / 2 -
MEM_BLOCK_HEADER_SIZE -
MEM_SPACE_NEEDED(0)),
MEM_HEAP_BUFFER);
}
void btr_search_sys_t::create() noexcept
{
parts= static_cast<partition*>(ut_malloc(btr_ahi_parts * sizeof *parts,
mem_key_ahi));
for (ulong i= 0; i < btr_ahi_parts; ++i)
parts[i].init();
if (btr_search_enabled)
btr_search_enable();
}
void btr_search_sys_t::alloc(ulint hash_size) noexcept
{
hash_size/= btr_ahi_parts;
for (ulong i= 0; i < btr_ahi_parts; ++i)
parts[i].alloc(hash_size);
}
inline void btr_search_sys_t::clear() noexcept
{
for (ulong i= 0; i < btr_ahi_parts; ++i)
parts[i].clear();
}
void btr_search_sys_t::free() noexcept
{
if (parts)
{
for (ulong i= 0; i < btr_ahi_parts; ++i)
parts[i].free();
ut_free(parts);
parts= nullptr;
}
}
/** If the number of records on the page divided by this parameter
would have been successfully accessed using a hash index, the index
is then built on the page, assuming the global limit has been reached */
@@ -155,33 +230,38 @@ btr_search_get_n_fields(
return(btr_search_get_n_fields(cursor->n_fields, cursor->n_bytes));
}
/** This function should be called before reserving any btr search mutex, if
the intended operation might add nodes to the search system hash table.
Because of the latching order, once we have reserved the btr search system
latch, we cannot allocate a free frame from the buffer pool. Checks that
there is a free buffer frame allocated for hash table heap in the btr search
system. If not, allocates a free frame for the heap. This check makes it
probable that, when we have reserved the btr search system latch and we need to
allocate a new node to the hash table, it will succeed. However, the check
will not guarantee success.
@param[in] index index handler */
/** This function should be called if the intended operation might add nodes
to btr_search_sys.
Because of the latching order, once we have acquired a btr_search_sys latch,
we cannot acquire buf_pool.mutex to allocate a buffer block. By ensuring
that the heap points to a pre-allocated block, we make a subsequent
mem_heap_create_block_func() likely to succeed, but do not guarantee it.
@param index B-tree */
static void btr_search_check_free_space_in_heap(const dict_index_t *index)
{
/* Note that we peek the value of heap->free_block without reserving
the latch: this is ok, because we will not guarantee that there will
be enough free space in the hash table. */
buf_block_t *block= buf_block_alloc();
auto part= btr_search_sys.get_part(*index);
part->latch.wr_lock(SRW_LOCK_CALL);
if (!btr_search_enabled || part->heap->free_block)
buf_block_free(block);
else
part->heap->free_block= block;
/* heap->ahi_block may be consumed by a concurrent invocation of
mem_heap_create_block_func(), which must be covered by an exclusive
part->latch. It would also be cleared by
btr_search_sys_t::partition::clear() in btr_search_disable(), or
btr_search_sys_t::partition::free() in innodb_shutdown(). */
part->latch.wr_unlock();
if (!part->heap->ahi_block)
{
buf_block_t *block= buf_block_alloc();
part->latch.rd_lock(SRW_LOCK_CALL);
/* Even though our callers already checked for btr_search_enabled,
we must recheck it while holding part->latch, because
btr_search_disable() may have invoked part->clear() meanwhile. */
const bool filled{btr_search_enabled && !part->heap->ahi_block};
if (filled)
part->heap->ahi_block= block;
part->latch.rd_unlock();
if (!filled)
buf_block_free(block);
}
}
/** Set index->ref_count = 0 on all indexes of a table.
@@ -458,31 +538,22 @@ btr_search_update_block_hash_info(btr_search_t* info, buf_block_t* block)
constexpr ulint MAX_N_POINTERS = UNIV_PAGE_SIZE_MAX / REC_N_NEW_EXTRA_BYTES;
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
__attribute__((nonnull))
/**
Insert an entry into the hash table. If an entry with the same fold number
is found, its node is updated to point to the new data, and no new node
is inserted.
@param table hash table
@param heap memory heap
@param fold folded value of the record
@param block buffer block containing the record
@param data the record
@retval true on success
@retval false if no more memory could be allocated */
static bool ha_insert_for_fold(hash_table_t *table, mem_heap_t* heap,
ulint fold,
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
buf_block_t *block, /*!< buffer block of data */
void btr_search_sys_t::partition::insert(ulint fold, const rec_t *rec,
buf_block_t *block) noexcept
#else
void btr_search_sys_t::partition::insert(ulint fold, const rec_t *rec) noexcept
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
const rec_t *data)
{
#ifndef SUX_LOCK_GENERIC
ut_ad(latch.is_write_locked());
#endif
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
ut_a(block->page.frame == page_align(data));
ut_a(block->page.frame == page_align(rec));
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
ut_ad(btr_search_enabled);
hash_cell_t *cell= &table->array[table->calc_hash(fold)];
hash_cell_t *cell= &table.array[table.calc_hash(fold)];
for (ha_node_t *prev= static_cast<ha_node_t*>(cell->node); prev;
prev= prev->next)
@@ -497,22 +568,60 @@ static bool ha_insert_for_fold(hash_table_t *table, mem_heap_t* heap,
prev->block= block;
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
prev->data= data;
return true;
prev->data= rec;
return;
}
}
/* We have to allocate a new chain node */
ha_node_t *node= static_cast<ha_node_t*>(mem_heap_alloc(heap, sizeof *node));
ha_node_t *node;
if (!node)
return false;
{
/* This is based on mem_heap_alloc(), but specialized for the
adaptive hash index. */
ut_ad(heap->type == MEM_HEAP_BUFFER);
mem_block_t *block= UT_LIST_GET_LAST(heap->base);
size_t n= sizeof *node + REDZONE_SIZE;
if (mem_block_get_len(block) < mem_block_get_free(block) +
MEM_SPACE_NEEDED(n))
{
buf_block_t *buf_block= heap->ahi_block;
if (!buf_block)
return;
heap->ahi_block= nullptr;
mem_block_t *new_block=
reinterpret_cast<mem_block_t*>(buf_block->page.frame);
MEM_UNDEFINED(new_block, srv_page_size);
new_block->buf_block= buf_block;
new_block->ahi_block= nullptr;
ut_d(static_assert(sizeof new_block->file_name == 8, ""));
ut_d(memcpy(new_block->file_name, "btr0sea", 8));
ut_d(new_block->line= __LINE__);
heap->total_size+= MEM_MAX_ALLOC_IN_BUF;
mem_block_set_len(new_block, MEM_MAX_ALLOC_IN_BUF);
mem_block_set_type(new_block, MEM_HEAP_BUFFER);
mem_block_set_free(new_block, MEM_BLOCK_HEADER_SIZE);
mem_block_set_start(new_block, MEM_BLOCK_HEADER_SIZE);
ut_d(new_block->total_size= ULINT_UNDEFINED);
MEM_UNDEFINED(&new_block->total_size, sizeof block->total_size);
MEM_NOACCESS(new_block + 1, srv_page_size - sizeof *new_block);
UT_LIST_INSERT_AFTER(heap->base, block, new_block);
block= new_block;
}
ha_node_set_data(node, block, data);
const size_t free= mem_block_get_free(block);
mem_block_set_free(block, free + MEM_SPACE_NEEDED(n));
char *buf= reinterpret_cast<char*>(block) + free + REDZONE_SIZE;
MEM_MAKE_ADDRESSABLE(buf, n - REDZONE_SIZE);
node= reinterpret_cast<ha_node_t*>(buf);
}
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
ut_a(block->n_pointers++ < MAX_N_POINTERS);
node->block= block;
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
node->data= rec;
node->fold= fold;
node->next= nullptr;
@@ -526,7 +635,6 @@ static bool ha_insert_for_fold(hash_table_t *table, mem_heap_t* heap,
prev= prev->next;
prev->next= node;
}
return true;
}
__attribute__((nonnull))
@@ -661,8 +769,9 @@ static bool ha_search_and_update_if_found(hash_table_t *table, ulint fold,
}
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
# define ha_insert_for_fold(p,f,b,d) (p)->insert(f,d,b)
#else
# define ha_insert_for_fold(t,h,f,b,d) ha_insert_for_fold(t,h,f,d)
# define ha_insert_for_fold(p,f,b,d) (p)->insert(f,d)
# define ha_search_and_update_if_found(table,fold,data,new_block,new_data) \
ha_search_and_update_if_found(table,fold,data,new_data)
#endif
@@ -736,7 +845,7 @@ btr_search_update_hash_ref(
mem_heap_free(heap);
}
ha_insert_for_fold(&part->table, part->heap, fold, block, rec);
ha_insert_for_fold(part, fold, block, rec);
MONITOR_INC(MONITOR_ADAPTIVE_HASH_ROW_ADDED);
}
@@ -1609,8 +1718,7 @@ btr_search_build_page_hash_index(
{
auto part = btr_search_sys.get_part(*index);
for (ulint i = 0; i < n_cached; i++) {
ha_insert_for_fold(&part->table, part->heap,
folds[i], block, recs[i]);
ha_insert_for_fold(part, folds[i], block, recs[i]);
}
}
@@ -1995,8 +2103,7 @@ void btr_search_update_hash_on_insert(btr_cur_t *cursor,
}
part = btr_search_sys.get_part(*index);
ha_insert_for_fold(&part->table, part->heap,
ins_fold, block, ins_rec);
ha_insert_for_fold(part, ins_fold, block, ins_rec);
MONITOR_INC(MONITOR_ADAPTIVE_HASH_ROW_ADDED);
}
@@ -2017,11 +2124,9 @@ void btr_search_update_hash_on_insert(btr_cur_t *cursor,
}
if (!left_side) {
ha_insert_for_fold(&part->table, part->heap,
fold, block, rec);
ha_insert_for_fold(part, fold, block, rec);
} else {
ha_insert_for_fold(&part->table, part->heap,
ins_fold, block, ins_rec);
ha_insert_for_fold(part, ins_fold, block, ins_rec);
}
MONITOR_INC(MONITOR_ADAPTIVE_HASH_ROW_ADDED);
}
@@ -2041,8 +2146,7 @@ void btr_search_update_hash_on_insert(btr_cur_t *cursor,
part = btr_search_sys.get_part(*index);
}
ha_insert_for_fold(&part->table, part->heap,
ins_fold, block, ins_rec);
ha_insert_for_fold(part, ins_fold, block, ins_rec);
MONITOR_INC(MONITOR_ADAPTIVE_HASH_ROW_ADDED);
}
@@ -2062,11 +2166,9 @@ void btr_search_update_hash_on_insert(btr_cur_t *cursor,
}
if (!left_side) {
ha_insert_for_fold(&part->table, part->heap,
ins_fold, block, ins_rec);
ha_insert_for_fold(part, ins_fold, block, ins_rec);
} else {
ha_insert_for_fold(&part->table, part->heap,
next_fold, block, next_rec);
ha_insert_for_fold(part, next_fold, block, next_rec);
}
MONITOR_INC(MONITOR_ADAPTIVE_HASH_ROW_ADDED);
}
@@ -30,7 +30,7 @@ Created 2/17/1996 Heikki Tuuri
#include "dict0dict.h"
#ifdef BTR_CUR_HASH_ADAPT
#include "ha0ha.h"
#include "srw_lock.h"
#include "buf0buf.h"
#ifdef UNIV_PFS_RWLOCK
extern mysql_pfs_key_t btr_search_latch_key;
@@ -240,69 +240,47 @@ struct btr_search_sys_t
/** Partition of the hash table */
struct partition
{
/** latches protecting hash_table */
srw_spin_lock latch;
/** latches protecting the hash table */
alignas(CPU_LEVEL1_DCACHE_LINESIZE) srw_spin_lock latch;
/** mapping of dtuple_fold() to rec_t* in buf_block_t::frame */
hash_table_t table;
/** memory heap for table */
mem_heap_t *heap;
#ifdef _MSC_VER
#pragma warning(push)
// nonstandard extension - zero sized array, if perfschema is not compiled
#pragma warning(disable : 4200)
#endif
inline void init() noexcept;
char pad[(CPU_LEVEL1_DCACHE_LINESIZE - sizeof latch -
sizeof table - sizeof heap) &
(CPU_LEVEL1_DCACHE_LINESIZE - 1)];
inline void alloc(ulint hash_size) noexcept;
#ifdef _MSC_VER
#pragma warning(pop)
#endif
inline void clear() noexcept;
inline void free() noexcept;
void init()
{
memset((void*) this, 0, sizeof *this);
latch.SRW_LOCK_INIT(btr_search_latch_key);
}
void alloc(ulint hash_size)
{
table.create(hash_size);
heap= mem_heap_create_typed(std::min<ulong>(4096,
MEM_MAX_ALLOC_IN_BUF / 2
- MEM_BLOCK_HEADER_SIZE
- MEM_SPACE_NEEDED(0)),
MEM_HEAP_FOR_BTR_SEARCH);
}
void clear()
{
mem_heap_free(heap);
heap= nullptr;
ut_free(table.array);
}
void free()
{
latch.destroy();
if (heap)
clear();
}
__attribute__((nonnull))
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
/** Insert or replace an entry into the hash table.
@param fold hash value of data
@param rec B-tree leaf page record
@param block the buffer block that contains rec */
void insert(ulint fold, const rec_t *rec, buf_block_t *block) noexcept;
#else
/** Insert or replace an entry into the hash table.
@param fold hash value of data
@param rec B-tree leaf page record */
void insert(ulint fold, const rec_t *rec) noexcept;
#endif
};
/** Partitions of the adaptive hash index */
partition *parts;
/** Get an adaptive hash index partition */
partition *get_part(index_id_t id, ulint space_id) const
partition *get_part(index_id_t id, ulint space_id) const noexcept
{
return parts + ut_fold_ulint_pair(ulint(id), space_id) % btr_ahi_parts;
}
/** Get an adaptive hash index partition */
partition *get_part(const dict_index_t &index) const
partition *get_part(const dict_index_t &index) const noexcept
{
ut_ad(!index.table->space ||
index.table->space->id == index.table->space_id);
@@ -314,37 +292,15 @@ struct btr_search_sys_t
{ return &get_part(index)->latch; }
/** Create and initialize at startup */
void create()
{
parts= static_cast<partition*>(ut_malloc(btr_ahi_parts * sizeof *parts,
mem_key_ahi));
for (ulong i= 0; i < btr_ahi_parts; ++i)
parts[i].init();
if (btr_search_enabled)
btr_search_enable();
}
void create() noexcept;
void alloc(ulint hash_size)
{
hash_size/= btr_ahi_parts;
for (ulong i= 0; i < btr_ahi_parts; ++i)
parts[i].alloc(hash_size);
}
void alloc(ulint hash_size) noexcept;
/** Clear when disabling the adaptive hash index */
void clear() { for (ulong i= 0; i < btr_ahi_parts; ++i) parts[i].clear(); }
inline void clear() noexcept;
/** Free at shutdown */
void free()
{
if (parts)
{
for (ulong i= 0; i < btr_ahi_parts; ++i)
parts[i].free();
ut_free(parts);
parts= nullptr;
}
}
void free() noexcept;
};
/** The adaptive hash index */
......
@@ -950,7 +950,7 @@ struct buf_block_t{
Another exception is that ha_insert_for_fold() may
decrement n_pointers without holding the appropriate latch
in btr_search_latches[]. Thus, n_pointers must be
in btr_search_sys.parts[]. Thus, n_pointers must be
protected by atomic memory access.
This implies that the fields may be read without race
@@ -39,38 +39,6 @@ ha_node_get_data(
return(node->data);
}
/******************************************************************//**
Sets hash node data. */
UNIV_INLINE
void
ha_node_set_data_func(
/*==================*/
ha_node_t* node, /*!< in: hash chain node */
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
buf_block_t* block, /*!< in: buffer block containing the data */
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
const rec_t* data) /*!< in: pointer to the data */
{
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
node->block = block;
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
node->data = data;
}
#if defined UNIV_AHI_DEBUG || defined UNIV_DEBUG
/** Sets hash node data.
@param n in: hash chain node
@param b in: buffer block containing the data
@param d in: pointer to the data */
# define ha_node_set_data(n,b,d) ha_node_set_data_func(n,b,d)
#else /* UNIV_AHI_DEBUG || UNIV_DEBUG */
/** Sets hash node data.
@param n in: hash chain node
@param b in: buffer block containing the data
@param d in: pointer to the data */
# define ha_node_set_data(n,b,d) ha_node_set_data_func(n,d)
#endif /* UNIV_AHI_DEBUG || UNIV_DEBUG */
/******************************************************************//**
Gets the next node in a hash chain.
@return next node, NULL if none */
@@ -28,8 +28,6 @@ Created 6/9/1994 Heikki Tuuri
#define mem0mem_h
#include "ut0mem.h"
#include "ut0rnd.h"
#include "mach0data.h"
#include <memory>
@@ -42,22 +40,14 @@ typedef struct mem_block_info_t mem_block_t;
/** A memory heap is a nonempty linear list of memory blocks */
typedef mem_block_t mem_heap_t;
struct buf_block_t;
/** Types of allocation for memory heaps: DYNAMIC means allocation from the
dynamic memory pool of the C compiler, BUFFER means allocation from the
buffer pool; the latter method is used for very big heaps */
#define MEM_HEAP_DYNAMIC 0 /* the most common type */
#define MEM_HEAP_BUFFER 1
#define MEM_HEAP_BTR_SEARCH 2 /* this flag can optionally be
ORed to MEM_HEAP_BUFFER, in which
case heap->free_block is used in
some cases for memory allocations,
and if it's NULL, the memory
allocation functions can return
NULL. */
/** Different types of heaps in terms of which data structure is using them */
#define MEM_HEAP_FOR_BTR_SEARCH (MEM_HEAP_BTR_SEARCH | MEM_HEAP_BUFFER)
#define MEM_HEAP_FOR_LOCK_HEAP (MEM_HEAP_BUFFER)
/** The following start size is used for the first block in the memory heap if
@@ -110,8 +100,7 @@ A single user buffer of 'size' will fit in the block.
@param[in] file_name File name where created
@param[in] line Line where created
@param[in] type Heap type
@return own: memory heap, NULL if did not succeed (only possible for
MEM_HEAP_BTR_SEARCH type heaps) */
@return own: memory heap */
UNIV_INLINE
mem_heap_t*
mem_heap_create_func(
@@ -145,8 +134,7 @@ mem_heap_zalloc(
@param[in] heap memory heap
@param[in] n number of bytes; if the heap is allowed to grow into
the buffer pool, this must be <= MEM_MAX_ALLOC_IN_BUF
@return allocated storage, NULL if did not succeed (only possible for
MEM_HEAP_BTR_SEARCH type heaps) */
@return allocated storage */
UNIV_INLINE
void*
mem_heap_alloc(
@@ -319,19 +307,17 @@ struct mem_block_info_t {
in the heap. This is defined only in the base
node and is set to ULINT_UNDEFINED in others. */
ulint type; /*!< type of heap: MEM_HEAP_DYNAMIC, or
MEM_HEAP_BUF possibly ORed to MEM_HEAP_BTR_SEARCH */
MEM_HEAP_BUFFER */
ulint free; /*!< offset in bytes of the first free position for
user data in the block */
ulint start; /*!< the value of the struct field 'free' at the
creation of the block */
void* free_block;
/* if the MEM_HEAP_BTR_SEARCH bit is set in type,
and this is the heap root, this can contain an
allocated buffer frame, which can be appended as a
free block to the heap, if we need more space;
otherwise, this is NULL */
void* buf_block;
#ifdef BTR_CUR_HASH_ADAPT
/** a cached block in the heap root */
Atomic_relaxed<buf_block_t*> ahi_block;
#endif
buf_block_t* buf_block;
/* if this block has been allocated from the buffer
pool, this contains the buf_block_t handle;
otherwise, this is NULL */
@@ -39,8 +39,7 @@ Created 6/8/1994 Heikki Tuuri
#endif /* UNIV_DEBUG */
/***************************************************************//**
Creates a memory heap block where data can be allocated.
@return own: memory heap block, NULL if did not succeed (only possible
for MEM_HEAP_BTR_SEARCH type heaps) */
@return own: memory heap block */
mem_block_t*
mem_heap_create_block_func(
/*=======================*/
@@ -62,19 +61,11 @@ mem_heap_block_free(
mem_heap_t* heap, /*!< in: heap */
mem_block_t* block); /*!< in: block to free */
/******************************************************************//**
Frees the free_block field from a memory heap. */
void
mem_heap_free_block_free(
/*=====================*/
mem_heap_t* heap); /*!< in: heap */
/***************************************************************//**
Adds a new block to a memory heap.
@param[in] heap memory heap
@param[in] n number of bytes needed
@return created block, NULL if did not succeed (only possible for
MEM_HEAP_BTR_SEARCH type heaps) */
@return created block */
mem_block_t*
mem_heap_add_block(
mem_heap_t* heap,
@@ -100,9 +91,7 @@ UNIV_INLINE
void
mem_block_set_type(mem_block_t* block, ulint type)
{
ut_ad((type == MEM_HEAP_DYNAMIC) || (type == MEM_HEAP_BUFFER)
|| (type == MEM_HEAP_BUFFER + MEM_HEAP_BTR_SEARCH));
ut_ad(type == MEM_HEAP_DYNAMIC || type == MEM_HEAP_BUFFER);
block->type = type;
}
@@ -157,8 +146,6 @@ mem_heap_zalloc(
mem_heap_t* heap,
ulint n)
{
ut_ad(heap);
ut_ad(!(heap->type & MEM_HEAP_BTR_SEARCH));
return(memset(mem_heap_alloc(heap, n), 0, n));
}
@@ -166,8 +153,7 @@ mem_heap_zalloc(
@param[in] heap memory heap
@param[in] n number of bytes; if the heap is allowed to grow into
the buffer pool, this must be <= MEM_MAX_ALLOC_IN_BUF
@return allocated storage, NULL if did not succeed (only possible for
MEM_HEAP_BTR_SEARCH type heaps) */
@return allocated storage */
UNIV_INLINE
void*
mem_heap_alloc(
@@ -289,11 +275,10 @@ void
mem_heap_empty(
mem_heap_t* heap)
{
#ifdef BTR_CUR_HASH_ADAPT
ut_ad(!heap->ahi_block);
#endif
mem_heap_free_heap_top(heap, (byte*) heap + mem_block_get_start(heap));
if (heap->free_block) {
mem_heap_free_block_free(heap);
}
}
/** Returns a pointer to the topmost element in a memory heap.
@@ -356,8 +341,7 @@ A single user buffer of 'size' will fit in the block.
@param[in] file_name File name where created
@param[in] line Line where created
@param[in] type Heap type
@return own: memory heap, NULL if did not succeed (only possible for
MEM_HEAP_BTR_SEARCH type heaps) */
@return own: memory heap */
UNIV_INLINE
mem_heap_t*
mem_heap_create_func(
@@ -401,15 +385,15 @@ void
mem_heap_free(
mem_heap_t* heap)
{
#ifdef BTR_CUR_HASH_ADAPT
ut_ad(!heap->ahi_block);
#endif
mem_block_t* block;
mem_block_t* prev_block;
block = UT_LIST_GET_LAST(heap->base);
if (heap->free_block) {
mem_heap_free_block_free(heap);
}
while (block != NULL) {
/* Store the contents of info before freeing current block
(it is erased in freeing) */
@@ -430,13 +414,10 @@ mem_heap_get_size(
/*==============*/
mem_heap_t* heap) /*!< in: heap */
{
ulint size = heap->total_size;
if (heap->free_block) {
size += srv_page_size;
}
return(size);
#ifdef BTR_CUR_HASH_ADAPT
ut_ad(!heap->ahi_block);
#endif
return heap->total_size;
}
/**********************************************************************//**
@@ -214,7 +214,6 @@ mem_heap_validate(
case MEM_HEAP_DYNAMIC:
break;
case MEM_HEAP_BUFFER:
case MEM_HEAP_BUFFER | MEM_HEAP_BTR_SEARCH:
ut_ad(block->len <= srv_page_size);
break;
default:
@@ -241,8 +240,7 @@ static void ut_strlcpy_rev(char* dst, const char* src, ulint size)
/***************************************************************//**
Creates a memory heap block where data can be allocated.
@return own: memory heap block, NULL if did not succeed (only possible
for MEM_HEAP_BTR_SEARCH type heaps) */
@return own: memory heap block */
mem_block_t*
mem_heap_create_block_func(
/*=======================*/
@@ -256,12 +254,11 @@ mem_heap_create_block_func(
ulint type) /*!< in: type of heap: MEM_HEAP_DYNAMIC or
MEM_HEAP_BUFFER */
{
buf_block_t* buf_block = NULL;
buf_block_t* buf_block;
mem_block_t* block;
ulint len;
ut_ad((type == MEM_HEAP_DYNAMIC) || (type == MEM_HEAP_BUFFER)
|| (type == MEM_HEAP_BUFFER + MEM_HEAP_BTR_SEARCH));
ut_ad(type == MEM_HEAP_DYNAMIC || type == MEM_HEAP_BUFFER);
if (heap != NULL) {
ut_d(mem_heap_validate(heap));
@@ -275,24 +272,11 @@ mem_heap_create_block_func(
ut_ad(type == MEM_HEAP_DYNAMIC || n <= MEM_MAX_ALLOC_IN_BUF);
block = static_cast<mem_block_t*>(ut_malloc_nokey(len));
buf_block = nullptr;
} else {
len = srv_page_size;
if ((type & MEM_HEAP_BTR_SEARCH) && heap) {
/* We cannot allocate the block from the
buffer pool, but must get the free block from
the heap header free block field */
buf_block = static_cast<buf_block_t*>(heap->free_block);
heap->free_block = NULL;
if (UNIV_UNLIKELY(!buf_block)) {
return(NULL);
}
} else {
buf_block = buf_block_alloc();
}
buf_block = buf_block_alloc();
block = (mem_block_t*) buf_block->page.frame;
}
@@ -303,7 +287,9 @@ mem_heap_create_block_func(
}
block->buf_block = buf_block;
block->free_block = NULL;
#ifdef BTR_CUR_HASH_ADAPT
block->ahi_block = nullptr;
#endif
ut_d(ut_strlcpy_rev(block->file_name, file_name,
sizeof(block->file_name)));
@@ -339,8 +325,7 @@ mem_heap_create_block_func(
/***************************************************************//**
Adds a new block to a memory heap.
@return created block, NULL if did not succeed (only possible for
MEM_HEAP_BTR_SEARCH type heaps) */
@return created block */
mem_block_t*
mem_heap_add_block(
/*===============*/
@@ -399,9 +384,6 @@ mem_heap_block_free(
{
ulint type;
ulint len;
buf_block_t* buf_block;
buf_block = static_cast<buf_block_t*>(block->buf_block);
UT_LIST_REMOVE(heap->base, block);
@@ -412,25 +394,10 @@ mem_heap_block_free(
len = block->len;
if (type == MEM_HEAP_DYNAMIC || len < srv_page_size / 2) {
ut_ad(!buf_block);
ut_ad(!block->buf_block);
ut_free(block);
} else {
ut_ad(type & MEM_HEAP_BUFFER);
buf_block_free(buf_block);
}
}
/******************************************************************//**
Frees the free_block field from a memory heap. */
void
mem_heap_free_block_free(
/*=====================*/
mem_heap_t* heap) /*!< in: heap */
{
if (UNIV_LIKELY_NULL(heap->free_block)) {
buf_block_free(static_cast<buf_block_t*>(heap->free_block));
heap->free_block = NULL;
buf_block_free(block->buf_block);
}
}
@@ -811,11 +811,10 @@ srv_printf_innodb_monitor(
for (ulint i = 0; i < btr_ahi_parts && btr_search_enabled; ++i) {
const auto part= &btr_search_sys.parts[i];
part->latch.rd_lock(SRW_LOCK_CALL);
ut_ad(part->heap->type == MEM_HEAP_FOR_BTR_SEARCH);
fprintf(file, "Hash table size " ULINTPF
", node heap has " ULINTPF " buffer(s)\n",
part->table.n_cells,
part->heap->base.count - !part->heap->free_block);
part->heap->base.count - !part->heap->ahi_block);
part->latch.rd_unlock();
}
@@ -949,10 +948,11 @@ srv_export_innodb_status(void)
const auto part= &btr_search_sys.parts[i];
part->latch.rd_lock(SRW_LOCK_CALL);
if (part->heap) {
ut_ad(part->heap->type == MEM_HEAP_FOR_BTR_SEARCH);
mem_adaptive_hash += mem_heap_get_size(part->heap)
+ part->table.n_cells * sizeof(hash_cell_t);
ut_ad(part->heap->type == MEM_HEAP_BUFFER);
mem_adaptive_hash += part->heap->total_size
+ !!part->heap->ahi_block * srv_page_size
+ part->table.n_cells
* sizeof *part->table.array;
}
part->latch.rd_unlock();
}