Commit 44e408b9 authored by marko

branches/innodb+: Merge revisions 3602:3608 from branches/zip:

  ------------------------------------------------------------------------
  r3607 | marko | 2008-12-30 22:33:31 +0200 (Tue, 30 Dec 2008) | 20 lines

  branches/zip: Remove the dependency on the MySQL HASH table implementation.
  Use the InnoDB hash table for keeping track of INNOBASE_SHARE objects.

  struct st_innobase_share: Make table_name const uchar*.  Add the member
  table_name_hash.

  innobase_open_tables: Change the type from HASH to hash_table_t*.

  innobase_get_key(): Remove.

  innobase_fold_name(): New function, for computing the fold value for the
  InnoDB hash table.

  get_share(), free_share(): Use the InnoDB hash functions.

  innobase_end(): Free innobase_open_tables before shutting down InnoDB.
  Shutting down InnoDB will invalidate all memory allocated via InnoDB.

  rb://65 approved by Heikki Tuuri.  This addresses Issue #104.
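
  A minimal, standalone sketch of the fold-and-chain pattern described
  above (toy names throughout; this is not the actual hash_table_t nor the
  HASH_SEARCH/HASH_INSERT macros): each share carries a table_name_hash
  "next" pointer, the table name is folded to select a cell, and the
  chain anchored in that cell is scanned linearly.

  #include <stdio.h>
  #include <stdlib.h>
  #include <string.h>

  /* Toy share object: table_name_hash chains shares whose names fold to
  the same cell, mirroring the pointer added to INNOBASE_SHARE. */
  typedef struct toy_share_struct toy_share_t;
  struct toy_share_struct {
          char*           table_name;
          toy_share_t*    table_name_hash;        /* next share in the cell */
  };

  #define TOY_N_CELLS     211

  static toy_share_t*     toy_cells[TOY_N_CELLS];

  /* Illustrative fold: any deterministic hash of the name would do here;
  the patch itself folds with system_charset_info->coll->hash_sort(). */
  static unsigned long
  toy_fold_name(const char* name)
  {
          unsigned long   fold = 1;

          while (*name) {
                  fold = fold * 31 + (unsigned char) *name++;
          }

          return(fold);
  }

  /* Rough analogue of get_share(): search the chain of the cell selected
  by the fold, and insert a new share at the head of the chain on a miss.
  Locking and reference counting are omitted. */
  static toy_share_t*
  toy_get_share(const char* table_name)
  {
          unsigned long   fold = toy_fold_name(table_name);
          toy_share_t**   cell = &toy_cells[fold % TOY_N_CELLS];
          toy_share_t*    share;

          for (share = *cell; share; share = share->table_name_hash) {
                  if (!strcmp(share->table_name, table_name)) {
                          return(share);  /* table is already open */
                  }
          }

          share = malloc(sizeof *share);
          share->table_name = malloc(strlen(table_name) + 1);
          strcpy(share->table_name, table_name);
          share->table_name_hash = *cell;
          *cell = share;

          return(share);
  }

  int
  main(void)
  {
          toy_share_t*    a = toy_get_share("test/t1");
          toy_share_t*    b = toy_get_share("test/t1");

          printf("same share: %d\n", a == b);     /* prints 1 */
          return(0);
  }
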
  ------------------------------------------------------------------------
  r3608 | marko | 2008-12-30 22:45:04 +0200 (Tue, 30 Dec 2008) | 22 lines

  branches/zip: When setting the PAGE_LEVEL of a compressed B-tree page
  from or to 0, compress the page at the same time.  This is necessary
  because the column information stored on the compressed page differs
  between leaf and non-leaf pages.  Leaf pages are identified by
  PAGE_LEVEL=0.  This bug was reported as Issue #150.
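
  The boundary that matters here is only whether the page is a leaf
  (PAGE_LEVEL=0).  A hedged sketch of that condition, using a
  hypothetical helper name that does not appear in the patch:

  #include <stdio.h>

  /* Hypothetical helper: a compressed page has to be recompressed when a
  PAGE_LEVEL change moves it across the leaf/non-leaf boundary, because
  the column information stored in the compressed page differs between
  leaf (level 0) and non-leaf pages. */
  static int
  level_change_crosses_leaf_boundary(
          unsigned long   old_level,
          unsigned long   new_level)
  {
          return((old_level == 0) != (new_level == 0));
  }

  int
  main(void)
  {
          /* 1: leaf -> non-leaf, recompress */
          printf("%d\n", level_change_crosses_leaf_boundary(0, 1));
          /* 0: stays non-leaf, the column information does not change */
          printf("%d\n", level_change_crosses_leaf_boundary(2, 1));
          return(0);
  }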

  Document the similarity between btr_page_create() and
  btr_page_empty().  Make the function signature of btr_page_empty()
  identical to that of btr_page_create().  (This adds the parameter "level".)

  btr_root_raise_and_insert(): Replace some code with a call to
  btr_page_empty().

  btr_attach_half_pages(): Assert that the page level has already been
  set on both block and new_block.  Do not set it again.

  btr_discard_only_page_on_level(): Document that this function is
  probably never called.  Make it work on a tree of any height.  (Tested
  on a tree of height 2 by disabling btr_lift_page_up().)
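
  A toy model of the reworked control flow (invented names and page
  numbers; not InnoDB code): walk from the given block towards the root,
  freeing one page per level, and finally empty the root at the level
  that has been reached.

  #include <stdio.h>

  #define TOY_ROOT_PAGE_NO        3       /* stands in for dict_index_get_page() */

  struct toy_page {
          unsigned                page_no;
          struct toy_page*        father; /* stands in for btr_page_get_father() */
  };

  /* Mirrors the loop structure of the rewritten function: free pages
  level by level until the root is reached, then empty the root. */
  static void
  toy_discard_only_page_on_level(struct toy_page* block)
  {
          unsigned        page_level = 0;

          while (block->page_no != TOY_ROOT_PAGE_NO) {
                  struct toy_page*        father = block->father;

                  printf("free page %u at level %u\n",
                         block->page_no, page_level);

                  block = father;
                  page_level++;
          }

          printf("empty root page %u; the tree is now a single empty page\n",
                 block->page_no);
  }

  int
  main(void)
  {
          struct toy_page root = {TOY_ROOT_PAGE_NO, NULL};
          struct toy_page mid  = {5, &root};
          struct toy_page leaf = {7, &mid};

          toy_discard_only_page_on_level(&leaf);
          return(0);
  }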

  rb://68
  ------------------------------------------------------------------------
parent 91b39ec6
@@ -263,7 +263,7 @@ btr_get_next_user_rec(
/******************************************************************
Creates a new index page (not the root, and also not
used in page reorganization). */
used in page reorganization). @see btr_page_empty(). */
static
void
btr_page_create(
@@ -1068,19 +1068,21 @@ btr_parse_page_reorganize(
}
/*****************************************************************
Empties an index page. */
Empties an index page. @see btr_page_create().*/
static
void
btr_page_empty(
/*===========*/
buf_block_t* block, /* in: page to be emptied */
page_zip_des_t* page_zip,/* out: compressed page, or NULL */
mtr_t* mtr, /* in: mtr */
dict_index_t* index) /* in: index of the page */
dict_index_t* index, /* in: index of the page */
ulint level, /* in: the B-tree level of the page */
mtr_t* mtr) /* in: mtr */
{
page_t* page = buf_block_get_frame(block);
ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX));
ut_ad(page_zip == buf_block_get_page_zip(block));
#ifdef UNIV_ZIP_DEBUG
ut_a(!page_zip || page_zip_validate(page_zip, page));
#endif /* UNIV_ZIP_DEBUG */
@@ -1091,10 +1093,10 @@ btr_page_empty(
segment headers, next page-field, etc.) is preserved intact */
if (UNIV_LIKELY_NULL(page_zip)) {
page_create_zip(block, index,
btr_page_get_level(page, mtr), mtr);
page_create_zip(block, index, level, mtr);
} else {
page_create(block, mtr, dict_table_is_comp(index->table));
btr_page_set_level(page, NULL, level, mtr);
}
block->check_index_page_at_flush = TRUE;
@@ -1156,7 +1158,6 @@ btr_root_raise_and_insert(
ut_ad(mtr_memo_contains(mtr, dict_index_get_lock(index),
MTR_MEMO_X_LOCK));
ut_ad(mtr_memo_contains(mtr, root_block, MTR_MEMO_PAGE_X_FIX));
btr_search_drop_page_hash_index(root_block);
/* Allocate a new page to the tree. Root splitting is done by first
moving the root records to the new page, emptying the root, putting
@@ -1229,12 +1230,7 @@ btr_root_raise_and_insert(
| REC_INFO_MIN_REC_FLAG);
/* Rebuild the root page to get free space */
if (UNIV_LIKELY_NULL(root_page_zip)) {
page_create_zip(root_block, index, level + 1, mtr);
} else {
page_create(root_block, mtr, dict_table_is_comp(index->table));
btr_page_set_level(root, NULL, level + 1, mtr);
}
btr_page_empty(root_block, root_page_zip, index, level + 1, mtr);
/* Set the next node and previous node fields, although
they should already have been set. The previous node field
@@ -1244,8 +1240,6 @@ btr_root_raise_and_insert(
btr_page_set_next(root, root_page_zip, FIL_NULL, mtr);
btr_page_set_prev(root, root_page_zip, FIL_NULL, mtr);
root_block->check_index_page_at_flush = TRUE;
page_cursor = btr_cur_get_page_cur(cursor);
/* Insert node pointer to the root */
@@ -1703,6 +1697,8 @@ btr_attach_half_pages(
/* Get the level of the split pages */
level = btr_page_get_level(buf_block_get_frame(block), mtr);
ut_ad(level
== btr_page_get_level(buf_block_get_frame(new_block), mtr));
/* Build the node pointer (= node key and page address) for the upper
half */
@@ -1759,11 +1755,9 @@ btr_attach_half_pages(
btr_page_set_prev(lower_page, lower_page_zip, prev_page_no, mtr);
btr_page_set_next(lower_page, lower_page_zip, upper_page_no, mtr);
btr_page_set_level(lower_page, lower_page_zip, level, mtr);
btr_page_set_prev(upper_page, upper_page_zip, lower_page_no, mtr);
btr_page_set_next(upper_page, upper_page_zip, next_page_no, mtr);
btr_page_set_level(upper_page, upper_page_zip, level, mtr);
}
/*****************************************************************
@@ -2367,11 +2361,7 @@ btr_lift_page_up(
btr_search_drop_page_hash_index(block);
/* Make the father empty */
btr_page_empty(father_block, father_page_zip, mtr, index);
/* Set the level before inserting records, because
page_zip_compress() requires that the first user record
on a non-leaf page has the min_rec_mark set. */
btr_page_set_level(father_page, father_page_zip, page_level, mtr);
btr_page_empty(father_block, father_page_zip, index, page_level, mtr);
/* Copy the records to the father page one by one. */
if (0
@@ -2418,7 +2408,7 @@ btr_lift_page_up(
/* Free the file page */
btr_page_free(index, block, mtr);
/* We play safe and reset the free bits for the father */
/* We play it safe and reset the free bits for the father */
if (!dict_index_is_clust(index)) {
ibuf_reset_free_bits(father_block);
}
@@ -2719,7 +2709,10 @@ btr_compress(
}
/*****************************************************************
Discards a page that is the only page on its level. */
Discards a page that is the only page on its level. This will empty
the whole B-tree, leaving just an empty root page. This function
should never be reached, because btr_compress(), which is invoked in
delete operations, calls btr_lift_page_up() to flatten the B-tree. */
static
void
btr_discard_only_page_on_level(
@@ -2728,60 +2721,52 @@ btr_discard_only_page_on_level(
buf_block_t* block, /* in: page which is the only on its level */
mtr_t* mtr) /* in: mtr */
{
btr_cur_t father_cursor;
buf_block_t* father_block;
page_t* father_page;
page_zip_des_t* father_page_zip;
page_t* page = buf_block_get_frame(block);
ulint page_level;
ulint page_level = 0;
ut_ad(btr_page_get_prev(page, mtr) == FIL_NULL);
ut_ad(btr_page_get_next(page, mtr) == FIL_NULL);
ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX));
btr_search_drop_page_hash_index(block);
while (buf_block_get_page_no(block) != dict_index_get_page(index)) {
btr_cur_t cursor;
buf_block_t* father;
const page_t* page = buf_block_get_frame(block);
btr_page_get_father(index, block, mtr, &father_cursor);
father_block = btr_cur_get_block(&father_cursor);
father_page_zip = buf_block_get_page_zip(father_block);
father_page = buf_block_get_frame(father_block);
ut_a(page_get_n_recs(page) == 1);
ut_a(page_level == btr_page_get_level(page, mtr));
ut_a(btr_page_get_prev(page, mtr) == FIL_NULL);
ut_a(btr_page_get_next(page, mtr) == FIL_NULL);
page_level = btr_page_get_level(page, mtr);
ut_ad(mtr_memo_contains(mtr, block, MTR_MEMO_PAGE_X_FIX));
btr_search_drop_page_hash_index(block);
lock_update_discard(father_block, PAGE_HEAP_NO_SUPREMUM, block);
btr_page_get_father(index, block, mtr, &cursor);
father = btr_cur_get_block(&cursor);
btr_page_set_level(father_page, father_page_zip, page_level, mtr);
lock_update_discard(father, PAGE_HEAP_NO_SUPREMUM, block);
/* Free the file page */
btr_page_free(index, block, mtr);
/* Free the file page */
btr_page_free(index, block, mtr);
if (UNIV_LIKELY(buf_block_get_page_no(father_block)
== dict_index_get_page(index))) {
/* The father is the root page */
block = father;
page_level++;
}
/* block is the root page, which must be empty, except
for the node pointer to the (now discarded) block(s). */
#ifdef UNIV_BTR_DEBUG
if (!dict_index_is_ibuf(index)) {
const page_t* root
= buf_block_get_frame(father_block);
const ulint space
= dict_index_get_space(index);
ut_a(btr_root_fseg_validate(
FIL_PAGE_DATA + PAGE_BTR_SEG_LEAF
+ root, space));
ut_a(btr_root_fseg_validate(
FIL_PAGE_DATA + PAGE_BTR_SEG_TOP
+ root, space));
}
if (!dict_index_is_ibuf(index)) {
const page_t* root = buf_block_get_frame(block);
const ulint space = dict_index_get_space(index);
ut_a(btr_root_fseg_validate(FIL_PAGE_DATA + PAGE_BTR_SEG_LEAF
+ root, space));
ut_a(btr_root_fseg_validate(FIL_PAGE_DATA + PAGE_BTR_SEG_TOP
+ root, space));
}
#endif /* UNIV_BTR_DEBUG */
btr_page_empty(father_block, father_page_zip, mtr, index);
/* We play safe and reset the free bits for the father */
if (!dict_index_is_clust(index)) {
ibuf_reset_free_bits(father_block);
}
} else {
ut_ad(page_get_n_recs(father_page) == 1);
btr_page_empty(block, buf_block_get_page_zip(block), index, 0, mtr);
btr_discard_only_page_on_level(index, father_block, mtr);
/* We play it safe and reset the free bits for the root */
if (!dict_index_is_clust(index)) {
ibuf_reset_free_bits(block);
}
}
......
@@ -28,7 +28,6 @@
#include <mysql_priv.h>
#include <m_ctype.h>
#include <hash.h>
#include <mysys_err.h>
#include <mysql/plugin.h>
@@ -180,14 +179,12 @@ it every INNOBASE_WAKE_INTERVAL'th step. */
#define INNOBASE_WAKE_INTERVAL 32
static ulong innobase_active_counter = 0;
static HASH innobase_open_tables;
static hash_table_t* innobase_open_tables;
#ifdef __NETWARE__ /* some special cleanup for NetWare */
bool nw_panic = FALSE;
#endif
static uchar* innobase_get_key(INNOBASE_SHARE *share, size_t *length,
my_bool not_used __attribute__((unused)));
static INNOBASE_SHARE *get_share(const char *table_name);
static void free_share(INNOBASE_SHARE *share);
static int innobase_close_connection(handlerton *hton, THD* thd);
@@ -2114,8 +2111,7 @@ innobase_init(
goto error;
}
(void) hash_init(&innobase_open_tables,system_charset_info, 32, 0, 0,
(hash_get_key) innobase_get_key, 0, 0);
innobase_open_tables = hash_create(200);
pthread_mutex_init(&innobase_share_mutex, MY_MUTEX_INIT_FAST);
pthread_mutex_init(&prepare_commit_mutex, MY_MUTEX_INIT_FAST);
pthread_mutex_init(&commit_threads_m, MY_MUTEX_INIT_FAST);
@@ -2159,10 +2155,11 @@ innobase_end(handlerton *hton, ha_panic_function type)
srv_fast_shutdown = (ulint) innobase_fast_shutdown;
innodb_inited = 0;
hash_table_free(innobase_open_tables);
innobase_open_tables = NULL;
if (innobase_shutdown_for_mysql() != DB_SUCCESS) {
err = 1;
}
hash_free(&innobase_open_tables);
my_free(internal_innobase_data_file_path,
MYF(MY_ALLOW_ZERO_PTR));
pthread_mutex_destroy(&innobase_share_mutex);
@@ -8133,12 +8130,21 @@ bool innobase_show_status(handlerton *hton, THD* thd,
locking.
****************************************************************************/
static uchar* innobase_get_key(INNOBASE_SHARE* share, size_t *length,
my_bool not_used __attribute__((unused)))
/****************************************************************************
Folds a string in system_charset_info. */
static
ulint
innobase_fold_name(
/*===============*/
/* out: fold value of the name */
const uchar* name, /* in: string to be folded */
size_t length) /* in: length of the name in bytes */
{
*length=share->table_name_length;
ulong n1 = 1, n2 = 4;
return (uchar*) share->table_name;
system_charset_info->coll->hash_sort(system_charset_info,
name, length, &n1, &n2);
return((ulint) n1);
}
static INNOBASE_SHARE* get_share(const char* table_name)
@@ -8147,24 +8153,29 @@ static INNOBASE_SHARE* get_share(const char* table_name)
pthread_mutex_lock(&innobase_share_mutex);
uint length=(uint) strlen(table_name);
if (!(share=(INNOBASE_SHARE*) hash_search(&innobase_open_tables,
(uchar*) table_name,
length))) {
ulint fold = innobase_fold_name((const uchar*) table_name, length);
HASH_SEARCH(table_name_hash, innobase_open_tables, fold,
INNOBASE_SHARE*, share,
!my_strnncoll(system_charset_info,
share->table_name,
share->table_name_length,
(const uchar*) table_name, length));
if (!share) {
/* TODO: invoke HASH_MIGRATE if innobase_open_tables
grows too big */
share = (INNOBASE_SHARE *) my_malloc(sizeof(*share)+length+1,
MYF(MY_FAE | MY_ZEROFILL));
share->table_name_length=length;
share->table_name=(char*) (share+1);
strmov(share->table_name,table_name);
share->table_name_length = length;
share->table_name = (uchar*) memcpy(share + 1,
table_name, length + 1);
if (my_hash_insert(&innobase_open_tables,
(uchar*) share)) {
pthread_mutex_unlock(&innobase_share_mutex);
my_free(share,0);
return(0);
}
HASH_INSERT(INNOBASE_SHARE, table_name_hash,
innobase_open_tables, fold, share);
thr_lock_init(&share->lock);
pthread_mutex_init(&share->mutex,MY_MUTEX_INIT_FAST);
@@ -8180,11 +8191,34 @@ static void free_share(INNOBASE_SHARE* share)
{
pthread_mutex_lock(&innobase_share_mutex);
#ifdef UNIV_DEBUG
INNOBASE_SHARE* share2;
ulint fold = innobase_fold_name(share->table_name,
share->table_name_length);
HASH_SEARCH(table_name_hash, innobase_open_tables, fold,
INNOBASE_SHARE*, share2,
!my_strnncoll(system_charset_info,
share->table_name,
share->table_name_length,
share2->table_name,
share2->table_name_length));
ut_a(share2 == share);
#endif /* UNIV_DEBUG */
if (!--share->use_count) {
hash_delete(&innobase_open_tables, (uchar*) share);
ulint fold = innobase_fold_name(share->table_name,
share->table_name_length);
HASH_DELETE(INNOBASE_SHARE, table_name_hash,
innobase_open_tables, fold, share);
thr_lock_delete(&share->lock);
pthread_mutex_destroy(&share->mutex);
my_free(share, MYF(0));
/* TODO: invoke HASH_MIGRATE if innobase_open_tables
shrinks too much */
}
pthread_mutex_unlock(&innobase_share_mutex);
......
@@ -27,8 +27,9 @@
typedef struct st_innobase_share {
THR_LOCK lock;
pthread_mutex_t mutex;
char *table_name;
const uchar *table_name;
uint table_name_length,use_count;
void* table_name_hash;
} INNOBASE_SHARE;
......