Commit 36122594 authored by Bjorn Munch

merge from 5.1 main minus merge jam pushes

parents 6f8928cf 59e022c1
......@@ -61,10 +61,15 @@ Usage: $0 [OPTIONS]
--cross-bootstrap For internal use. Used when building the MySQL system
tables on a different host than the target.
--datadir=path The path to the MySQL data directory.
--defaults-extra-file=name
Read this file after the global files are read.
--defaults-file=name Only read default options from the given file name.
--force Causes mysql_install_db to run even if DNS does not
work. In that case, grant table entries that normally
use hostnames will use IP addresses.
--help Display this help and exit.
--ldata=path The path to the MySQL data directory. Same as --datadir.
--no-defaults Don't read default options from any option file.
--rpm For internal use. This option is used by RPM files
during the MySQL installation process.
--skip-name-resolve Use IP addresses rather than hostnames when creating
......
......@@ -46,10 +46,15 @@ Usage: $0 [OPTIONS]
--cross-bootstrap For internal use. Used when building the MySQL system
tables on a different host than the target.
--datadir=path The path to the MySQL data directory.
--defaults-extra-file=name
Read this file after the global files are read.
--defaults-file=name Only read default options from the given file name.
--force Causes mysql_install_db to run even if DNS does not
work. In that case, grant table entries that normally
use hostnames will use IP addresses.
--help Display this help and exit.
--ldata=path The path to the MySQL data directory. Same as --datadir.
--no-defaults Don't read default options from any option file.
--rpm For internal use. This option is used by RPM files
during the MySQL installation process.
--skip-name-resolve Use IP addresses rather than hostnames when creating
......
......@@ -1683,16 +1683,16 @@ ibuf_add_free_page(
page = buf_page_get(space, page_no, RW_X_LATCH, &mtr);
#ifdef UNIV_SYNC_DEBUG
buf_page_dbg_add_level(page, SYNC_TREE_NODE_NEW);
#endif /* UNIV_SYNC_DEBUG */
ibuf_enter();
mutex_enter(&ibuf_mutex);
root = ibuf_tree_root_get(ibuf_data, space, &mtr);
#ifdef UNIV_SYNC_DEBUG
buf_page_dbg_add_level(page, SYNC_TREE_NODE_NEW);
#endif /* UNIV_SYNC_DEBUG */
/* Add the page to the free list and update the ibuf size data */
flst_add_last(root + PAGE_HEADER + PAGE_BTR_IBUF_FREE_LIST,
......
2011-09-12 The InnoDB Team
* row/row0sel.c:
Fix Bug#12601439 CONSISTENT READ FAILURE IN COLUMN PREFIX INDEX
2011-09-08 The InnoDB Team
* btr/btr0cur.c, include/page0page.h, include/row0upd.ic:
Fix Bug#12948130 UNNECESSARY X-LOCKING OF ADAPTIVE HASH INDEX
2011-09-06 The InnoDB Team
* buf/buf0buddy.c:
Fix Bug#12950803 62294: BUF_BUDDY_RELOCATE CALLS GETTIMEOFDAY
WHILE HOLDING BUFFER POOL MUTEX
2011-09-06 The InnoDB Team
* include/trx0undo.h, trx/trx0rec.c, trx/trx0undo.c:
Fix Bug#12547647 UPDATE LOGGING COULD EXCEED LOG PAGE SIZE
2011-08-29 The InnoDB Team
* btr/btr0btr.c, btr/btr0cur.c, fsp/fsp0fsp.c,
......@@ -7,11 +28,6 @@
Fix Bug#12704861 Corruption after a crash during BLOB update
and other regressions from the fix of Bug#12612184
2011-08-23 The InnoDB Team
* include/trx0undo.h, trx/trx0rec.c, trx/trx0undo.c:
Fix Bug#12547647 UPDATE LOGGING COULD EXCEED LOG PAGE SIZE
2011-08-15 The InnoDB Team
* btr/btr0btr.c, btr/btr0cur.c, btr/btr0pcur.c, btr/btr0sea.c,
......
......@@ -1727,6 +1727,7 @@ btr_cur_update_in_place(
roll_ptr_t roll_ptr = ut_dulint_zero;
trx_t* trx;
ulint was_delete_marked;
ibool is_hashed;
mem_heap_t* heap = NULL;
ulint offsets_[REC_OFFS_NORMAL_SIZE];
ulint* offsets = offsets_;
......@@ -1768,7 +1769,21 @@ btr_cur_update_in_place(
return(err);
}
if (block->is_hashed) {
if (!(flags & BTR_KEEP_SYS_FLAG)) {
row_upd_rec_sys_fields(rec, NULL,
index, offsets, trx, roll_ptr);
}
was_delete_marked = rec_get_deleted_flag(
rec, page_is_comp(buf_block_get_frame(block)));
is_hashed = block->is_hashed;
if (is_hashed) {
/* TO DO: Can we skip this if none of the fields
index->search_info->curr_n_fields
are being updated? */
/* The function row_upd_changes_ord_field_binary works only
if the update vector was built for a clustered index; we must
NOT call it if the index is secondary */
......@@ -1784,17 +1799,9 @@ btr_cur_update_in_place(
rw_lock_x_lock(&btr_search_latch);
}
if (!(flags & BTR_KEEP_SYS_FLAG)) {
row_upd_rec_sys_fields(rec, NULL,
index, offsets, trx, roll_ptr);
}
was_delete_marked = rec_get_deleted_flag(
rec, page_is_comp(buf_block_get_frame(block)));
row_upd_rec_in_place(rec, index, offsets, update, page_zip);
if (block->is_hashed) {
if (is_hashed) {
rw_lock_x_unlock(&btr_search_latch);
}
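Taken together, the hunks above reorder btr_cur_update_in_place() so that the system fields and the delete-mark flag are read first, and btr_search_latch is taken only around the in-place update itself, and only when the block actually carries an adaptive hash index. A minimal sketch of the resulting control flow (an assumption-laden condensation of the diff above, with error handling and the secondary-index special case omitted):

        ibool   is_hashed;

        if (!(flags & BTR_KEEP_SYS_FLAG)) {
                row_upd_rec_sys_fields(rec, NULL,
                                       index, offsets, trx, roll_ptr);
        }

        was_delete_marked = rec_get_deleted_flag(
                rec, page_is_comp(buf_block_get_frame(block)));

        /* Cache the flag so that the lock and unlock decisions
        below agree even if block->is_hashed changes meanwhile. */
        is_hashed = block->is_hashed;

        if (is_hashed) {
                rw_lock_x_lock(&btr_search_latch);
        }

        row_upd_rec_in_place(rec, index, offsets, update, page_zip);

        if (is_hashed) {
                rw_lock_x_unlock(&btr_search_latch);
        }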
......@@ -2520,7 +2527,8 @@ btr_cur_parse_del_mark_set_clust_rec(
/* We do not need to reserve btr_search_latch, as the page
is only being recovered, and there cannot be a hash index to
it. */
it. Besides, these fields are being updated in place
and the adaptive hash index does not depend on them. */
btr_rec_set_deleted_flag(rec, page_zip, val);
......@@ -2600,9 +2608,9 @@ btr_cur_del_mark_set_clust_rec(
return(err);
}
if (block->is_hashed) {
rw_lock_x_lock(&btr_search_latch);
}
/* The btr_search_latch is not needed here, because
the adaptive hash index does not depend on the delete-mark
and the delete-mark is being updated in place. */
page_zip = buf_block_get_page_zip(block);
......@@ -2616,10 +2624,6 @@ btr_cur_del_mark_set_clust_rec(
index, offsets, trx, roll_ptr);
}
if (block->is_hashed) {
rw_lock_x_unlock(&btr_search_latch);
}
btr_cur_del_mark_set_clust_rec_log(flags, rec, index, val, trx,
roll_ptr, mtr);
......@@ -2695,7 +2699,8 @@ btr_cur_parse_del_mark_set_sec_rec(
/* We do not need to reserve btr_search_latch, as the page
is only being recovered, and there cannot be a hash index to
it. */
it. Besides, the delete-mark flag is being updated in place
and the adaptive hash index does not depend on it. */
btr_rec_set_deleted_flag(rec, page_zip, val);
}
......@@ -2743,16 +2748,11 @@ btr_cur_del_mark_set_sec_rec(
ut_ad(!!page_rec_is_comp(rec)
== dict_table_is_comp(cursor->index->table));
if (block->is_hashed) {
rw_lock_x_lock(&btr_search_latch);
}
/* We do not need to reserve btr_search_latch, as the
delete-mark flag is being updated in place and the adaptive
hash index does not depend on it. */
btr_rec_set_deleted_flag(rec, buf_block_get_page_zip(block), val);
if (block->is_hashed) {
rw_lock_x_unlock(&btr_search_latch);
}
btr_cur_del_mark_set_sec_rec_log(rec, val, mtr);
return(DB_SUCCESS);
......@@ -2772,8 +2772,11 @@ btr_cur_del_unmark_for_ibuf(
uncompressed */
mtr_t* mtr) /*!< in: mtr */
{
/* We do not need to reserve btr_search_latch, as the page has just
been read to the buffer pool and there cannot be a hash index to it. */
/* We do not need to reserve btr_search_latch, as the page
has just been read to the buffer pool and there cannot be
a hash index to it. Besides, the delete-mark flag is being
updated in place and the adaptive hash index does not depend
on it. */
btr_rec_set_deleted_flag(rec, page_zip, FALSE);
......
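All of the delete-mark hunks above follow the same reasoning: the delete-mark is a single flag in the record header, not one of the field values the adaptive hash index is keyed on, so the rw_lock_x_lock/rw_lock_x_unlock round-trip on btr_search_latch around btr_rec_set_deleted_flag() can simply be dropped. A minimal sketch of the simplified secondary-index path, condensed (as an assumption) from btr_cur_del_mark_set_sec_rec above:

        ut_ad(!!page_rec_is_comp(rec)
              == dict_table_is_comp(cursor->index->table));

        /* No btr_search_latch here: the delete-mark flag is being
        updated in place and the adaptive hash index does not
        depend on it. */
        btr_rec_set_deleted_flag(
                rec, buf_block_get_page_zip(block), val);

        btr_cur_del_mark_set_sec_rec_log(rec, val, mtr);

        return(DB_SUCCESS);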
......@@ -327,7 +327,6 @@ buf_buddy_relocate(
{
buf_page_t* bpage;
const ulint size = BUF_BUDDY_LOW << i;
ullint usec = ut_time_us(NULL);
mutex_t* mutex;
ulint space;
ulint page_no;
......@@ -394,6 +393,7 @@ buf_buddy_relocate(
if (buf_page_can_relocate(bpage)) {
/* Relocate the compressed page. */
ullint usec = ut_time_us(NULL);
ut_a(bpage->zip.data == src);
memcpy(dst, src, size);
bpage->zip.data = dst;
......
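The buf_buddy_relocate() hunks defer the ut_time_us() call (which boils down to gettimeofday()) from the top of the function, where it ran on every invocation while the buffer pool mutex is held, into the branch where a relocation actually takes place. A minimal sketch of the resulting shape (simplified; the statistics update is only indicated by a comment):

        if (buf_page_can_relocate(bpage)) {
                /* Relocate the compressed page. Only now is the
                start time needed, to account the time spent on
                the relocation. */
                ullint  usec = ut_time_us(NULL);

                ut_a(bpage->zip.data == src);
                memcpy(dst, src, size);
                bpage->zip.data = dst;

                /* ... update the buddy relocation statistics
                using usec ... */
        }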
......@@ -1766,18 +1766,17 @@ ibuf_add_free_page(void)
block = buf_page_get(
IBUF_SPACE_ID, 0, page_no, RW_X_LATCH, &mtr);
buf_block_dbg_add_level(block, SYNC_IBUF_TREE_NODE_NEW);
page = buf_block_get_frame(block);
}
ibuf_enter();
mutex_enter(&ibuf_mutex);
root = ibuf_tree_root_get(&mtr);
buf_block_dbg_add_level(block, SYNC_IBUF_TREE_NODE_NEW);
page = buf_block_get_frame(block);
}
/* Add the page to the free list and update the ibuf size data */
flst_add_last(root + PAGE_HEADER + PAGE_BTR_IBUF_FREE_LIST,
......
......@@ -68,10 +68,7 @@ typedef byte page_header_t;
#define PAGE_MAX_TRX_ID 18 /* highest id of a trx which may have modified
a record on the page; a dulint; defined only
in secondary indexes and in the insert buffer
tree; NOTE: this may be modified only
when the thread has an x-latch to the page,
and ALSO an x-latch to btr_search_latch
if there is a hash index to the page! */
tree */
#define PAGE_HEADER_PRIV_END 26 /* end of private data structure of the page
header which are set in a page create */
/*----*/
......
......@@ -157,11 +157,6 @@ row_upd_rec_sys_fields(
{
ut_ad(dict_index_is_clust(index));
ut_ad(rec_offs_validate(rec, index, offsets));
#ifdef UNIV_SYNC_DEBUG
if (!rw_lock_own(&btr_search_latch, RW_LOCK_EX)) {
ut_ad(!buf_block_align(rec)->is_hashed);
}
#endif /* UNIV_SYNC_DEBUG */
if (UNIV_LIKELY_NULL(page_zip)) {
ulint pos = dict_index_get_sys_col_pos(index, DATA_TRX_ID);
......
......@@ -3221,6 +3221,19 @@ check_next_foreign:
"index_id CHAR;\n"
"foreign_id CHAR;\n"
"found INT;\n"
"DECLARE CURSOR cur_fk IS\n"
"SELECT ID FROM SYS_FOREIGN\n"
"WHERE FOR_NAME = :table_name\n"
"AND TO_BINARY(FOR_NAME)\n"
" = TO_BINARY(:table_name)\n"
"LOCK IN SHARE MODE;\n"
"DECLARE CURSOR cur_idx IS\n"
"SELECT ID FROM SYS_INDEXES\n"
"WHERE TABLE_ID = table_id\n"
"LOCK IN SHARE MODE;\n"
"BEGIN\n"
"SELECT ID INTO table_id\n"
"FROM SYS_TABLES\n"
......@@ -3243,13 +3256,9 @@ check_next_foreign:
"IF (:table_name = 'SYS_FOREIGN_COLS') THEN\n"
" found := 0;\n"
"END IF;\n"
"OPEN cur_fk;\n"
"WHILE found = 1 LOOP\n"
" SELECT ID INTO foreign_id\n"
" FROM SYS_FOREIGN\n"
" WHERE FOR_NAME = :table_name\n"
" AND TO_BINARY(FOR_NAME)\n"
" = TO_BINARY(:table_name)\n"
" LOCK IN SHARE MODE;\n"
" FETCH cur_fk INTO foreign_id;\n"
" IF (SQL % NOTFOUND) THEN\n"
" found := 0;\n"
" ELSE\n"
......@@ -3259,12 +3268,11 @@ check_next_foreign:
" WHERE ID = foreign_id;\n"
" END IF;\n"
"END LOOP;\n"
"CLOSE cur_fk;\n"
"found := 1;\n"
"OPEN cur_idx;\n"
"WHILE found = 1 LOOP\n"
" SELECT ID INTO index_id\n"
" FROM SYS_INDEXES\n"
" WHERE TABLE_ID = table_id\n"
" LOCK IN SHARE MODE;\n"
" FETCH cur_idx INTO index_id;\n"
" IF (SQL % NOTFOUND) THEN\n"
" found := 0;\n"
" ELSE\n"
......@@ -3275,6 +3283,7 @@ check_next_foreign:
" AND TABLE_ID = table_id;\n"
" END IF;\n"
"END LOOP;\n"
"CLOSE cur_idx;\n"
"DELETE FROM SYS_COLUMNS\n"
"WHERE TABLE_ID = table_id;\n"
"DELETE FROM SYS_TABLES\n"
......
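The rewritten DROP TABLE procedure above replaces the repeated SELECT ... INTO statements inside the two loops with explicit cursors: the row source is declared once, and each pass through a loop issues a FETCH that advances the cursor, testing SQL % NOTFOUND to terminate. In isolation, the cursor idiom in InnoDB's internal SQL looks like the following sketch (the procedure and cursor names are hypothetical, made up for illustration, and not part of the actual statement):

        /* Hypothetical fragment for illustration only. */
        "PROCEDURE EXAMPLE_PROC () IS\n"
        "foreign_id CHAR;\n"
        "found INT;\n"
        "DECLARE CURSOR cur_x IS\n"
        "SELECT ID FROM SYS_FOREIGN\n"
        "WHERE FOR_NAME = :table_name\n"
        "LOCK IN SHARE MODE;\n"
        "BEGIN\n"
        "found := 1;\n"
        "OPEN cur_x;\n"
        "WHILE found = 1 LOOP\n"
        "  FETCH cur_x INTO foreign_id;\n"
        "  IF (SQL % NOTFOUND) THEN\n"
        "    found := 0;\n"
        "  END IF;\n"
        "END LOOP;\n"
        "CLOSE cur_x;\n"
        "END;\n"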
/*****************************************************************************
Copyright (c) 1997, 2010, Innobase Oy. All Rights Reserved.
Copyright (c) 1997, 2011, Oracle and/or its affiliates. All Rights Reserved.
Copyright (c) 2008, Google Inc.
Portions of this file contain modifications contributed and copyrighted by
......@@ -101,12 +101,17 @@ row_sel_sec_rec_is_for_blob(
ulint clust_len, /*!< in: length of clust_field */
const byte* sec_field, /*!< in: column in secondary index */
ulint sec_len, /*!< in: length of sec_field */
ulint prefix_len, /*!< in: index column prefix length
in bytes */
ulint zip_size) /*!< in: compressed page size, or 0 */
{
ulint len;
byte buf[DICT_MAX_INDEX_COL_LEN];
ut_a(clust_len >= BTR_EXTERN_FIELD_REF_SIZE);
ut_ad(prefix_len >= sec_len);
ut_ad(prefix_len > 0);
ut_a(prefix_len <= sizeof buf);
if (UNIV_UNLIKELY
(!memcmp(clust_field + clust_len - BTR_EXTERN_FIELD_REF_SIZE,
......@@ -118,7 +123,7 @@ row_sel_sec_rec_is_for_blob(
return(FALSE);
}
len = btr_copy_externally_stored_field_prefix(buf, sizeof buf,
len = btr_copy_externally_stored_field_prefix(buf, prefix_len,
zip_size,
clust_field, clust_len);
......@@ -132,7 +137,7 @@ row_sel_sec_rec_is_for_blob(
}
len = dtype_get_at_most_n_mbchars(prtype, mbminlen, mbmaxlen,
sec_len, len, (const char*) buf);
prefix_len, len, (const char*) buf);
return(!cmp_data_data(mtype, prtype, buf, len, sec_field, sec_len));
}
......@@ -219,11 +224,20 @@ row_sel_sec_rec_is_for_clust_rec(
if (rec_offs_nth_extern(clust_offs, clust_pos)
&& len < sec_len) {
/* This function should never be
invoked on an Antelope format table,
because they should always contain
enough prefix in the clustered index
record. */
ut_ad(dict_table_get_format(clust_index->table)
>= DICT_TF_FORMAT_ZIP);
if (!row_sel_sec_rec_is_for_blob(
col->mtype, col->prtype,
col->mbminlen, col->mbmaxlen,
clust_field, clust_len,
sec_field, sec_len,
ifield->prefix_len,
dict_table_zip_size(
clust_index->table))) {
goto inequal;
......@@ -494,7 +508,7 @@ sel_col_prefetch_buf_alloc(
sel_buf = column->prefetch_buf + i;
sel_buf->data = NULL;
sel_buf->len = 0;
sel_buf->val_buf_size = 0;
}
}
......@@ -519,6 +533,8 @@ sel_col_prefetch_buf_free(
mem_free(sel_buf->data);
}
}
mem_free(prefetch_buf);
}
/*********************************************************************//**
......
......@@ -1258,8 +1258,7 @@ sync_thread_add_level(
break;
case SYNC_IBUF_INDEX_TREE:
if (sync_thread_levels_contain(array, SYNC_FSP)) {
ut_a(sync_thread_levels_g(
array, SYNC_FSP_PAGE - 1, TRUE));
ut_a(sync_thread_levels_g(array, level - 1, TRUE));
} else {
ut_a(sync_thread_levels_g(
array, SYNC_IBUF_TREE_NODE - 1, TRUE));
......
......@@ -1099,7 +1099,7 @@ trx_undo_rec_get_partial_row(
/***********************************************************************//**
Erases the unused undo log page end.
@return TRUE if the page contained something, FALSE if it was empty */
static __attribute__((nonnull, warn_unused_result))
static __attribute__((nonnull))
ibool
trx_undo_erase_page_end(
/*====================*/
......@@ -1110,16 +1110,11 @@ trx_undo_erase_page_end(
first_free = mach_read_from_2(undo_page + TRX_UNDO_PAGE_HDR
+ TRX_UNDO_PAGE_FREE);
if (first_free == TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_HDR_SIZE) {
/* This was an empty page to begin with.
Do nothing here; the caller should free the page. */
return(FALSE);
}
memset(undo_page + first_free, 0xff,
(UNIV_PAGE_SIZE - FIL_PAGE_DATA_END) - first_free);
mlog_write_initial_log_record(undo_page, MLOG_UNDO_ERASE_END, mtr);
return(TRUE);
return(first_free != TRX_UNDO_PAGE_HDR + TRX_UNDO_PAGE_HDR_SIZE);
}
/***********************************************************//**
......@@ -1141,11 +1136,7 @@ trx_undo_parse_erase_page_end(
return(ptr);
}
if (!trx_undo_erase_page_end(page, mtr)) {
/* The function trx_undo_erase_page_end() should not
have done anything to an empty page. */
ut_ad(0);
}
trx_undo_erase_page_end(page, mtr);
return(ptr);
}
......@@ -1290,6 +1281,18 @@ trx_undo_report_row_operation(
undo page. Discard the freshly allocated
page and return an error. */
/* When we remove a page from an undo
log, this is analogous to a
pessimistic insert in a B-tree, and we
must reserve the counterpart of the
tree latch, which is the rseg
mutex. We must commit the mini-transaction
first, because it may be holding lower-level
latches, such as SYNC_FSP and SYNC_FSP_PAGE. */
mtr_commit(&mtr);
mtr_start(&mtr);
mutex_enter(&rseg->mutex);
trx_undo_free_last_page(trx, undo, &mtr);
mutex_exit(&rseg->mutex);
......