Commit 38e09b2f authored by marko

branches/zip: Merge revisions 720:765 from trunk and reindent the code as per revisions r763:765.
parent 4b252f24
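
Most of the hunks below only re-wrap long lines: where a call used to keep its opening parenthesis on the first line and push the arguments far to the right, the reindented code ends the line with the function name (or the UNIV_LIKELY/UNIV_UNLIKELY macro) and starts the continuation line with the parenthesized argument list. A minimal sketch of that convention follows; the function and variable names are hypothetical and are not taken from this commit.

/* Illustrative sketch only: hypothetical names, not code from this patch. */

static int
checksum_of_buffer(const unsigned char* buf, unsigned long len)
{
	int	sum = 0;

	while (len--) {
		sum += *buf++;
	}

	return(sum);
}

static int
old_wrapping_style(const unsigned char* frame, unsigned long size)
{
	/* Before the reindent: the opening parenthesis stays on the
	first line and the remaining arguments are aligned under it. */
	return(checksum_of_buffer(frame,
				  size));
}

static int
new_wrapping_style(const unsigned char* frame, unsigned long size)
{
	/* After the reindent: the call is split before the opening
	parenthesis so the argument list fits on the line. */
	return(checksum_of_buffer
	       (frame, size));
}
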
......@@ -133,14 +133,13 @@ btr_pcur_store_position(
}
cursor->old_stored = BTR_PCUR_OLD_STORED;
cursor->old_rec = dict_tree_copy_rec_order_prefix(tree, rec,
&cursor->old_n_fields,
&cursor->old_rec_buf,
&cursor->buf_size);
cursor->old_rec = dict_tree_copy_rec_order_prefix
(tree, rec, &cursor->old_n_fields,
&cursor->old_rec_buf, &cursor->buf_size);
cursor->block_when_stored = buf_block_align(page);
cursor->modify_clock = buf_block_get_modify_clock(
cursor->block_when_stored);
cursor->modify_clock = buf_block_get_modify_clock
(cursor->block_when_stored);
}
/******************************************************************
......@@ -216,19 +215,20 @@ btr_pcur_restore_position(
ut_error;
}
if (UNIV_UNLIKELY(cursor->rel_pos == BTR_PCUR_AFTER_LAST_IN_TREE
if (UNIV_UNLIKELY
(cursor->rel_pos == BTR_PCUR_AFTER_LAST_IN_TREE
|| cursor->rel_pos == BTR_PCUR_BEFORE_FIRST_IN_TREE)) {
/* In these cases we do not try an optimistic restoration,
but always do a search */
btr_cur_open_at_index_side(
cursor->rel_pos == BTR_PCUR_BEFORE_FIRST_IN_TREE,
btr_cur_open_at_index_side
(cursor->rel_pos == BTR_PCUR_BEFORE_FIRST_IN_TREE,
btr_pcur_get_btr_cur(cursor)->index, latch_mode,
btr_pcur_get_btr_cur(cursor), mtr);
cursor->block_when_stored =
buf_block_align(btr_pcur_get_page(cursor));
cursor->block_when_stored
= buf_block_align(btr_pcur_get_page(cursor));
return(FALSE);
}
......@@ -242,7 +242,8 @@ btr_pcur_restore_position(
|| UNIV_LIKELY(latch_mode == BTR_MODIFY_LEAF)) {
/* Try optimistic restoration */
if (UNIV_LIKELY(buf_page_optimistic_get(latch_mode,
if (UNIV_LIKELY
(buf_page_optimistic_get(latch_mode,
cursor->block_when_stored, page,
cursor->modify_clock, mtr))) {
cursor->pos_state = BTR_PCUR_IS_POSITIONED;
......@@ -262,14 +263,16 @@ btr_pcur_restore_position(
index = btr_pcur_get_btr_cur(cursor)->index;
heap = mem_heap_create(256);
offsets1 = rec_get_offsets(cursor->old_rec,
index, NULL,
offsets1 = rec_get_offsets
(cursor->old_rec, index, NULL,
cursor->old_n_fields, &heap);
offsets2 = rec_get_offsets(rec, index, NULL,
offsets2 = rec_get_offsets
(rec, index, NULL,
cursor->old_n_fields, &heap);
ut_ad(cmp_rec_rec(cursor->old_rec,
rec, offsets1, offsets2, index) == 0);
ut_ad(!cmp_rec_rec(cursor->old_rec,
rec, offsets1, offsets2,
index));
mem_heap_free(heap);
#endif /* UNIV_DEBUG */
return(TRUE);
......@@ -308,7 +311,8 @@ btr_pcur_restore_position(
if (cursor->rel_pos == BTR_PCUR_ON
&& btr_pcur_is_on_user_rec(cursor, mtr)
&& 0 == cmp_dtuple_rec(tuple, btr_pcur_get_rec(cursor),
rec_get_offsets(btr_pcur_get_rec(cursor),
rec_get_offsets
(btr_pcur_get_rec(cursor),
btr_pcur_get_btr_cur(cursor)->index,
NULL, ULINT_UNDEFINED, &heap))) {
......@@ -316,10 +320,10 @@ btr_pcur_restore_position(
the cursor can now be on a different page! But we can retain
the value of old_rec */
cursor->block_when_stored =
buf_block_align(btr_pcur_get_page(cursor));
cursor->modify_clock =
buf_block_get_modify_clock(cursor->block_when_stored);
cursor->block_when_stored = buf_block_align
(btr_pcur_get_page(cursor));
cursor->modify_clock = buf_block_get_modify_clock
(cursor->block_when_stored);
cursor->old_stored = BTR_PCUR_OLD_STORED;
mem_heap_free(heap);
......@@ -432,8 +432,9 @@ btr_search_update_hash_ref(
}
tree_id = ((cursor->index)->tree)->id;
fold = rec_fold(rec, rec_get_offsets(rec, cursor->index,
offsets_, ULINT_UNDEFINED, &heap),
fold = rec_fold(rec,
rec_get_offsets(rec, cursor->index, offsets_,
ULINT_UNDEFINED, &heap),
block->curr_n_fields,
block->curr_n_bytes, tree_id);
if (UNIV_LIKELY_NULL(heap)) {
......@@ -617,8 +618,8 @@ btr_search_check_guess(
prev_rec = page_rec_get_prev(rec);
if (page_rec_is_infimum(prev_rec)) {
success = btr_page_get_prev(
buf_frame_align(prev_rec), mtr) == FIL_NULL;
success = btr_page_get_prev
(buf_frame_align(prev_rec), mtr) == FIL_NULL;
goto exit_func;
}
......@@ -642,8 +643,8 @@ btr_search_check_guess(
next_rec = page_rec_get_next(rec);
if (page_rec_is_supremum(next_rec)) {
if (btr_page_get_next(
buf_frame_align(next_rec), mtr) == FIL_NULL) {
if (btr_page_get_next
(buf_frame_align(next_rec), mtr) == FIL_NULL) {
cursor->up_match = 0;
success = TRUE;
......@@ -762,10 +763,10 @@ btr_search_guess_on_hash(
if (UNIV_LIKELY(!has_search_latch)) {
if (UNIV_UNLIKELY(!buf_page_get_known_nowait(latch_mode, page,
if (UNIV_UNLIKELY
(!buf_page_get_known_nowait(latch_mode, page,
BUF_MAKE_YOUNG,
__FILE__, __LINE__,
mtr))) {
__FILE__, __LINE__, mtr))) {
goto failure_unlock;
}
......@@ -802,7 +803,8 @@ btr_search_guess_on_hash(
right. */
if (UNIV_EXPECT(ut_dulint_cmp(tree_id, btr_page_get_index_id(page)), 0)
|| !btr_search_check_guess(cursor,
can_only_compare_to_cursor_rec, tuple, mode, mtr)) {
can_only_compare_to_cursor_rec,
tuple, mode, mtr)) {
if (UNIV_LIKELY(!has_search_latch)) {
btr_leaf_page_release(page, latch_mode, mtr);
}
......@@ -1029,8 +1031,10 @@ btr_search_drop_page_hash_index(
/* Corruption */
ut_print_timestamp(stderr);
fprintf(stderr,
" InnoDB: Corruption of adaptive hash index. After dropping\n"
"InnoDB: the hash index to a page of %s, still %lu hash nodes remain.\n",
" InnoDB: Corruption of adaptive hash index."
" After dropping\n"
"InnoDB: the hash index to a page of %s,"
" still %lu hash nodes remain.\n",
index->name, (ulong) block->n_pointers);
rw_lock_x_unlock(&btr_search_latch);
......@@ -1378,8 +1382,8 @@ btr_search_update_hash_on_delete(
tree_id = cursor->index->tree->id;
fold = rec_fold(rec, rec_get_offsets(rec, cursor->index, offsets_,
ULINT_UNDEFINED, &heap), block->curr_n_fields,
block->curr_n_bytes, tree_id);
ULINT_UNDEFINED, &heap),
block->curr_n_fields, block->curr_n_bytes, tree_id);
if (UNIV_LIKELY_NULL(heap)) {
mem_heap_free(heap);
}
......@@ -1570,11 +1574,11 @@ btr_search_update_hash_on_insert(
if (side == BTR_SEARCH_RIGHT_SIDE) {
ha_insert_for_fold(table, ins_fold, ins_rec);
/*
/*
fputs("Hash insert for ", stderr);
dict_index_name_print(stderr, cursor->index);
fprintf(stderr, " fold %lu\n", ins_fold);
*/
*/
} else {
ha_insert_for_fold(table, next_fold, next_rec);
}
......@@ -1635,10 +1639,11 @@ btr_search_validate(void)
offsets = rec_get_offsets((rec_t*) node->data,
block->index, offsets,
block->curr_n_fields
+ (block->curr_n_bytes > 0), &heap);
+ (block->curr_n_bytes > 0),
&heap);
if (!block->is_hashed
|| node->fold != rec_fold((rec_t*)(node->data),
if (!block->is_hashed || node->fold
!= rec_fold((rec_t*)(node->data),
offsets,
block->curr_n_fields,
block->curr_n_bytes,
......@@ -1647,28 +1652,36 @@ btr_search_validate(void)
ut_print_timestamp(stderr);
fprintf(stderr,
" InnoDB: Error in an adaptive hash index pointer to page %lu\n"
"ptr mem address %p index id %lu %lu, node fold %lu, rec fold %lu\n",
" InnoDB: Error in an adaptive hash"
" index pointer to page %lu\n"
"InnoDB: ptr mem address %p"
" index id %lu %lu,"
" node fold %lu, rec fold %lu\n",
(ulong) buf_frame_get_page_no(page),
node->data,
(ulong) ut_dulint_get_high(btr_page_get_index_id(page)),
(ulong) ut_dulint_get_low(btr_page_get_index_id(page)),
(ulong) ut_dulint_get_high
(btr_page_get_index_id(page)),
(ulong) ut_dulint_get_low
(btr_page_get_index_id(page)),
(ulong) node->fold,
(ulong) rec_fold((rec_t*)(node->data),
offsets,
block->curr_n_fields,
block->curr_n_bytes,
btr_page_get_index_id(page)));
btr_page_get_index_id
(page)));
fputs("InnoDB: Record ", stderr);
rec_print_new(stderr, (rec_t*)node->data,
offsets);
fprintf(stderr, "\nInnoDB: on that page."
"Page mem address %p, is hashed %lu, n fields %lu, n bytes %lu\n"
"side %lu\n",
page, (ulong) block->is_hashed,
" Page mem address %p, is hashed %lu,"
" n fields %lu, n bytes %lu\n"
"InnoDB: side %lu\n",
(void*) page, (ulong) block->is_hashed,
(ulong) block->curr_n_fields,
(ulong) block->curr_n_bytes, (ulong) block->curr_side);
(ulong) block->curr_n_bytes,
(ulong) block->curr_side);
if (n_page_dumps < 20) {
buf_page_print(page, 0);
......@@ -56,8 +56,7 @@ buf_flush_insert_into_flush_list(
ut_a(block->state == BUF_BLOCK_FILE_PAGE);
ut_ad((UT_LIST_GET_FIRST(buf_pool->flush_list) == NULL)
|| (ut_dulint_cmp(
(UT_LIST_GET_FIRST(buf_pool->flush_list))
|| (ut_dulint_cmp((UT_LIST_GET_FIRST(buf_pool->flush_list))
->oldest_modification,
block->oldest_modification) <= 0));
......@@ -119,7 +118,8 @@ buf_flush_ready_for_replace(
if (block->state != BUF_BLOCK_FILE_PAGE) {
ut_print_timestamp(stderr);
fprintf(stderr,
" InnoDB: Error: buffer block state %lu in the LRU list!\n",
" InnoDB: Error: buffer block state %lu"
" in the LRU list!\n",
(ulong)block->state);
ut_print_buf(stderr, block, sizeof(buf_block_t));
......@@ -260,36 +260,44 @@ buf_flush_buffered_writes(void)
continue;
}
if (UNIV_UNLIKELY(memcmp(block->frame + (FIL_PAGE_LSN + 4),
if (UNIV_UNLIKELY
(memcmp(block->frame + (FIL_PAGE_LSN + 4),
block->frame + (UNIV_PAGE_SIZE
- FIL_PAGE_END_LSN_OLD_CHKSUM + 4), 4))) {
- FIL_PAGE_END_LSN_OLD_CHKSUM + 4),
4))) {
ut_print_timestamp(stderr);
fprintf(stderr,
" InnoDB: ERROR: The page to be written seems corrupt!\n"
"InnoDB: The lsn fields do not match! Noticed in the buffer pool\n"
"InnoDB: before posting to the doublewrite buffer.\n");
" InnoDB: ERROR: The page to be written"
" seems corrupt!\n"
"InnoDB: The lsn fields do not match!"
" Noticed in the buffer pool\n"
"InnoDB: before posting to the"
" doublewrite buffer.\n");
}
if (!block->check_index_page_at_flush) {
} else if (page_is_comp(block->frame)) {
if (UNIV_UNLIKELY(!page_simple_validate_new(
block->frame))) {
if (UNIV_UNLIKELY
(!page_simple_validate_new(block->frame))) {
corrupted_page:
buf_page_print(block->frame, 0);
ut_print_timestamp(stderr);
fprintf(stderr,
" InnoDB: Apparent corruption of an index page n:o %lu in space %lu\n"
"InnoDB: to be written to data file. We intentionally crash server\n"
"InnoDB: to prevent corrupt data from ending up in data\n"
" InnoDB: Apparent corruption of an"
" index page n:o %lu in space %lu\n"
"InnoDB: to be written to data file."
" We intentionally crash server\n"
"InnoDB: to prevent corrupt data"
" from ending up in data\n"
"InnoDB: files.\n",
(ulong) block->offset,
(ulong) block->space);
ut_error;
}
} else if (UNIV_UNLIKELY(!page_simple_validate_old(
block->frame))) {
} else if (UNIV_UNLIKELY
(!page_simple_validate_old(block->frame))) {
goto corrupted_page;
}
......@@ -313,14 +321,17 @@ buf_flush_buffered_writes(void)
len2 += UNIV_PAGE_SIZE, i++) {
block = trx_doublewrite->buf_block_arr[i];
if (UNIV_LIKELY(!block->page_zip.data)
&& UNIV_UNLIKELY(memcmp(write_buf + len2
+ (FIL_PAGE_LSN + 4),
write_buf + len2 + (UNIV_PAGE_SIZE
&& UNIV_UNLIKELY
(memcmp(write_buf + len2 + (FIL_PAGE_LSN + 4),
write_buf + len2
+ (UNIV_PAGE_SIZE
- FIL_PAGE_END_LSN_OLD_CHKSUM + 4), 4))) {
ut_print_timestamp(stderr);
fprintf(stderr,
" InnoDB: ERROR: The page to be written seems corrupt!\n"
"InnoDB: The lsn fields do not match! Noticed in the doublewrite block1.\n");
" InnoDB: ERROR: The page to be written"
" seems corrupt!\n"
"InnoDB: The lsn fields do not match!"
" Noticed in the doublewrite block1.\n");
}
}
......@@ -343,14 +354,18 @@ buf_flush_buffered_writes(void)
len2 += UNIV_PAGE_SIZE, i++) {
block = trx_doublewrite->buf_block_arr[i];
if (UNIV_LIKELY(!block->page_zip.data)
&& UNIV_UNLIKELY(memcmp(write_buf + len2
+ (FIL_PAGE_LSN + 4),
write_buf + len2 + (UNIV_PAGE_SIZE
&& UNIV_UNLIKELY
(memcmp(write_buf + len2 + (FIL_PAGE_LSN + 4),
write_buf + len2
+ (UNIV_PAGE_SIZE
- FIL_PAGE_END_LSN_OLD_CHKSUM + 4), 4))) {
ut_print_timestamp(stderr);
fprintf(stderr,
" InnoDB: ERROR: The page to be written seems corrupt!\n"
"InnoDB: The lsn fields do not match! Noticed in the doublewrite block2.\n");
" InnoDB: ERROR: The page to be"
" written seems corrupt!\n"
"InnoDB: The lsn fields do not match!"
" Noticed in"
" the doublewrite block2.\n");
}
}
......@@ -375,16 +390,22 @@ buf_flush_buffered_writes(void)
(void*)block->page_zip.data,
(void*)block);
continue;
} else if (UNIV_UNLIKELY(memcmp(
block->frame + (FIL_PAGE_LSN + 4),
block->frame + (UNIV_PAGE_SIZE
- FIL_PAGE_END_LSN_OLD_CHKSUM + 4), 4))) {
} else if (UNIV_UNLIKELY
(memcmp(block->frame + (FIL_PAGE_LSN + 4),
block->frame
+ (UNIV_PAGE_SIZE
- FIL_PAGE_END_LSN_OLD_CHKSUM + 4),
4))) {
ut_print_timestamp(stderr);
fprintf(stderr,
" InnoDB: ERROR: The page to be written seems corrupt!\n"
"InnoDB: The lsn fields do not match! Noticed in the buffer pool\n"
"InnoDB: after posting and flushing the doublewrite buffer.\n"
"InnoDB: Page buf fix count %lu, io fix %lu, state %lu\n",
" InnoDB: ERROR: The page to be written"
" seems corrupt!\n"
"InnoDB: The lsn fields do not match!"
" Noticed in the buffer pool\n"
"InnoDB: after posting and flushing"
" the doublewrite buffer.\n"
"InnoDB: Page buf fix count %lu,"
" io fix %lu, state %lu\n",
(ulong)block->buf_fix_count,
(ulong)block->io_fix,
(ulong)block->state);
......@@ -506,9 +527,8 @@ buf_flush_init_for_writing(
mach_write_to_4(page_zip->data
+ FIL_PAGE_SPACE_OR_CHKSUM,
srv_use_checksums
? page_zip_calc_checksum(
page_zip->data,
zip_size)
? page_zip_calc_checksum
(page_zip->data, zip_size)
: BUF_NO_CHECKSUM_MAGIC);
return;
case FIL_PAGE_TYPE_ALLOCATED:
......@@ -536,8 +556,8 @@ buf_flush_init_for_writing(
mach_write_to_4(page_zip->data
+ FIL_PAGE_SPACE_OR_CHKSUM,
srv_use_checksums
? page_zip_calc_checksum(
page_zip->data, zip_size)
? page_zip_calc_checksum
(page_zip->data, zip_size)
: BUF_NO_CHECKSUM_MAGIC);
return;
}
......@@ -558,8 +578,9 @@ buf_flush_init_for_writing(
/* Store the new formula checksum */
mach_write_to_4(page + FIL_PAGE_SPACE_OR_CHKSUM,
srv_use_checksums ?
buf_calc_page_new_checksum(page) : BUF_NO_CHECKSUM_MAGIC);
srv_use_checksums
? buf_calc_page_new_checksum(page)
: BUF_NO_CHECKSUM_MAGIC);
/* We overwrite the first 4 bytes of the end lsn field to store
the old formula checksum. Since it depends also on the field
......@@ -567,8 +588,9 @@ buf_flush_init_for_writing(
new formula checksum. */
mach_write_to_4(page + UNIV_PAGE_SIZE - FIL_PAGE_END_LSN_OLD_CHKSUM,
srv_use_checksums ?
buf_calc_page_old_checksum(page) : BUF_NO_CHECKSUM_MAGIC);
srv_use_checksums
? buf_calc_page_old_checksum(page)
: BUF_NO_CHECKSUM_MAGIC);
}
/************************************************************************
......@@ -594,8 +616,8 @@ buf_flush_write_block_low(
#ifdef UNIV_LOG_DEBUG
if (!univ_log_debug_warned) {
univ_log_debug_warned = TRUE;
fputs(
"Warning: cannot force log to disk if UNIV_LOG_DEBUG is defined!\n"
fputs("Warning: cannot force log to disk if"
" UNIV_LOG_DEBUG is defined!\n"
"Crash recovery will not work!\n",
stderr);
}
......@@ -785,7 +807,8 @@ buf_flush_try_page(
#ifdef UNIV_DEBUG
if (buf_debug_prints) {
fprintf(stderr,
"Flushing single page space %lu, page no %lu \n",
"Flushing single page space %lu,"
" page no %lu \n",
(ulong) block->space,
(ulong) block->offset);
}
......@@ -975,9 +998,8 @@ buf_flush_batch(
old_page_count = page_count;
/* Try to flush also all the neighbors */
page_count +=
buf_flush_try_neighbors(space, offset,
flush_type);
page_count += buf_flush_try_neighbors
(space, offset, flush_type);
/* fprintf(stderr,
"Flush type %lu, page no %lu, neighb %lu\n",
flush_type, offset,
......@@ -103,7 +103,7 @@ buf_LRU_invalidate_tablespace(
if (block->space == id) {
#ifdef UNIV_DEBUG
if (buf_debug_prints) {
printf(
fprintf(stderr,
"Dropping space %lu page %lu\n",
(ulong) block->space,
(ulong) block->offset);
......@@ -216,7 +216,8 @@ buf_LRU_search_and_free_block(
#ifdef UNIV_DEBUG
if (buf_debug_prints) {
fprintf(stderr,
"Putting space %lu page %lu to free list\n",
"Putting space %lu page %lu"
" to free list\n",
(ulong) block->space,
(ulong) block->offset);
}
......@@ -346,14 +347,19 @@ buf_LRU_get_free_block(
ut_print_timestamp(stderr);
fprintf(stderr,
" InnoDB: ERROR: over 95 percent of the buffer pool is occupied by\n"
"InnoDB: lock heaps or the adaptive hash index! Check that your\n"
"InnoDB: transactions do not set too many row locks.\n"
"InnoDB: Your buffer pool size is %lu MB. Maybe you should make\n"
"InnoDB: the buffer pool bigger?\n"
"InnoDB: We intentionally generate a seg fault to print a stack trace\n"
"InnoDB: on Linux!\n",
(ulong)(buf_pool->curr_size / (1024 * 1024 / UNIV_PAGE_SIZE)));
" InnoDB: ERROR: over 95 percent of the buffer pool"
" is occupied by\n"
"InnoDB: lock heaps or the adaptive hash index!"
" Check that your\n"
"InnoDB: transactions do not set too many row locks.\n"
"InnoDB: Your buffer pool size is %lu MB."
" Maybe you should make\n"
"InnoDB: the buffer pool bigger?\n"
"InnoDB: We intentionally generate a seg fault"
" to print a stack trace\n"
"InnoDB: on Linux!\n",
(ulong) (buf_pool->curr_size
/ (1024 * 1024 / UNIV_PAGE_SIZE)));
ut_error;
......@@ -368,14 +374,20 @@ buf_LRU_get_free_block(
ut_print_timestamp(stderr);
fprintf(stderr,
" InnoDB: WARNING: over 67 percent of the buffer pool is occupied by\n"
"InnoDB: lock heaps or the adaptive hash index! Check that your\n"
"InnoDB: transactions do not set too many row locks.\n"
"InnoDB: Your buffer pool size is %lu MB. Maybe you should make\n"
"InnoDB: the buffer pool bigger?\n"
"InnoDB: Starting the InnoDB Monitor to print diagnostics, including\n"
"InnoDB: lock heap and hash index sizes.\n",
(ulong) (buf_pool->curr_size / (1024 * 1024 / UNIV_PAGE_SIZE)));
" InnoDB: WARNING: over 67 percent of"
" the buffer pool is occupied by\n"
"InnoDB: lock heaps or the adaptive"
" hash index! Check that your\n"
"InnoDB: transactions do not set too many"
" row locks.\n"
"InnoDB: Your buffer pool size is %lu MB."
" Maybe you should make\n"
"InnoDB: the buffer pool bigger?\n"
"InnoDB: Starting the InnoDB Monitor to print"
" diagnostics, including\n"
"InnoDB: lock heap and hash index sizes.\n",
(ulong) (buf_pool->curr_size
/ (1024 * 1024 / UNIV_PAGE_SIZE)));
buf_lru_switched_on_innodb_mon = TRUE;
srv_print_innodb_monitor = TRUE;
......@@ -407,7 +419,8 @@ buf_LRU_get_free_block(
/* Remove from the list of mapped pages */
UT_LIST_REMOVE(awe_LRU_free_mapped,
buf_pool->awe_LRU_free_mapped, block);
buf_pool->awe_LRU_free_mapped,
block);
} else {
/* We map the page to a frame; second param
FALSE below because we do not want it to be
......@@ -460,15 +473,22 @@ buf_LRU_get_free_block(
ut_print_timestamp(stderr);
fprintf(stderr,
"InnoDB: Warning: difficult to find free blocks from\n"
"InnoDB: the buffer pool (%lu search iterations)! Consider\n"
"InnoDB: the buffer pool (%lu search iterations)!"
" Consider\n"
"InnoDB: increasing the buffer pool size.\n"
"InnoDB: It is also possible that in your Unix version\n"
"InnoDB: fsync is very slow, or completely frozen inside\n"
"InnoDB: the OS kernel. Then upgrading to a newer version\n"
"InnoDB: of your operating system may help. Look at the\n"
"InnoDB: It is also possible that"
" in your Unix version\n"
"InnoDB: fsync is very slow, or"
" completely frozen inside\n"
"InnoDB: the OS kernel. Then upgrading to"
" a newer version\n"
"InnoDB: of your operating system may help."
" Look at the\n"
"InnoDB: number of fsyncs in diagnostic info below.\n"
"InnoDB: Pending flushes (fsync) log: %lu; buffer pool: %lu\n"
"InnoDB: %lu OS file reads, %lu OS file writes, %lu OS fsyncs\n"
"InnoDB: Pending flushes (fsync) log: %lu;"
" buffer pool: %lu\n"
"InnoDB: %lu OS file reads, %lu OS file writes,"
" %lu OS fsyncs\n"
"InnoDB: Starting InnoDB Monitor to print further\n"
"InnoDB: diagnostics to the standard output.\n",
(ulong) n_iterations,
......@@ -541,16 +561,16 @@ buf_LRU_old_adjust_len(void)
if (old_len < new_len - BUF_LRU_OLD_TOLERANCE) {
buf_pool->LRU_old = UT_LIST_GET_PREV(LRU,
buf_pool->LRU_old);
buf_pool->LRU_old = UT_LIST_GET_PREV
(LRU, buf_pool->LRU_old);
(buf_pool->LRU_old)->old = TRUE;
buf_pool->LRU_old_len++;
} else if (old_len > new_len + BUF_LRU_OLD_TOLERANCE) {
(buf_pool->LRU_old)->old = FALSE;
buf_pool->LRU_old = UT_LIST_GET_NEXT(LRU,
buf_pool->LRU_old);
buf_pool->LRU_old = UT_LIST_GET_NEXT
(LRU, buf_pool->LRU_old);
buf_pool->LRU_old_len--;
} else {
ut_a(buf_pool->LRU_old); /* Check that we did not
......@@ -895,15 +915,20 @@ buf_LRU_block_remove_hashed_page(
if (block != buf_page_hash_get(block->space, block->offset)) {
fprintf(stderr,
"InnoDB: Error: page %lu %lu not found from the hash table\n",
"InnoDB: Error: page %lu %lu not found"
" in the hash table\n",
(ulong) block->space,
(ulong) block->offset);
if (buf_page_hash_get(block->space, block->offset)) {
fprintf(stderr,
"InnoDB: From hash table we find block %p of %lu %lu which is not %p\n",
(void*) buf_page_hash_get(block->space, block->offset),
(ulong) buf_page_hash_get(block->space, block->offset)->space,
(ulong) buf_page_hash_get(block->space, block->offset)->offset,
"InnoDB: In hash table we find block"
" %p of %lu %lu which is not %p\n",
(void*) buf_page_hash_get
(block->space, block->offset),
(ulong) buf_page_hash_get
(block->space, block->offset)->space,
(ulong) buf_page_hash_get
(block->space, block->offset)->offset,
(void*) block);
}
......@@ -1030,7 +1055,8 @@ buf_LRU_print(void)
ut_ad(buf_pool);
mutex_enter(&(buf_pool->mutex));
fprintf(stderr, "Pool ulint clock %lu\n", (ulong) buf_pool->ulint_clock);
fprintf(stderr, "Pool ulint clock %lu\n",
(ulong) buf_pool->ulint_clock);
block = UT_LIST_GET_FIRST(buf_pool->LRU);
......@@ -1060,7 +1086,8 @@ buf_LRU_print(void)
fprintf(stderr, "\nLRU pos %lu type %lu index id %lu\n",
(ulong) block->LRU_position,
(ulong) fil_page_get_type(frame),
(ulong) ut_dulint_get_low(btr_page_get_index_id(frame)));
(ulong) ut_dulint_get_low
(btr_page_get_index_id(frame)));
block = UT_LIST_GET_NEXT(LRU, block);
}
......@@ -90,24 +90,13 @@ buf_read_page_low(
+ TRX_SYS_DOUBLEWRITE_BLOCK_SIZE))) {
ut_print_timestamp(stderr);
fprintf(stderr,
" InnoDB: Warning: trying to read doublewrite buffer page %lu\n",
" InnoDB: Warning: trying to read"
" doublewrite buffer page %lu\n",
(ulong) offset);
return(0);
}
#ifdef UNIV_LOG_DEBUG
if (space % 2 == 1) {
/* We are updating a replicate space while holding the
log mutex: the read must be handled before other reads
which might incur ibuf operations and thus write to the log */
fputs("Log debug: reading replicate page in sync mode\n",
stderr);
sync = TRUE;
}
#endif
if (ibuf_bitmap_page(zip_size, offset)
|| trx_sys_hdr_page(space, offset)) {
......@@ -135,7 +124,8 @@ buf_read_page_low(
if (buf_debug_prints) {
fprintf(stderr,
"Posting read request for page %lu, sync %lu\n",
(ulong) offset, (ulong) sync);
(ulong) offset,
(ulong) sync);
}
#endif
......@@ -144,11 +134,11 @@ buf_read_page_low(
if (zip_size) {
*err = fil_io(OS_FILE_READ | wake_later,
sync, space, zip_size, offset, 0, zip_size,
(void*)block->page_zip.data, (void*)block);
(void*) block->page_zip.data, (void*) block);
} else {
*err = fil_io(OS_FILE_READ | wake_later,
sync, space, 0, offset, 0, UNIV_PAGE_SIZE,
(void*)block->frame, (void*)block);
(void*) block->frame, (void*) block);
}
ut_a(*err == DB_SUCCESS);
......@@ -234,8 +224,8 @@ buf_read_ahead_random(
mutex_enter(&(buf_pool->mutex));
if (buf_pool->n_pend_reads >
buf_pool->curr_size / BUF_READ_AHEAD_PEND_LIMIT) {
if (buf_pool->n_pend_reads
> buf_pool->curr_size / BUF_READ_AHEAD_PEND_LIMIT) {
mutex_exit(&(buf_pool->mutex));
return(0);
......@@ -278,16 +268,18 @@ buf_read_ahead_random(
mode: hence FALSE as the first parameter */
if (!ibuf_bitmap_page(zip_size, i)) {
count += buf_read_page_low(&err, FALSE, ibuf_mode
| OS_AIO_SIMULATED_WAKE_LATER,
space, zip_size,
tablespace_version, i);
count += buf_read_page_low
(&err, FALSE,
ibuf_mode | OS_AIO_SIMULATED_WAKE_LATER,
space, zip_size, tablespace_version, i);
if (err == DB_TABLESPACE_DELETED) {
ut_print_timestamp(stderr);
fprintf(stderr,
" InnoDB: Warning: in random readahead trying to access tablespace\n"
"InnoDB: %lu page no. %lu,\n"
"InnoDB: but the tablespace does not exist or is just being dropped.\n",
" InnoDB: Warning: in random"
" readahead trying to access\n"
"InnoDB: tablespace %lu page %lu,\n"
"InnoDB: but the tablespace does not"
" exist or is just being dropped.\n",
(ulong) space, (ulong) i);
}
}
......@@ -347,8 +339,10 @@ buf_read_page(
if (err == DB_TABLESPACE_DELETED) {
ut_print_timestamp(stderr);
fprintf(stderr,
" InnoDB: Error: trying to access tablespace %lu page no. %lu,\n"
"InnoDB: but the tablespace does not exist or is just being dropped.\n",
" InnoDB: Error: trying to access"
" tablespace %lu page no. %lu,\n"
"InnoDB: but the tablespace does not exist"
" or is just being dropped.\n",
(ulong) space, (ulong) offset);
}
......@@ -449,8 +443,8 @@ buf_read_ahead_linear(
return(0);
}
if (buf_pool->n_pend_reads >
buf_pool->curr_size / BUF_READ_AHEAD_PEND_LIMIT) {
if (buf_pool->n_pend_reads
> buf_pool->curr_size / BUF_READ_AHEAD_PEND_LIMIT) {
mutex_exit(&(buf_pool->mutex));
return(0);
......@@ -486,8 +480,8 @@ buf_read_ahead_linear(
}
}
if (fail_count > BUF_READ_AHEAD_LINEAR_AREA -
BUF_READ_AHEAD_LINEAR_THRESHOLD) {
if (fail_count > BUF_READ_AHEAD_LINEAR_AREA
- BUF_READ_AHEAD_LINEAR_THRESHOLD) {
/* Too many failures: return */
mutex_exit(&(buf_pool->mutex));
......@@ -572,16 +566,18 @@ buf_read_ahead_linear(
aio mode: hence FALSE as the first parameter */
if (!ibuf_bitmap_page(zip_size, i)) {
count += buf_read_page_low(&err, FALSE, ibuf_mode
| OS_AIO_SIMULATED_WAKE_LATER,
space, zip_size,
tablespace_version, i);
count += buf_read_page_low
(&err, FALSE,
ibuf_mode | OS_AIO_SIMULATED_WAKE_LATER,
space, zip_size, tablespace_version, i);
if (err == DB_TABLESPACE_DELETED) {
ut_print_timestamp(stderr);
fprintf(stderr,
" InnoDB: Warning: in linear readahead trying to access tablespace\n"
"InnoDB: %lu page no. %lu,\n"
"InnoDB: but the tablespace does not exist or is just being dropped.\n",
" InnoDB: Warning: in"
" linear readahead trying to access\n"
"InnoDB: tablespace %lu page %lu,\n"
"InnoDB: but the tablespace does not"
" exist or is just being dropped.\n",
(ulong) space, (ulong) i);
}
}
......@@ -636,8 +632,8 @@ buf_read_ibuf_merge_pages(
#ifdef UNIV_IBUF_DEBUG
ut_a(n_stored < UNIV_PAGE_SIZE);
#endif
while (buf_pool->n_pend_reads >
buf_pool->curr_size / BUF_READ_AHEAD_PEND_LIMIT) {
while (buf_pool->n_pend_reads
> buf_pool->curr_size / BUF_READ_AHEAD_PEND_LIMIT) {
os_thread_sleep(500000);
}
......@@ -707,9 +703,12 @@ buf_read_recv_pages(
if (count > 100) {
fprintf(stderr,
"InnoDB: Error: InnoDB has waited for 50 seconds for pending\n"
"InnoDB: reads to the buffer pool to be finished.\n"
"InnoDB: Number of pending reads %lu, pending pread calls %lu\n",
"InnoDB: Error: InnoDB has waited for"
" 50 seconds for pending\n"
"InnoDB: reads to the buffer pool to"
" be finished.\n"
"InnoDB: Number of pending reads %lu,"
" pending pread calls %lu\n",
(ulong) buf_pool->n_pend_reads,
(ulong)os_file_n_pending_preads);
......@@ -721,11 +720,13 @@ buf_read_recv_pages(
if ((i + 1 == n_stored) && sync) {
buf_read_page_low(&err, TRUE, BUF_READ_ANY_PAGE, space,
zip_size, tablespace_version, page_nos[i]);
zip_size, tablespace_version,
page_nos[i]);
} else {
buf_read_page_low(&err, FALSE, BUF_READ_ANY_PAGE
| OS_AIO_SIMULATED_WAKE_LATER, space,
zip_size, tablespace_version, page_nos[i]);
| OS_AIO_SIMULATED_WAKE_LATER,
space, zip_size,
tablespace_version, page_nos[i]);
}
}
......@@ -737,7 +738,8 @@ buf_read_recv_pages(
#ifdef UNIV_DEBUG
if (buf_debug_prints) {
fprintf(stderr,
"Recovery applies read-ahead pages %lu\n", (ulong) n_stored);
"Recovery applies read-ahead pages %lu\n",
(ulong) n_stored);
}
#endif /* UNIV_DEBUG */
}
......@@ -193,7 +193,7 @@ dfield_check_typed_no_assert(
|| dfield_get_type(field)->mtype < DATA_VARCHAR) {
fprintf(stderr,
"InnoDB: Error: data field type %lu, len %lu\n",
"InnoDB: Error: data field type %lu, len %lu\n",
(ulong) dfield_get_type(field)->mtype,
(ulong) dfield_get_len(field));
return(FALSE);
......@@ -216,9 +216,9 @@ dtuple_check_typed_no_assert(
if (dtuple_get_n_fields(tuple) > REC_MAX_N_FIELDS) {
fprintf(stderr,
"InnoDB: Error: index entry has %lu fields\n",
"InnoDB: Error: index entry has %lu fields\n",
(ulong) dtuple_get_n_fields(tuple));
dump:
dump:
fputs("InnoDB: Tuple contents: ", stderr);
dtuple_print(stderr, tuple);
putc('\n', stderr);
......@@ -251,7 +251,7 @@ dfield_check_typed(
|| dfield_get_type(field)->mtype < DATA_VARCHAR) {
fprintf(stderr,
"InnoDB: Error: data field type %lu, len %lu\n",
"InnoDB: Error: data field type %lu, len %lu\n",
(ulong) dfield_get_type(field)->mtype,
(ulong) dfield_get_len(field));
......@@ -434,15 +434,20 @@ dfield_print_also_hex(
/*****************************************************************
Print a dfield value using ut_print_buf. */
static
void
dfield_print_raw(
/*=============*/
FILE* f, /* in: output stream */
dfield_t* dfield) /* in: dfield */
{
if (dfield->len != UNIV_SQL_NULL) {
ut_print_buf(f, dfield->data, dfield->len);
ulint len = dfield->len;
if (len != UNIV_SQL_NULL) {
ulint print_len = ut_min(len, 1000);
ut_print_buf(f, dfield->data, print_len);
if (len != print_len) {
fprintf(f, "(total %lu bytes)", (ulong) len);
}
} else {
fputs(" SQL NULL", f);
}
......@@ -518,7 +523,8 @@ dtuple_convert_big_rec(
if (UNIV_UNLIKELY(size > 1000000000)) {
fprintf(stderr,
"InnoDB: Warning: tuple size very big: %lu\n", (ulong) size);
"InnoDB: Warning: tuple size very big: %lu\n",
(ulong) size);
fputs("InnoDB: Tuple contents: ", stderr);
dtuple_print(stderr, entry);
putc('\n', stderr);
......@@ -606,8 +612,8 @@ dtuple_convert_big_rec(
vector->fields[n_fields].len = dfield->len
- DICT_MAX_INDEX_COL_LEN;
vector->fields[n_fields].data = mem_heap_alloc(heap,
vector->fields[n_fields].len);
vector->fields[n_fields].data = mem_heap_alloc
(heap, vector->fields[n_fields].len);
/* Copy data (from the end of field) to big rec vector */
......@@ -67,8 +67,8 @@ dtype_get_at_most_n_mbchars(
if (dtype->mbminlen != dtype->mbmaxlen) {
ut_a(!(prefix_len % dtype->mbmaxlen));
return(innobase_get_at_most_n_mbchars(
dtype_get_charset_coll(dtype->prtype),
return(innobase_get_at_most_n_mbchars
(dtype_get_charset_coll(dtype->prtype),
prefix_len, data_len, str));
}
......
......@@ -241,9 +241,9 @@ dict_boot(void)
..._MARGIN, it will immediately be updated to the disk-based
header. */
dict_sys->row_id = ut_dulint_add(
ut_dulint_align_up(
mtr_read_dulint(dict_hdr + DICT_HDR_ROW_ID, &mtr),
dict_sys->row_id = ut_dulint_add
(ut_dulint_align_up(mtr_read_dulint
(dict_hdr + DICT_HDR_ROW_ID, &mtr),
DICT_HDR_ROW_ID_WRITE_MARGIN),
DICT_HDR_ROW_ID_WRITE_MARGIN);
......@@ -267,14 +267,16 @@ dict_boot(void)
dict_sys->sys_tables = table;
index = dict_mem_index_create("SYS_TABLES", "CLUST_IND",
DICT_HDR_SPACE, DICT_UNIQUE | DICT_CLUSTERED, 1);
DICT_HDR_SPACE,
DICT_UNIQUE | DICT_CLUSTERED, 1);
dict_mem_index_add_field(index, "NAME", 0);
index->id = DICT_TABLES_ID;
success = dict_index_add_to_cache(table, index, mtr_read_ulint(
dict_hdr + DICT_HDR_TABLES, MLOG_4BYTES, &mtr));
success = dict_index_add_to_cache(table, index, mtr_read_ulint
(dict_hdr + DICT_HDR_TABLES,
MLOG_4BYTES, &mtr));
ut_a(success);
/*-------------------------*/
index = dict_mem_index_create("SYS_TABLES", "ID_IND",
......@@ -282,8 +284,10 @@ dict_boot(void)
dict_mem_index_add_field(index, "ID", 0);
index->id = DICT_TABLE_IDS_ID;
success = dict_index_add_to_cache(table, index, mtr_read_ulint(
dict_hdr + DICT_HDR_TABLE_IDS, MLOG_4BYTES, &mtr));
success = dict_index_add_to_cache(table, index,
mtr_read_ulint
(dict_hdr + DICT_HDR_TABLE_IDS,
MLOG_4BYTES, &mtr));
ut_a(success);
/*-------------------------*/
table = dict_mem_table_create("SYS_COLUMNS", DICT_HDR_SPACE, 7, 0);
......@@ -302,14 +306,16 @@ dict_boot(void)
dict_sys->sys_columns = table;
index = dict_mem_index_create("SYS_COLUMNS", "CLUST_IND",
DICT_HDR_SPACE, DICT_UNIQUE | DICT_CLUSTERED, 2);
DICT_HDR_SPACE,
DICT_UNIQUE | DICT_CLUSTERED, 2);
dict_mem_index_add_field(index, "TABLE_ID", 0);
dict_mem_index_add_field(index, "POS", 0);
index->id = DICT_COLUMNS_ID;
success = dict_index_add_to_cache(table, index, mtr_read_ulint(
dict_hdr + DICT_HDR_COLUMNS, MLOG_4BYTES, &mtr));
success = dict_index_add_to_cache(table, index, mtr_read_ulint
(dict_hdr + DICT_HDR_COLUMNS,
MLOG_4BYTES, &mtr));
ut_a(success);
/*-------------------------*/
table = dict_mem_table_create("SYS_INDEXES", DICT_HDR_SPACE, 7, 0);
......@@ -338,14 +344,16 @@ dict_boot(void)
dict_sys->sys_indexes = table;
index = dict_mem_index_create("SYS_INDEXES", "CLUST_IND",
DICT_HDR_SPACE, DICT_UNIQUE | DICT_CLUSTERED, 2);
DICT_HDR_SPACE,
DICT_UNIQUE | DICT_CLUSTERED, 2);
dict_mem_index_add_field(index, "TABLE_ID", 0);
dict_mem_index_add_field(index, "ID", 0);
index->id = DICT_INDEXES_ID;
success = dict_index_add_to_cache(table, index, mtr_read_ulint(
dict_hdr + DICT_HDR_INDEXES, MLOG_4BYTES, &mtr));
success = dict_index_add_to_cache(table, index, mtr_read_ulint
(dict_hdr + DICT_HDR_INDEXES,
MLOG_4BYTES, &mtr));
ut_a(success);
/*-------------------------*/
table = dict_mem_table_create("SYS_FIELDS", DICT_HDR_SPACE, 3, 0);
......@@ -359,14 +367,16 @@ dict_boot(void)
dict_sys->sys_fields = table;
index = dict_mem_index_create("SYS_FIELDS", "CLUST_IND",
DICT_HDR_SPACE, DICT_UNIQUE | DICT_CLUSTERED, 2);
DICT_HDR_SPACE,
DICT_UNIQUE | DICT_CLUSTERED, 2);
dict_mem_index_add_field(index, "INDEX_ID", 0);
dict_mem_index_add_field(index, "POS", 0);
index->id = DICT_FIELDS_ID;
success = dict_index_add_to_cache(table, index, mtr_read_ulint(
dict_hdr + DICT_HDR_FIELDS, MLOG_4BYTES, &mtr));
success = dict_index_add_to_cache(table, index, mtr_read_ulint
(dict_hdr + DICT_HDR_FIELDS,
MLOG_4BYTES, &mtr));
ut_a(success);
mtr_commit(&mtr);
......@@ -229,8 +229,8 @@ dict_build_table_def_step(
row_len = 0;
for (i = 0; i < table->n_def; i++) {
row_len += dtype_get_min_size(dict_col_get_type(
&table->cols[i]));
row_len += dtype_get_min_size(dict_col_get_type
(&table->cols[i]));
}
if (row_len > BTR_PAGE_MAX_REC_SIZE) {
return(DB_TOO_BIG_RECORD);
......@@ -258,10 +258,11 @@ dict_build_table_def_step(
is_path = FALSE;
}
error = fil_create_new_single_table_tablespace(
&(table->space), path_or_name, is_path,
error = fil_create_new_single_table_tablespace
(&(table->space), path_or_name, is_path,
dict_table_zip_size(table),
FIL_IBD_FILE_INITIAL_SIZE);
if (error != DB_SUCCESS) {
return(error);
......@@ -698,7 +699,8 @@ dict_drop_index_tree(
btr_free_root(space, root_page_no, mtr);
page_rec_write_index_page_no(rec,
DICT_SYS_INDEXES_PAGE_NO_FIELD, FIL_NULL, mtr);
DICT_SYS_INDEXES_PAGE_NO_FIELD,
FIL_NULL, mtr);
}
/***********************************************************************
......@@ -781,8 +783,8 @@ dict_truncate_index_tree(
appropriate field in the SYS_INDEXES record: this mini-transaction
marks the B-tree totally truncated */
comp = page_is_comp(btr_page_get(
space, root_page_no, RW_X_LATCH, mtr));
comp = page_is_comp(btr_page_get
(space, root_page_no, RW_X_LATCH, mtr));
btr_free_root(space, root_page_no, mtr);
/* We will temporarily write FIL_NULL to the PAGE_NO field
......@@ -1181,13 +1183,15 @@ dict_create_or_check_foreign_constraint_tables(void)
if (table1) {
fprintf(stderr,
"InnoDB: dropping incompletely created SYS_FOREIGN table\n");
"InnoDB: dropping incompletely created"
" SYS_FOREIGN table\n");
row_drop_table_for_mysql("SYS_FOREIGN", trx, TRUE);
}
if (table2) {
fprintf(stderr,
"InnoDB: dropping incompletely created SYS_FOREIGN_COLS table\n");
"InnoDB: dropping incompletely created"
" SYS_FOREIGN_COLS table\n");
row_drop_table_for_mysql("SYS_FOREIGN_COLS", trx, TRUE);
}
......@@ -1208,13 +1212,19 @@ dict_create_or_check_foreign_constraint_tables(void)
"PROCEDURE CREATE_FOREIGN_SYS_TABLES_PROC () IS\n"
"BEGIN\n"
"CREATE TABLE\n"
"SYS_FOREIGN(ID CHAR, FOR_NAME CHAR, REF_NAME CHAR, N_COLS INT);\n"
"CREATE UNIQUE CLUSTERED INDEX ID_IND ON SYS_FOREIGN (ID);\n"
"CREATE INDEX FOR_IND ON SYS_FOREIGN (FOR_NAME);\n"
"CREATE INDEX REF_IND ON SYS_FOREIGN (REF_NAME);\n"
"SYS_FOREIGN(ID CHAR, FOR_NAME CHAR,"
" REF_NAME CHAR, N_COLS INT);\n"
"CREATE UNIQUE CLUSTERED INDEX ID_IND"
" ON SYS_FOREIGN (ID);\n"
"CREATE INDEX FOR_IND"
" ON SYS_FOREIGN (FOR_NAME);\n"
"CREATE INDEX REF_IND"
" ON SYS_FOREIGN (REF_NAME);\n"
"CREATE TABLE\n"
"SYS_FOREIGN_COLS(ID CHAR, POS INT, FOR_COL_NAME CHAR, REF_COL_NAME CHAR);\n"
"CREATE UNIQUE CLUSTERED INDEX ID_IND ON SYS_FOREIGN_COLS (ID, POS);\n"
"SYS_FOREIGN_COLS(ID CHAR, POS INT,"
" FOR_COL_NAME CHAR, REF_COL_NAME CHAR);\n"
"CREATE UNIQUE CLUSTERED INDEX ID_IND"
" ON SYS_FOREIGN_COLS (ID, POS);\n"
"COMMIT WORK;\n"
"END;\n"
, FALSE, trx);
......@@ -1225,10 +1235,11 @@ dict_create_or_check_foreign_constraint_tables(void)
ut_a(error == DB_OUT_OF_FILE_SPACE);
fprintf(stderr, "InnoDB: creation failed\n");
fprintf(stderr, "InnoDB: tablespace is full\n");
fprintf(stderr,
"InnoDB: dropping incompletely created SYS_FOREIGN tables\n");
"InnoDB: creation failed\n"
"InnoDB: tablespace is full\n"
"InnoDB: dropping incompletely created"
" SYS_FOREIGN tables\n");
row_drop_table_for_mysql("SYS_FOREIGN", trx, TRUE);
row_drop_table_for_mysql("SYS_FOREIGN_COLS", trx, TRUE);
......@@ -1244,7 +1255,8 @@ dict_create_or_check_foreign_constraint_tables(void)
if (error == DB_SUCCESS) {
fprintf(stderr,
"InnoDB: Foreign key constraint system tables created\n");
"InnoDB: Foreign key constraint system tables"
" created\n");
}
return(error);
......@@ -1306,7 +1318,8 @@ dict_foreign_eval_sql(
" for table ", ef);
ut_print_name(ef, trx, TRUE, table->name);
fputs(".\n"
"See the MySQL .err log in the datadir for more information.\n", ef);
"See the MySQL .err log in the datadir"
" for more information.\n", ef);
mutex_exit(&dict_foreign_err_mutex);
return(error);
......@@ -1340,13 +1353,13 @@ dict_create_add_foreign_field_to_dictionary(
pars_info_add_str_literal(info, "ref_col_name",
foreign->referenced_col_names[field_nr]);
return dict_foreign_eval_sql(info,
"PROCEDURE P () IS\n"
return(dict_foreign_eval_sql
(info, "PROCEDURE P () IS\n"
"BEGIN\n"
"INSERT INTO SYS_FOREIGN_COLS VALUES"
"(:id, :pos, :for_col_name, :ref_col_name);\n"
"END;\n"
, table, foreign, trx);
"END;\n",
table, foreign, trx));
}
/************************************************************************
......@@ -1405,8 +1418,8 @@ dict_create_add_foreign_to_dictionary(
}
for (i = 0; i < foreign->n_fields; i++) {
error = dict_create_add_foreign_field_to_dictionary(i,
table, foreign, trx);
error = dict_create_add_foreign_field_to_dictionary
(i, table, foreign, trx);
if (error != DB_SUCCESS) {
......@@ -1452,7 +1465,8 @@ dict_create_add_foreigns_to_dictionary(
if (NULL == dict_table_get_low("SYS_FOREIGN")) {
fprintf(stderr,
"InnoDB: table SYS_FOREIGN not found from internal data dictionary\n");
"InnoDB: table SYS_FOREIGN not found"
" in internal data dictionary\n");
return(DB_ERROR);
}
......@@ -1461,8 +1475,8 @@ dict_create_add_foreigns_to_dictionary(
foreign;
foreign = UT_LIST_GET_NEXT(foreign_list, foreign)) {
error = dict_create_add_foreign_to_dictionary(&number,
table, foreign, trx);
error = dict_create_add_foreign_to_dictionary
(&number, table, foreign, trx);
if (error != DB_SUCCESS) {
......@@ -333,10 +333,10 @@ eval_predefined_2(
ut_ad(len2 >= len1);
if (len2 > len1) {
int_val = (lint)(len1 +
(eval_rnd % (len2 - len1 + 1)));
int_val = (lint) (len1
+ (eval_rnd % (len2 - len1 + 1)));
} else {
int_val = (lint)len1;
int_val = (lint) len1;
}
eval_rnd = ut_rnd_gen_next_ulint(eval_rnd);
......@@ -741,7 +741,8 @@ eval_predefined(
uint_val = (ulint) int_val;
}
for (tmp = int_len; uint_val > 0; uint_val /= 10) {
data[--tmp] = (byte) ('0' + (byte)(uint_val % 10));
data[--tmp] = (byte)
('0' + (byte)(uint_val % 10));
}
}
......@@ -51,7 +51,8 @@ if_step(
for (;;) {
eval_exp(elsif_node->cond);
if (eval_node_get_ibool_val(elsif_node->cond)) {
if (eval_node_get_ibool_val
(elsif_node->cond)) {
/* The condition evaluated to TRUE:
start execution from the first
......@@ -192,8 +193,8 @@ for_step(
loop_var_value = eval_node_get_int_val(node->loop_start_limit);
node->loop_end_value = eval_node_get_int_val(
node->loop_end_limit);
node->loop_end_value
= eval_node_get_int_val(node->loop_end_limit);
}
/* Check if we should do another loop */
......@@ -310,8 +310,9 @@ ha_validate(
if (hash_calc_hash(node->fold, table) != i) {
ut_print_timestamp(stderr);
fprintf(stderr,
"InnoDB: Error: hash table node fold value %lu does not\n"
"InnoDB: match with the cell number %lu.\n",
"InnoDB: Error: hash table node"
" fold value %lu does not\n"
"InnoDB: match the cell number %lu.\n",
(ulong) node->fold, (ulong) i);
ok = FALSE;
......@@ -363,6 +364,7 @@ ha_print_info(
n_bufs++;
}
fprintf(file, ", node heap has %lu buffer(s)\n", (ulong) n_bufs);
fprintf(file, ", node heap has %lu buffer(s)\n",
(ulong) n_bufs);
}
}