Commit 7dcb8816 authored by Marko Mäkelä

Merge 10.1 into 10.2

parents 8acb2b7b 84be33ab
......@@ -1523,12 +1523,12 @@ ANALYZE
"key_length": "5",
"used_key_parts": ["a"],
"ref": ["test.t3.a"],
"r_loops": 0,
"r_loops": 1,
"rows": 1,
"r_rows": null,
"r_rows": 10,
"r_total_time_ms": "REPLACED",
"filtered": 100,
"r_filtered": null,
"r_filtered": 100,
"index_condition_bka": "t4.b + 1 <= t3.b + 1"
},
"buffer_type": "flat",
......
......@@ -662,7 +662,7 @@
VARIABLE_NAME INNODB_VERSION
SESSION_VALUE NULL
-GLOBAL_VALUE 5.6.37
-+GLOBAL_VALUE 5.6.36-82.1
++GLOBAL_VALUE 5.6.36-82.2
GLOBAL_VALUE_ORIGIN COMPILE-TIME
DEFAULT_VALUE NULL
VARIABLE_SCOPE GLOBAL
......
......@@ -374,18 +374,9 @@ ADD_CUSTOM_TARGET(
SET_TARGET_PROPERTIES(GenServerSource PROPERTIES EXCLUDE_FROM_ALL TRUE)
IF(WIN32 OR HAVE_DLOPEN AND NOT DISABLE_SHARED)
-ADD_LIBRARY(udf_example MODULE udf_example.c)
+ADD_LIBRARY(udf_example MODULE udf_example.c udf_example.def)
SET_TARGET_PROPERTIES(udf_example PROPERTIES PREFIX "")
-# udf_example depends on strings
-IF(WIN32)
-IF(MSVC)
-SET_TARGET_PROPERTIES(udf_example PROPERTIES LINK_FLAGS "/DEF:${CMAKE_CURRENT_SOURCE_DIR}/udf_example.def")
-ENDIF()
-TARGET_LINK_LIBRARIES(udf_example strings)
-ELSE()
-# udf_example is using safemutex exported by mysqld
TARGET_LINK_LIBRARIES(udf_example mysqld)
-ENDIF()
ENDIF()
CONFIGURE_FILE(
......
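Side note on the udf_example change above: a UDF module exposes plain C entry points (xxx(), xxx_init(), xxx_deinit()) that the server resolves at CREATE FUNCTION time, and on Windows those symbols must be exported, which is what udf_example.def declares; listing the .def file directly in ADD_LIBRARY lets any Windows linker pick it up instead of relying on the MSVC-only /DEF: link flag. A minimal sketch of such an entry-point triple follows; the function name my_sample_add is hypothetical and is not one of the functions shipped in udf_example.c.

/* Sketch of the C entry points a udf_example.def-style EXPORTS section
   would have to list for one UDF; "my_sample_add" is a made-up name. */
#include <mysql.h>          /* UDF_INIT, UDF_ARGS, INT_RESULT */
#include <string.h>

my_bool my_sample_add_init(UDF_INIT *initid, UDF_ARGS *args, char *message)
{
  if (args->arg_count != 2)
  {
    strcpy(message, "my_sample_add() requires exactly two arguments");
    return 1;                       /* non-zero means initialization failed */
  }
  args->arg_type[0]= INT_RESULT;    /* ask the server to coerce both args to int */
  args->arg_type[1]= INT_RESULT;
  initid->maybe_null= 1;
  return 0;
}

long long my_sample_add(UDF_INIT *initid, UDF_ARGS *args,
                        char *is_null, char *error)
{
  (void) initid; (void) error;
  if (!args->args[0] || !args->args[1])
  {
    *is_null= 1;                    /* NULL in, NULL out */
    return 0;
  }
  return *((long long*) args->args[0]) + *((long long*) args->args[1]);
}

void my_sample_add_deinit(UDF_INIT *initid)
{
  (void) initid;                    /* nothing was allocated in init */
}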
......@@ -3862,6 +3862,7 @@ int JOIN_TAB_SCAN_MRR::open()
/* Dynamic range access is never used with BKA */
DBUG_ASSERT(join_tab->use_quick != 2);
+join_tab->tracker->r_scans++;
save_or_restore_used_tabs(join_tab, FALSE);
init_mrr_buff();
......@@ -3905,6 +3906,8 @@ int JOIN_TAB_SCAN_MRR::next()
int rc= join_tab->table->file->multi_range_read_next((range_id_t*)ptr) ? -1 : 0;
if (!rc)
{
+join_tab->tracker->r_rows++;
+join_tab->tracker->r_rows_after_where++;
/*
If a record in in an incremental cache contains no fields then the
association for the last record in cache will be equal to cache->end_pos
......
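The two sql_join_cache.cc hunks above are what turn the previously null BKA counters in the ANALYZE FORMAT=JSON result (first hunk on this page) into real numbers: r_scans is bumped once per MRR scan in JOIN_TAB_SCAN_MRR::open(), and r_rows / r_rows_after_where once per row returned in next(). A rough sketch of how counters of this kind map onto the reported figures; the struct and helper names are illustrative, not the actual tracker class in the server.

#include <stdio.h>

/* Illustrative counter set; the real tracker has a similar shape
   but different member names. */
struct scan_tracker
{
  unsigned long long r_scans;             /* scans started (open() calls)        */
  unsigned long long r_rows;              /* rows returned by the storage engine */
  unsigned long long r_rows_after_where;  /* rows that passed the attached cond  */
};

/* r_rows in the JSON output is an average per scan, and r_filtered is
   the percentage of fetched rows that survived the condition. */
static double avg_rows(const struct scan_tracker *t)
{
  return t->r_scans ? (double) t->r_rows / t->r_scans : 0.0;
}

static double filtered_pct(const struct scan_tracker *t)
{
  return t->r_rows ? 100.0 * (double) t->r_rows_after_where / t->r_rows : 100.0;
}

int main(void)
{
  /* Counters matching the updated test expectation above:
     one scan, 10 rows fetched, all 10 accepted. */
  struct scan_tracker t = { 1, 10, 10 };
  printf("r_rows=%.0f r_filtered=%.0f\n", avg_rows(&t), filtered_pct(&t));
  return 0;
}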
......@@ -1716,16 +1716,12 @@ PageConverter::update_records(
m_rec_iter.open(block);
-while (!m_rec_iter.end()) {
-rec_t* rec = m_rec_iter.current();
-/* FIXME: Move out of the loop */
-if (rec_get_status(rec) == REC_STATUS_NODE_PTR) {
-break;
-}
+if (!page_is_leaf(block->frame)) {
+return DB_SUCCESS;
+}
+while (!m_rec_iter.end()) {
+rec_t* rec = m_rec_iter.current();
ibool deleted = rec_get_deleted_flag(rec, comp);
/* For the clustered index we have to adjust the BLOB
......
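The row0import.cc hunk above (and its xtradb twin further down) hoists a per-record test out of the conversion loop: whether a page holds node pointers or user records is a property of the page, so checking page_is_leaf() once and returning early replaces checking rec_get_status() == REC_STATUS_NODE_PTR for every record. A stand-alone sketch of the same restructuring with simplified types, not the actual PageConverter API:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Simplified stand-ins for the InnoDB page and record; not the real types. */
struct page { bool is_leaf; size_t n_recs; };
struct rec  { bool node_ptr; int payload; };

typedef int dberr_t;
#define DB_SUCCESS 0

static void convert(const struct rec *r) { printf("converted %d\n", r->payload); }

/* Before: the status check ran for every record even though the answer
   is the same for the whole page (sketch of the removed shape). */
dberr_t update_records_old(const struct page *p, const struct rec *recs)
{
  for (size_t i = 0; i < p->n_recs; i++) {
    if (recs[i].node_ptr) {   /* stood in for the REC_STATUS_NODE_PTR test */
      break;
    }
    convert(&recs[i]);
  }
  return DB_SUCCESS;
}

/* After: decide once from the page header and skip non-leaf pages
   before the record iterator is even opened. */
dberr_t update_records_new(const struct page *p, const struct rec *recs)
{
  if (!p->is_leaf) {
    return DB_SUCCESS;
  }
  for (size_t i = 0; i < p->n_recs; i++) {
    convert(&recs[i]);
  }
  return DB_SUCCESS;
}

int main(void)
{
  struct rec recs[2] = { { false, 1 }, { false, 2 } };
  struct page leaf = { true, 2 };
  struct page internal = { false, 2 };
  update_records_new(&leaf, recs);       /* converts both records   */
  update_records_new(&internal, recs);   /* returns without looping */
  (void) update_records_old;             /* old shape kept for comparison */
  return 0;
}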
......@@ -660,11 +660,24 @@ int maria_create(const char *name, enum data_file_type datafile_type,
if (length > max_key_length)
max_key_length= length;
-tot_length+= ((max_rows/(ulong) (((uint) maria_block_size -
+if (tot_length == ULLONG_MAX)
+continue;
+ulonglong tot_length_part= (max_rows/(ulong) (((uint) maria_block_size -
MAX_KEYPAGE_HEADER_SIZE -
KEYPAGE_CHECKSUM_SIZE)/
-(length*2))) *
-maria_block_size);
+(length*2)));
+if (tot_length_part >= (ULLONG_MAX / maria_block_size +
+ULLONG_MAX % maria_block_size))
+tot_length= ULLONG_MAX;
+else
+{
+if (tot_length > ULLONG_MAX - tot_length_part * maria_block_size)
+tot_length= ULLONG_MAX;
+else
+tot_length+= tot_length_part * maria_block_size;
+}
}
unique_key_parts=0;
......@@ -673,11 +686,24 @@ int maria_create(const char *name, enum data_file_type datafile_type,
uniquedef->key=keys+i;
unique_key_parts+=uniquedef->keysegs;
share.state.key_root[keys+i]= HA_OFFSET_ERROR;
-tot_length+= (max_rows/(ulong) (((uint) maria_block_size -
+if (tot_length == ULLONG_MAX)
+continue;
+ulonglong tot_length_part= (max_rows/(ulong) (((uint) maria_block_size -
MAX_KEYPAGE_HEADER_SIZE -
KEYPAGE_CHECKSUM_SIZE) /
-((MARIA_UNIQUE_HASH_LENGTH + pointer)*2)))*
-(ulong) maria_block_size;
+((MARIA_UNIQUE_HASH_LENGTH + pointer)*2)));
+if (tot_length_part >= (ULLONG_MAX / maria_block_size +
+ULLONG_MAX % maria_block_size))
+tot_length= ULLONG_MAX;
+else
+{
+if (tot_length > ULLONG_MAX - tot_length_part * maria_block_size)
+tot_length= ULLONG_MAX;
+else
+tot_length+= tot_length_part * maria_block_size;
+}
}
keys+=uniques; /* Each unique has 1 key */
key_segs+=uniques; /* Each unique has 1 key seg */
......@@ -746,8 +772,7 @@ int maria_create(const char *name, enum data_file_type datafile_type,
Get estimate for index file length (this may be wrong for FT keys)
This is used for pointers to other key pages.
*/
-tmp= (tot_length + maria_block_size * keys *
-MARIA_INDEX_BLOCK_MARGIN) / maria_block_size;
+tmp= (tot_length / maria_block_size + keys * MARIA_INDEX_BLOCK_MARGIN);
/*
use maximum of key_file_length we calculated and key_file_length value we
......
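The three maria_create() hunks above replace unchecked arithmetic on tot_length with saturating arithmetic: the per-key contribution is computed into tot_length_part first, the multiplication by maria_block_size and the final addition are both checked against ULLONG_MAX, and once tot_length saturates, later keys are skipped via the continue at the top. A compact sketch of the same saturating update as a helper function; the helper name and the slightly simpler overflow test are mine, not the exact expressions used in the patch.

#include <limits.h>
#include <stdio.h>

/* Saturating tot_length += part * block_size, mirroring the overflow
   guards added in maria_create(); the helper name is illustrative. */
static unsigned long long
add_key_length(unsigned long long tot_length,
               unsigned long long part,
               unsigned long long block_size)
{
  if (tot_length == ULLONG_MAX)            /* already saturated: keep it          */
    return ULLONG_MAX;
  if (part > ULLONG_MAX / block_size)      /* part * block_size would overflow    */
    return ULLONG_MAX;
  unsigned long long bytes = part * block_size;
  if (tot_length > ULLONG_MAX - bytes)     /* the final addition would overflow   */
    return ULLONG_MAX;
  return tot_length + bytes;
}

int main(void)
{
  /* A huge max_rows estimate saturates instead of wrapping around. */
  unsigned long long tot = 0;
  tot = add_key_length(tot, ULLONG_MAX / 4096, 8192);
  printf("%llu\n", tot);                   /* prints the ULLONG_MAX value */
  return 0;
}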
......@@ -293,6 +293,9 @@ link_directories(
if(MRN_BUNDLED)
target_link_libraries(mroonga ${MRN_LIBRARIES})
+if(NOT TARGET mroonga)
+return()
+endif()
else()
add_library(mroonga MODULE ${MRN_ALL_SOURCES})
......
......@@ -2599,7 +2599,7 @@ log_group_read_log_seg(
start_lsn += len;
buf += len;
-if (recv_sys && recv_sys->report(ut_time())) {
+if (recv_recovery_is_on() && recv_sys && recv_sys->report(ut_time())) {
ib_logf(IB_LOG_LEVEL_INFO, "Read redo log up to LSN=" LSN_PF,
start_lsn);
sd_notifyf(0, "STATUS=Read redo log up to LSN=" LSN_PF,
......
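The log0log.cc change restricts the progress message to the case where crash recovery is actually running (recv_recovery_is_on()), so reading a redo-log segment for any other purpose no longer emits the "Read redo log up to LSN" status line. A minimal sketch of the guarded, rate-limited report under that reading; everything except the recv_recovery_is_on() idea is made-up scaffolding, not the real recv_sys interface.

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* Illustrative stand-ins: the real recv_sys_t::report() rate-limits
   itself, and the server additionally notifies systemd. */
struct recv_sys_t { time_t last_report; };
static struct recv_sys_t recv_sys_obj, *recv_sys = &recv_sys_obj;
static bool recovery_on;

static bool recv_recovery_is_on(void) { return recovery_on; }

/* Report at most once per second, as a progress reporter typically does. */
static bool recv_report(struct recv_sys_t *rs, time_t now)
{
  if (now - rs->last_report < 1)
    return false;
  rs->last_report = now;
  return true;
}

static void read_log_segment(unsigned long long start_lsn)
{
  /* ... read and copy a chunk of redo log here ... */

  /* Only recovery progress is interesting; skip the message when the
     segment is being read for some other purpose. */
  if (recv_recovery_is_on() && recv_sys && recv_report(recv_sys, time(NULL))) {
    printf("Read redo log up to LSN=%llu\n", start_lsn);
  }
}

int main(void)
{
  recovery_on = true;
  read_log_segment(1234567);
  return 0;
}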
......@@ -1793,16 +1793,12 @@ PageConverter::update_records(
m_rec_iter.open(block);
-while (!m_rec_iter.end()) {
-rec_t* rec = m_rec_iter.current();
-/* FIXME: Move out of the loop */
-if (rec_get_status(rec) == REC_STATUS_NODE_PTR) {
-break;
-}
+if (!page_is_leaf(block->frame)) {
+return DB_SUCCESS;
+}
+while (!m_rec_iter.end()) {
+rec_t* rec = m_rec_iter.current();
ibool deleted = rec_get_deleted_flag(rec, comp);
/* For the clustered index we have to adjust the BLOB
......