Commit da05d027 authored by Vladislav Vaintroub

merge 10.1->10.2

Some innobase/xtrabackup changes from 10.1 are null-merged,
in particular using os_file_set_size to extend tablespaces in the server
or in mariabackup.
They require a non-trivial amount of additional work in 10.2, due to
innobase differences between 10.1 and 10.2.
parents ea4e8bab 01e656a6
@@ -79,7 +79,7 @@ wf_incremental_init(xb_write_filt_ctxt_t *ctxt, char *dst_name,
cp->delta_buf_base = static_cast<byte *>(malloc(buf_size));
memset(cp->delta_buf_base, 0, buf_size);
cp->delta_buf = static_cast<byte *>
(ut_align(cp->delta_buf_base, UNIV_PAGE_SIZE_MAX));
(ut_align(cp->delta_buf_base, cursor->page_size.physical()));
/* write delta meta info */
snprintf(meta_name, sizeof(meta_name), "%s%s", dst_name,
......
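For context, the hunk above aligns mariabackup's incremental delta buffer to the tablespace's actual physical page size instead of the compile-time maximum UNIV_PAGE_SIZE_MAX. As a hedged illustration of what such pointer alignment does (a generic sketch, not the server's ut_align() implementation), rounding a pointer up to a power-of-two boundary looks like this:

#include <cstddef>
#include <cstdint>

/* Generic sketch only: round ptr up to the next multiple of align,
   which must be a power of two, the way the delta buffer above is
   aligned to the physical page size. */
static inline unsigned char*
align_up(unsigned char* ptr, std::size_t align)
{
        const std::uintptr_t p = reinterpret_cast<std::uintptr_t>(ptr);
        const std::uintptr_t mask = std::uintptr_t(align) - 1;
        return reinterpret_cast<unsigned char*>((p + mask) & ~mask);
}

With a 16 KiB physical page size this turns a plain malloc()ed buffer into a page-aligned one, which is why a buffer that will be aligned in place is typically allocated with some extra slack.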
@@ -2140,3 +2140,35 @@ drop database db1;
connection default;
disconnect con1;
set global sql_mode=default;
USE test;
#
# End of 10.0 tests
#
#
# Start of 10.1 tests
#
#
# MDEV-13242 Wrong results for queries with row constructors and information_schema
#
CREATE TABLE tt1(c1 INT);
CREATE TABLE tt2(c2 INT);
SELECT count(*) FROM information_schema.columns WHERE table_schema='test' AND (table_name, column_name) IN (('tt1', 'c1'));
count(*)
1
SELECT count(*) FROM information_schema.columns WHERE table_schema='test' AND (table_name, column_name) IN (('tt2', 'c2'));
count(*)
1
SELECT count(*) FROM information_schema.columns WHERE table_schema='test' AND (table_name, column_name) IN (('tt1','c1'),('tt2', 'c2'));
count(*)
2
SELECT count(*) FROM information_schema.columns WHERE table_schema='test' AND (table_name, column_name) IN (SELECT 'tt1','c1' FROM dual UNION SELECT 'tt2', 'c2' FROM dual);
count(*)
2
SELECT count(*) FROM information_schema.columns WHERE table_schema='test' AND (table_name='tt1' AND column_name='c1') OR (table_name='tt2' AND column_name='c2');
count(*)
2
SELECT column_name FROM information_schema.columns WHERE (table_name, column_name) IN (('tt1','c1'),('tt2', 'c2')) ORDER BY column_name;
column_name
c1
c2
DROP TABLE tt1, tt2;
@@ -1861,3 +1861,29 @@ disconnect con1;
--source include/wait_until_count_sessions.inc
set global sql_mode=default;
USE test;
--echo #
--echo # End of 10.0 tests
--echo #
--echo #
--echo # Start of 10.1 tests
--echo #
--echo #
--echo # MDEV-13242 Wrong results for queries with row constructors and information_schema
--echo #
CREATE TABLE tt1(c1 INT);
CREATE TABLE tt2(c2 INT);
SELECT count(*) FROM information_schema.columns WHERE table_schema='test' AND (table_name, column_name) IN (('tt1', 'c1'));
SELECT count(*) FROM information_schema.columns WHERE table_schema='test' AND (table_name, column_name) IN (('tt2', 'c2'));
SELECT count(*) FROM information_schema.columns WHERE table_schema='test' AND (table_name, column_name) IN (('tt1','c1'),('tt2', 'c2'));
SELECT count(*) FROM information_schema.columns WHERE table_schema='test' AND (table_name, column_name) IN (SELECT 'tt1','c1' FROM dual UNION SELECT 'tt2', 'c2' FROM dual);
SELECT count(*) FROM information_schema.columns WHERE table_schema='test' AND (table_name='tt1' AND column_name='c1') OR (table_name='tt2' AND column_name='c2');
SELECT column_name FROM information_schema.columns WHERE (table_name, column_name) IN (('tt1','c1'),('tt2', 'c2')) ORDER BY column_name;
DROP TABLE tt1, tt2;
@@ -4514,6 +4514,8 @@ class Item_cache_wrapper :public Item_result_field
bool fix_fields(THD *thd, Item **it);
void cleanup();
Item *get_orig_item() const { return orig_item; }
/* Methods of getting value which should be cached in the cache */
void save_val(Field *to);
double val_real();
......
@@ -3712,6 +3712,15 @@ bool uses_only_table_name_fields(Item *item, TABLE_LIST *table)
return 0;
}
}
else if (item->type() == Item::ROW_ITEM)
{
Item_row *item_row= static_cast<Item_row*>(item);
for (uint i= 0; i < item_row->cols(); i++)
{
if (!uses_only_table_name_fields(item_row->element_index(i), table))
return 0;
}
}
else if (item->type() == Item::FIELD_ITEM)
{
Item_field *item_field= (Item_field*)item;
@@ -3731,6 +3740,11 @@ bool uses_only_table_name_fields(Item *item, TABLE_LIST *table)
strlen(item_field->field_name))))
return 0;
}
else if (item->type() == Item::EXPR_CACHE_ITEM)
{
Item_cache_wrapper *tmp= static_cast<Item_cache_wrapper*>(item);
return uses_only_table_name_fields(tmp->get_orig_item(), table);
}
else if (item->type() == Item::REF_ITEM)
return uses_only_table_name_fields(item->real_item(), table);
......
@@ -824,7 +824,6 @@ buf_flush_update_zip_checksum(
static_cast<srv_checksum_algorithm_t>(srv_checksum_algorithm));
mach_write_to_8(page + FIL_PAGE_LSN, lsn);
memset(page + FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION, 0, 8);
mach_write_to_4(page + FIL_PAGE_SPACE_OR_CHKSUM, checksum);
}
@@ -1077,7 +1076,6 @@ buf_flush_write_block_low(
bpage->newest_modification);
ut_a(page_zip_verify_checksum(frame, bpage->size.physical()));
memset(frame + FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION, 0, 8);
break;
case BUF_BLOCK_FILE_PAGE:
frame = bpage->zip.data;
......
@@ -757,7 +757,6 @@ buf_flush_update_zip_checksum(
srv_checksum_algorithm)));
mach_write_to_8(page + FIL_PAGE_LSN, lsn);
memset(page + FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION, 0, 8);
mach_write_to_4(page + FIL_PAGE_SPACE_OR_CHKSUM, checksum);
}
@@ -935,8 +934,6 @@ buf_flush_write_block_low(
bpage->newest_modification);
ut_a(page_zip_verify_checksum(frame, zip_size));
memset(frame + FIL_PAGE_FILE_FLUSH_LSN_OR_KEY_VERSION, 0, 8);
break;
case BUF_BLOCK_FILE_PAGE:
frame = bpage->zip.data;
......
@@ -1045,156 +1045,24 @@ fil_space_extend_must_retry(
page_size = UNIV_PAGE_SIZE;
}
#ifdef _WIN32
const ulint io_completion_type = OS_FILE_READ;
/* Logically or physically extend the file with zero bytes,
depending on whether it is sparse. */
/* fil_read_first_page() expects UNIV_PAGE_SIZE bytes.
fil_node_open_file() expects at least 4 * UNIV_PAGE_SIZE bytes.*/
os_offset_t new_size = std::max(
os_offset_t(size - file_start_page_no) * page_size,
os_offset_t(FIL_IBD_FILE_INITIAL_SIZE * UNIV_PAGE_SIZE));
/* FIXME: Call DeviceIoControl(node->handle, FSCTL_SET_SPARSE, ...)
when opening a file when FSP_FLAGS_HAS_PAGE_COMPRESSION(). */
{
FILE_END_OF_FILE_INFO feof;
/* fil_read_first_page() expects UNIV_PAGE_SIZE bytes.
fil_node_open_file() expects at least 4 * UNIV_PAGE_SIZE bytes.
Do not shrink short ROW_FORMAT=COMPRESSED files. */
feof.EndOfFile.QuadPart = std::max(
os_offset_t(size - file_start_page_no) * page_size,
os_offset_t(FIL_IBD_FILE_INITIAL_SIZE
* UNIV_PAGE_SIZE));
*success = SetFileInformationByHandle(node->handle,
FileEndOfFileInfo,
&feof, sizeof feof);
if (!*success) {
ib_logf(IB_LOG_LEVEL_ERROR, "extending file %s"
" from " INT64PF
" to " INT64PF " bytes failed with %u",
node->name,
os_offset_t(node->size) * page_size,
feof.EndOfFile.QuadPart, GetLastError());
} else {
start_page_no = size;
}
}
#else
/* We will logically extend the file with ftruncate() if
page_compression is enabled, because the file is expected to
be sparse in that case. Make sure that ftruncate() can deal
with large files. */
const bool is_sparse = sizeof(off_t) >= 8
&& FSP_FLAGS_HAS_PAGE_COMPRESSION(space->flags);
# ifdef HAVE_POSIX_FALLOCATE
/* We must complete the I/O request after invoking
posix_fallocate() to avoid an assertion failure at shutdown.
Because no actual writes were dispatched, a read operation
will suffice. */
const ulint io_completion_type = srv_use_posix_fallocate
|| is_sparse ? OS_FILE_READ : OS_FILE_WRITE;
if (srv_use_posix_fallocate && !is_sparse) {
const os_offset_t start_offset
= os_offset_t(start_page_no - file_start_page_no)
* page_size;
const ulint n_pages = size - start_page_no;
const os_offset_t len = os_offset_t(n_pages) * page_size;
int err;
do {
err = posix_fallocate(node->handle, start_offset, len);
} while (err == EINTR
&& srv_shutdown_state == SRV_SHUTDOWN_NONE);
*success = !err;
if (!*success) {
ib_logf(IB_LOG_LEVEL_ERROR, "extending file %s"
" from " INT64PF " to " INT64PF " bytes"
" failed with error %d",
node->name, start_offset, len + start_offset,
err);
}
DBUG_EXECUTE_IF("ib_os_aio_func_io_failure_28",
*success = FALSE;
os_has_said_disk_full = TRUE;);
if (*success) {
os_has_said_disk_full = FALSE;
start_page_no = size;
}
} else
# else
const ulint io_completion_type = is_sparse
? OS_FILE_READ : OS_FILE_WRITE;
# endif
if (is_sparse) {
/* fil_read_first_page() expects UNIV_PAGE_SIZE bytes.
fil_node_open_file() expects at least 4 * UNIV_PAGE_SIZE bytes.
Do not shrink short ROW_FORMAT=COMPRESSED files. */
off_t s = std::max(off_t(size - file_start_page_no)
* off_t(page_size),
off_t(FIL_IBD_FILE_INITIAL_SIZE
* UNIV_PAGE_SIZE));
*success = !ftruncate(node->handle, s);
if (!*success) {
ib_logf(IB_LOG_LEVEL_ERROR, "ftruncate of file %s"
" from " INT64PF " to " INT64PF " bytes"
" failed with error %d",
node->name,
os_offset_t(start_page_no - file_start_page_no)
* page_size, os_offset_t(s), errno);
} else {
start_page_no = size;
}
} else {
/* Extend at most 64 pages at a time */
ulint buf_size = ut_min(64, size - start_page_no)
* page_size;
byte* buf2 = static_cast<byte*>(
calloc(1, buf_size + page_size));
*success = buf2 != NULL;
if (!buf2) {
ib_logf(IB_LOG_LEVEL_ERROR, "Cannot allocate " ULINTPF
" bytes to extend file",
buf_size + page_size);
}
byte* const buf = static_cast<byte*>(
ut_align(buf2, page_size));
while (*success && start_page_no < size) {
ulint n_pages
= ut_min(buf_size / page_size,
size - start_page_no);
os_offset_t offset = static_cast<os_offset_t>(
start_page_no - file_start_page_no)
* page_size;
*success = os_aio(OS_FILE_WRITE, 0, OS_AIO_SYNC,
node->name, node->handle, buf,
offset, page_size * n_pages,
page_size, node, NULL,
space->id, NULL, 0);
DBUG_EXECUTE_IF("ib_os_aio_func_io_failure_28",
*success = FALSE;
os_has_said_disk_full = TRUE;);
if (*success) {
os_has_said_disk_full = FALSE;
}
/* Let us measure the size of the file
to determine how much we were able to
extend it */
os_offset_t fsize = os_file_get_size(node->handle);
ut_a(fsize != os_offset_t(-1));
*success = os_file_set_size(node->name, node->handle, new_size,
FSP_FLAGS_HAS_PAGE_COMPRESSION(space->flags));
start_page_no = ulint(fsize / page_size)
+ file_start_page_no;
}
free(buf2);
DBUG_EXECUTE_IF("ib_os_aio_func_io_failure_28",
*success = FALSE;
os_has_said_disk_full = TRUE;);
if (*success) {
os_has_said_disk_full = FALSE;
start_page_no = size;
}
#endif
mutex_enter(&fil_system->mutex);
ut_a(node->being_extended);
@@ -1204,7 +1072,7 @@ fil_space_extend_must_retry(
space->size += file_size - node->size;
node->size = file_size;
fil_node_complete_io(node, fil_system, io_completion_type);
fil_node_complete_io(node, fil_system, OS_FILE_READ);
node->being_extended = FALSE;
......
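Reading the added lines of the hunk above together (in this view they are interleaved with the removed platform-specific code), the new extension path amounts to roughly the following. This is a reassembled sketch based only on the lines shown, not a verbatim copy of the resulting function:

/* Reassembled sketch of the new extension path in
   fil_space_extend_must_retry(), based on the added lines above. */

/* fil_read_first_page() expects UNIV_PAGE_SIZE bytes.
   fil_node_open_file() expects at least 4 * UNIV_PAGE_SIZE bytes. */
os_offset_t new_size = std::max(
        os_offset_t(size - file_start_page_no) * page_size,
        os_offset_t(FIL_IBD_FILE_INITIAL_SIZE * UNIV_PAGE_SIZE));

/* One call extends the file; page-compressed tablespaces stay sparse. */
*success = os_file_set_size(node->name, node->handle, new_size,
                            FSP_FLAGS_HAS_PAGE_COMPRESSION(space->flags));

DBUG_EXECUTE_IF("ib_os_aio_func_io_failure_28",
                *success = FALSE;
                os_has_said_disk_full = TRUE;);

if (*success) {
        os_has_said_disk_full = FALSE;
        start_page_no = size;
}

/* No writes were dispatched here, so the pending node I/O is
   completed as a read rather than a write. */
fil_node_complete_io(node, fil_system, OS_FILE_READ);

The per-platform SetFileInformationByHandle(), posix_fallocate() and ftruncate() handling that used to live here is now documented on os_file_set_size() itself, in the os0file.cc hunk further down.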
@@ -2575,7 +2575,16 @@ os_file_get_size(
#endif /* __WIN__ */
}
/** Set the size of a newly created file.
/** Extend a file.
On Windows, extending a file allocates blocks for the file,
unless the file is sparse.
On Unix, we will extend the file with ftruncate(), if
file needs to be sparse. Otherwise posix_fallocate() is used
when available, and if not, binary zeroes are added to the end
of file.
@param[in] name file name
@param[in] file file handle
@param[in] size desired file size
@@ -2626,15 +2635,21 @@ os_file_set_size(
"file %s failed with error %d",
size, name, err);
}
/* Set errno because posix_fallocate() does not do it.*/
errno = err;
return(!err);
}
# endif
os_offset_t current_size = os_file_get_size(file);
if (current_size >= size) {
return true;
}
/* Write up to 1 megabyte at a time. */
ulint buf_size = ut_min(64, (ulint) (size / UNIV_PAGE_SIZE))
* UNIV_PAGE_SIZE;
os_offset_t current_size = 0;
byte* buf2 = static_cast<byte*>(calloc(1, buf_size + UNIV_PAGE_SIZE));
if (!buf2) {
......
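The new doc comment in this hunk spells out the strategy: a file that should be sparse is logically extended with ftruncate(), posix_fallocate() is used when available, and otherwise binary zeroes are appended to the end of the file. A hedged, Unix-only sketch of that strategy (a hypothetical helper, not the committed os_file_set_size()):

#include <cerrno>
#include <fcntl.h>
#include <unistd.h>

/* Hypothetical helper sketching the strategy described in the comment
   above; names and structure are assumptions, not the server code. */
static bool extend_file_sketch(int fd, off_t new_size, bool want_sparse)
{
        if (want_sparse) {
                /* Logically extend: the length grows, but no blocks are
                   allocated until the pages are actually written. */
                return ftruncate(fd, new_size) == 0;
        }
#ifdef HAVE_POSIX_FALLOCATE
        int err;
        do {
                /* Physically allocate the whole range [0, new_size). */
                err = posix_fallocate(fd, 0, new_size);
        } while (err == EINTR);
        /* posix_fallocate() returns the error instead of setting errno,
           matching the errno = err line added in the hunk above. */
        errno = err;
        return err == 0;
#else
        /* Fall back to appending binary zeroes until new_size is reached. */
        static const char zeroes[4096] = {0};
        off_t current = lseek(fd, 0, SEEK_END);
        if (current < 0) {
                return false;
        }
        while (current < new_size) {
                const off_t remaining = new_size - current;
                const size_t chunk = remaining < off_t(sizeof zeroes)
                        ? size_t(remaining) : sizeof zeroes;
                const ssize_t written = write(fd, zeroes, chunk);
                if (written <= 0) {
                        return false;
                }
                current += written;
        }
        return true;
#endif
}

The want_sparse case corresponds to page-compressed tablespaces: the caller in fil_space_extend_must_retry() passes FSP_FLAGS_HAS_PAGE_COMPRESSION(space->flags) for exactly that reason.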