Commit 01e656a6 authored by Vladislav Vaintroub

Merge branch 'bb-10.1-wlad' into 10.1

parents f9b50c06 bb3f4fbb
@@ -79,7 +79,7 @@ wf_incremental_init(xb_write_filt_ctxt_t *ctxt, char *dst_name,
cp->delta_buf_base = static_cast<byte *>(ut_malloc(buf_size));
memset(cp->delta_buf_base, 0, buf_size);
cp->delta_buf = static_cast<byte *>
(ut_align(cp->delta_buf_base, UNIV_PAGE_SIZE_MAX));
(ut_align(cp->delta_buf_base, cursor->page_size));
/* write delta meta info */
snprintf(meta_name, sizeof(meta_name), "%s%s", dst_name,
......
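The hunk above changes the delta buffer alignment from the fixed UNIV_PAGE_SIZE_MAX to the tablespace's actual page size (cursor->page_size). A minimal sketch of the over-allocate-then-align idiom that ut_align() expresses, written in plain C++ with an illustrative align_up() helper (not the InnoDB API):

    #include <cstdint>
    #include <cstdlib>
    #include <cstring>

    /* Round a pointer up to the next multiple of `align` (a power of two);
       this is the idea ut_align() expresses inside InnoDB. */
    static inline void* align_up(void* ptr, std::uintptr_t align)
    {
        std::uintptr_t p = reinterpret_cast<std::uintptr_t>(ptr);
        return reinterpret_cast<void*>((p + align - 1) & ~(align - 1));
    }

    int main()
    {
        const std::size_t page_size   = 16 * 1024;  /* stands in for cursor->page_size */
        const std::size_t delta_pages = 64;
        /* Over-allocate by one page so a page-aligned window of the full size fits. */
        const std::size_t buf_size = (delta_pages + 1) * page_size;
        void* base = std::malloc(buf_size);         /* role of delta_buf_base */
        std::memset(base, 0, buf_size);
        void* buf  = align_up(base, page_size);     /* role of delta_buf */
        /* ... fill buf with page-aligned delta pages ... */
        std::free(base);
        return 0;
    }

Aligning to the tablespace's own page size rather than UNIV_PAGE_SIZE_MAX means at most one page of that size is needed as slack in the raw allocation.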
@@ -4953,12 +4953,17 @@ xtrabackup_apply_delta(
if (offset_on_page == 0xFFFFFFFFUL)
break;
success = os_file_write(dst_path, dst_file,
incremental_buffer +
page_in_buffer * page_size,
(offset_on_page <<
page_size_shift),
page_size);
uchar *buf = incremental_buffer + page_in_buffer * page_size;
const os_offset_t off = os_offset_t(offset_on_page)*page_size;
if (off == 0) {
/* Fix tablespace size. */
os_offset_t n_pages = fsp_get_size_low(static_cast<ib_page_t *>(buf));
if (!os_file_set_size(dst_path, dst_file, n_pages*page_size))
goto error;
}
success = os_file_write(dst_path, dst_file, buf, off, page_size);
if (!success) {
goto error;
}
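In the rewritten write path above, offset_on_page is the page number inside the destination tablespace, so the byte offset is simply offset_on_page * page_size (the old code shifted by page_size_shift instead). When that offset is 0 the buffer holds page 0, whose FSP header records the tablespace size in pages, and the target file is extended to that size before the page is written, replacing the separate size-fixing pass removed further down. A rough, self-contained sketch of the same idea; fsp_size_from_page0() and the assumed header offsets (FIL_PAGE_DATA = 38, FSP_SIZE = 8) are illustrative stand-ins for the real fsp_*/os_file_* helpers:

    #include <cstdint>
    #include <cstdio>
    #include <filesystem>

    /* Assumed page-0 layout: FSP_SIZE is a 4-byte big-endian count of pages,
       8 bytes into the FSP header, which starts at FIL_PAGE_DATA (38). */
    static std::uint32_t fsp_size_from_page0(const unsigned char* page)
    {
        const unsigned char* p = page + 38 + 8;
        return (std::uint32_t(p[0]) << 24) | (std::uint32_t(p[1]) << 16)
             | (std::uint32_t(p[2]) << 8)  |  std::uint32_t(p[3]);
    }

    /* Apply one delta page: page_no is its position inside the target tablespace. */
    static bool apply_one_page(const std::filesystem::path& dst, std::FILE* f,
                               const unsigned char* page,
                               std::uint32_t page_no, std::size_t page_size)
    {
        const std::uint64_t off = std::uint64_t(page_no) * page_size;
        if (off == 0) {
            /* Page 0 carries the FSP header: grow the file to the size it
               records before writing, instead of fixing sizes in a later pass. */
            const std::uint64_t want =
                std::uint64_t(fsp_size_from_page0(page)) * page_size;
            if (std::filesystem::file_size(dst) < want)
                std::filesystem::resize_file(dst, want);
        }
        /* long cast is fine for a sketch; the real code uses 64-bit offsets. */
        return std::fseek(f, long(off), SEEK_SET) == 0
            && std::fwrite(page, 1, page_size, f) == page_size;
    }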
@@ -4969,8 +4974,11 @@ xtrabackup_apply_delta(
if (incremental_buffer_base)
ut_free(incremental_buffer_base);
if (src_file != XB_FILE_UNDEFINED)
if (src_file != XB_FILE_UNDEFINED) {
os_file_close(src_file);
/* Remove the .delta file after it was successfully applied. */
os_file_delete(0,src_path);
}
if (dst_file != XB_FILE_UNDEFINED)
os_file_close(dst_file);
return TRUE;
@@ -5790,52 +5798,6 @@ xtrabackup_prepare_func(int argc, char ** argv)
if(innodb_init())
goto error_cleanup;
if (xtrabackup_incremental) {
it = datafiles_iter_new(fil_system);
if (it == NULL) {
msg("xtrabackup: Error: datafiles_iter_new() failed.\n");
exit(EXIT_FAILURE);
}
while ((node = datafiles_iter_next(it)) != NULL) {
byte *header;
ulint size;
ulint actual_size;
mtr_t mtr;
buf_block_t *block;
ulint flags;
space = node->space;
/* Align space sizes along with fsp header. We want to process
each space once, so skip all nodes except the first one in a
multi-node space. */
if (UT_LIST_GET_PREV(chain, node) != NULL) {
continue;
}
mtr_start(&mtr);
mtr_s_lock(fil_space_get_latch(space->id, &flags), &mtr);
block = buf_page_get(space->id,
dict_tf_get_zip_size(flags),
0, RW_S_LATCH, &mtr);
header = FSP_HEADER_OFFSET + buf_block_get_frame(block);
size = mtr_read_ulint(header + FSP_SIZE, MLOG_4BYTES,
&mtr);
mtr_commit(&mtr);
fil_extend_space_to_desired_size(&actual_size, space->id, size);
}
datafiles_iter_free(it);
} /* if (xtrabackup_incremental) */
if (xtrabackup_export) {
msg("xtrabackup: export option is specified.\n");
pfs_os_file_t info_file;
......
@@ -1037,134 +1037,16 @@ fil_space_extend_must_retry(
page_size = UNIV_PAGE_SIZE;
}
#ifdef _WIN32
const ulint io_completion_type = OS_FILE_READ;
/* Logically or physically extend the file with zero bytes,
depending on whether it is sparse. */
/* FIXME: Call DeviceIoControl(node->handle, FSCTL_SET_SPARSE, ...)
when opening a file when FSP_FLAGS_HAS_PAGE_COMPRESSION(). */
{
FILE_END_OF_FILE_INFO feof;
/* fil_read_first_page() expects UNIV_PAGE_SIZE bytes.
fil_node_open_file() expects at least 4 * UNIV_PAGE_SIZE bytes.
Do not shrink short ROW_FORMAT=COMPRESSED files. */
feof.EndOfFile.QuadPart = std::max(
os_offset_t(size - file_start_page_no) * page_size,
os_offset_t(FIL_IBD_FILE_INITIAL_SIZE
* UNIV_PAGE_SIZE));
*success = SetFileInformationByHandle(node->handle,
FileEndOfFileInfo,
&feof, sizeof feof);
if (!*success) {
ib_logf(IB_LOG_LEVEL_ERROR, "extending file %s"
" from " INT64PF
" to " INT64PF " bytes failed with %u",
node->name,
os_offset_t(node->size) * page_size,
feof.EndOfFile.QuadPart, GetLastError());
} else {
start_page_no = size;
}
}
#else
/* We will logically extend the file with ftruncate() if
page_compression is enabled, because the file is expected to
be sparse in that case. Make sure that ftruncate() can deal
with large files. */
const bool is_sparse = sizeof(off_t) >= 8
&& FSP_FLAGS_HAS_PAGE_COMPRESSION(space->flags);
# ifdef HAVE_POSIX_FALLOCATE
/* We must complete the I/O request after invoking
posix_fallocate() to avoid an assertion failure at shutdown.
Because no actual writes were dispatched, a read operation
will suffice. */
const ulint io_completion_type = srv_use_posix_fallocate
|| is_sparse ? OS_FILE_READ : OS_FILE_WRITE;
if (srv_use_posix_fallocate && !is_sparse) {
const os_offset_t start_offset
= os_offset_t(start_page_no - file_start_page_no)
* page_size;
const ulint n_pages = size - start_page_no;
const os_offset_t len = os_offset_t(n_pages) * page_size;
int err;
do {
err = posix_fallocate(node->handle, start_offset, len);
} while (err == EINTR
&& srv_shutdown_state == SRV_SHUTDOWN_NONE);
fil_node_open_file() expects at least 4 * UNIV_PAGE_SIZE bytes.*/
*success = !err;
if (!*success) {
ib_logf(IB_LOG_LEVEL_ERROR, "extending file %s"
" from " INT64PF " to " INT64PF " bytes"
" failed with error %d",
node->name, start_offset, len + start_offset,
err);
}
os_offset_t new_size = std::max(
os_offset_t(size - file_start_page_no) * page_size,
os_offset_t(FIL_IBD_FILE_INITIAL_SIZE * UNIV_PAGE_SIZE));
DBUG_EXECUTE_IF("ib_os_aio_func_io_failure_28",
*success = FALSE;
os_has_said_disk_full = TRUE;);
*success = os_file_set_size(node->name, node->handle, new_size,
FSP_FLAGS_HAS_PAGE_COMPRESSION(space->flags));
if (*success) {
os_has_said_disk_full = FALSE;
start_page_no = size;
}
} else
# else
const ulint io_completion_type = is_sparse
? OS_FILE_READ : OS_FILE_WRITE;
# endif
if (is_sparse) {
/* fil_read_first_page() expects UNIV_PAGE_SIZE bytes.
fil_node_open_file() expects at least 4 * UNIV_PAGE_SIZE bytes.
Do not shrink short ROW_FORMAT=COMPRESSED files. */
off_t s = std::max(off_t(size - file_start_page_no)
* off_t(page_size),
off_t(FIL_IBD_FILE_INITIAL_SIZE
* UNIV_PAGE_SIZE));
*success = !ftruncate(node->handle, s);
if (!*success) {
ib_logf(IB_LOG_LEVEL_ERROR, "ftruncate of file %s"
" from " INT64PF " to " INT64PF " bytes"
" failed with error %d",
node->name,
os_offset_t(start_page_no - file_start_page_no)
* page_size, os_offset_t(s), errno);
} else {
start_page_no = size;
}
} else {
/* Extend at most 64 pages at a time */
ulint buf_size = ut_min(64, size - start_page_no)
* page_size;
byte* buf2 = static_cast<byte*>(
calloc(1, buf_size + page_size));
*success = buf2 != NULL;
if (!buf2) {
ib_logf(IB_LOG_LEVEL_ERROR, "Cannot allocate " ULINTPF
" bytes to extend file",
buf_size + page_size);
}
byte* const buf = static_cast<byte*>(
ut_align(buf2, page_size));
while (*success && start_page_no < size) {
ulint n_pages
= ut_min(buf_size / page_size,
size - start_page_no);
os_offset_t offset = static_cast<os_offset_t>(
start_page_no - file_start_page_no)
* page_size;
*success = os_aio(OS_FILE_WRITE, 0, OS_AIO_SYNC,
node->name, node->handle, buf,
offset, page_size * n_pages,
page_size, node, NULL, 0);
DBUG_EXECUTE_IF("ib_os_aio_func_io_failure_28",
*success = FALSE;
@@ -1172,20 +1054,9 @@ fil_space_extend_must_retry(
if (*success) {
os_has_said_disk_full = FALSE;
}
/* Let us measure the size of the file
to determine how much we were able to
extend it */
os_offset_t fsize = os_file_get_size(node->handle);
ut_a(fsize != os_offset_t(-1));
start_page_no = ulint(fsize / page_size)
+ file_start_page_no;
start_page_no = size;
}
free(buf2);
}
#endif
mutex_enter(&fil_system->mutex);
ut_a(node->being_extended);
@@ -1195,7 +1066,7 @@ fil_space_extend_must_retry(
space->size += file_size - node->size;
node->size = file_size;
fil_node_complete_io(node, fil_system, io_completion_type);
fil_node_complete_io(node, fil_system, OS_FILE_READ);
node->being_extended = FALSE;
......
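The two fil0fil.cc hunks above drop the platform-specific extension code (SetFileInformationByHandle on Windows; posix_fallocate, ftruncate and the os_aio zero-fill loop on Unix) in favour of a single os_file_set_size() call that receives FSP_FLAGS_HAS_PAGE_COMPRESSION(space->flags), so page-compressed tablespaces are kept sparse, and the pending I/O slot is now always completed as OS_FILE_READ because no write is dispatched through the AIO layer. A minimal POSIX-only sketch of that sparse-versus-allocated decision, assuming a plain file descriptor and HAVE_POSIX_FALLOCATE (illustrative, not the server's implementation):

    #include <cerrno>
    #include <fcntl.h>
    #include <unistd.h>

    /* Grow fd to new_size bytes. A sparse file is only logically extended with
       ftruncate(); otherwise blocks are really allocated with posix_fallocate(). */
    static bool extend_file(int fd, off_t current_size, off_t new_size, bool is_sparse)
    {
        if (new_size <= current_size)
            return true;                    /* never shrink */
        if (is_sparse)
            return ftruncate(fd, new_size) == 0;
        int err;
        do {
            /* retried on EINTR, mirroring the loop in the original code */
            err = posix_fallocate(fd, current_size, new_size - current_size);
        } while (err == EINTR);
        errno = err;                        /* posix_fallocate() does not set errno */
        return err == 0;
    }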
@@ -2340,7 +2340,16 @@ os_file_get_size(
#endif /* __WIN__ */
}
/** Set the size of a newly created file.
/** Extend a file.
On Windows, extending a file allocates blocks for the file,
unless the file is sparse.
On Unix, we will extend the file with ftruncate() if the
file needs to be sparse. Otherwise posix_fallocate() is used
when available, and if not, binary zeroes are added to the end
of the file.
@param[in] name file name
@param[in] file file handle
@param[in] size desired file size
@@ -2391,15 +2400,21 @@ os_file_set_size(
"file %s failed with error %d",
size, name, err);
}
/* Set errno because posix_fallocate() does not do it.*/
errno = err;
return(!err);
}
# endif
os_offset_t current_size = os_file_get_size(file);
if (current_size >= size) {
return true;
}
/* Write up to 1 megabyte at a time. */
ulint buf_size = ut_min(64, (ulint) (size / UNIV_PAGE_SIZE))
* UNIV_PAGE_SIZE;
os_offset_t current_size = 0;
byte* buf2 = static_cast<byte*>(calloc(1, buf_size + UNIV_PAGE_SIZE));
if (!buf2) {
......
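The updated os_file_set_size() comment above spells out the fallback chain; when neither ftruncate() nor posix_fallocate() applies, the file is grown by appending zero-filled buffers of at most 64 pages, which is 1 MiB at the default 16 KiB UNIV_PAGE_SIZE. A rough sketch of that last-resort path, assuming a POSIX descriptor and pwrite() rather than the server's os_file_write():

    #include <sys/types.h>
    #include <unistd.h>
    #include <cstdlib>

    /* Pad fd with binary zeroes from current_size up to size, 1 MiB at a time. */
    static bool zero_fill(int fd, off_t current_size, off_t size)
    {
        const std::size_t page_size = 16 * 1024;        /* default UNIV_PAGE_SIZE */
        const std::size_t buf_size  = 64 * page_size;   /* 1 MiB chunk */
        void* buf = std::calloc(1, buf_size);           /* already zero-filled */
        if (!buf)
            return false;
        bool ok = true;
        while (ok && current_size < size) {
            std::size_t n = std::size_t(size - current_size) < buf_size
                          ? std::size_t(size - current_size) : buf_size;
            ok = pwrite(fd, buf, n, current_size) == ssize_t(n);
            current_size += off_t(n);
        }
        std::free(buf);
        return ok;
    }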
@@ -1045,135 +1045,15 @@ fil_space_extend_must_retry(
page_size = UNIV_PAGE_SIZE;
}
#ifdef _WIN32
const ulint io_completion_type = OS_FILE_READ;
/* Logically or physically extend the file with zero bytes,
depending on whether it is sparse. */
/* FIXME: Call DeviceIoControl(node->handle, FSCTL_SET_SPARSE, ...)
when opening a file when FSP_FLAGS_HAS_PAGE_COMPRESSION(). */
{
FILE_END_OF_FILE_INFO feof;
/* fil_read_first_page() expects UNIV_PAGE_SIZE bytes.
fil_node_open_file() expects at least 4 * UNIV_PAGE_SIZE bytes.
Do not shrink short ROW_FORMAT=COMPRESSED files. */
feof.EndOfFile.QuadPart = std::max(
fil_node_open_file() expects at least 4 * UNIV_PAGE_SIZE bytes.*/
os_offset_t new_size = std::max(
os_offset_t(size - file_start_page_no) * page_size,
os_offset_t(FIL_IBD_FILE_INITIAL_SIZE
* UNIV_PAGE_SIZE));
*success = SetFileInformationByHandle(node->handle,
FileEndOfFileInfo,
&feof, sizeof feof);
if (!*success) {
ib_logf(IB_LOG_LEVEL_ERROR, "extending file %s"
" from " INT64PF
" to " INT64PF " bytes failed with %u",
node->name,
os_offset_t(node->size) * page_size,
feof.EndOfFile.QuadPart, GetLastError());
} else {
start_page_no = size;
}
}
#else
/* We will logically extend the file with ftruncate() if
page_compression is enabled, because the file is expected to
be sparse in that case. Make sure that ftruncate() can deal
with large files. */
const bool is_sparse = sizeof(off_t) >= 8
&& FSP_FLAGS_HAS_PAGE_COMPRESSION(space->flags);
# ifdef HAVE_POSIX_FALLOCATE
/* We must complete the I/O request after invoking
posix_fallocate() to avoid an assertion failure at shutdown.
Because no actual writes were dispatched, a read operation
will suffice. */
const ulint io_completion_type = srv_use_posix_fallocate
|| is_sparse ? OS_FILE_READ : OS_FILE_WRITE;
if (srv_use_posix_fallocate && !is_sparse) {
const os_offset_t start_offset
= os_offset_t(start_page_no - file_start_page_no)
* page_size;
const ulint n_pages = size - start_page_no;
const os_offset_t len = os_offset_t(n_pages) * page_size;
int err;
do {
err = posix_fallocate(node->handle, start_offset, len);
} while (err == EINTR
&& srv_shutdown_state == SRV_SHUTDOWN_NONE);
os_offset_t(FIL_IBD_FILE_INITIAL_SIZE * UNIV_PAGE_SIZE));
*success = !err;
if (!*success) {
ib_logf(IB_LOG_LEVEL_ERROR, "extending file %s"
" from " INT64PF " to " INT64PF " bytes"
" failed with error %d",
node->name, start_offset, len + start_offset,
err);
}
*success = os_file_set_size(node->name, node->handle, new_size,
FSP_FLAGS_HAS_PAGE_COMPRESSION(space->flags));
DBUG_EXECUTE_IF("ib_os_aio_func_io_failure_28",
*success = FALSE;
os_has_said_disk_full = TRUE;);
if (*success) {
os_has_said_disk_full = FALSE;
start_page_no = size;
}
} else
# else
const ulint io_completion_type = is_sparse
? OS_FILE_READ : OS_FILE_WRITE;
# endif
if (is_sparse) {
/* fil_read_first_page() expects UNIV_PAGE_SIZE bytes.
fil_node_open_file() expects at least 4 * UNIV_PAGE_SIZE bytes.
Do not shrink short ROW_FORMAT=COMPRESSED files. */
off_t s = std::max(off_t(size - file_start_page_no)
* off_t(page_size),
off_t(FIL_IBD_FILE_INITIAL_SIZE
* UNIV_PAGE_SIZE));
*success = !ftruncate(node->handle, s);
if (!*success) {
ib_logf(IB_LOG_LEVEL_ERROR, "ftruncate of file %s"
" from " INT64PF " to " INT64PF " bytes"
" failed with error %d",
node->name,
os_offset_t(start_page_no - file_start_page_no)
* page_size, os_offset_t(s), errno);
} else {
start_page_no = size;
}
} else {
/* Extend at most 64 pages at a time */
ulint buf_size = ut_min(64, size - start_page_no)
* page_size;
byte* buf2 = static_cast<byte*>(
calloc(1, buf_size + page_size));
*success = buf2 != NULL;
if (!buf2) {
ib_logf(IB_LOG_LEVEL_ERROR, "Cannot allocate " ULINTPF
" bytes to extend file",
buf_size + page_size);
}
byte* const buf = static_cast<byte*>(
ut_align(buf2, page_size));
while (*success && start_page_no < size) {
ulint n_pages
= ut_min(buf_size / page_size,
size - start_page_no);
os_offset_t offset = static_cast<os_offset_t>(
start_page_no - file_start_page_no)
* page_size;
*success = os_aio(OS_FILE_WRITE, 0, OS_AIO_SYNC,
node->name, node->handle, buf,
offset, page_size * n_pages,
page_size, node, NULL,
space->id, NULL, 0);
DBUG_EXECUTE_IF("ib_os_aio_func_io_failure_28",
*success = FALSE;
@@ -1181,20 +1061,8 @@ fil_space_extend_must_retry(
if (*success) {
os_has_said_disk_full = FALSE;
start_page_no = size;
}
/* Let us measure the size of the file
to determine how much we were able to
extend it */
os_offset_t fsize = os_file_get_size(node->handle);
ut_a(fsize != os_offset_t(-1));
start_page_no = ulint(fsize / page_size)
+ file_start_page_no;
}
free(buf2);
}
#endif
mutex_enter(&fil_system->mutex);
ut_a(node->being_extended);
@@ -1204,7 +1072,7 @@ fil_space_extend_must_retry(
space->size += file_size - node->size;
node->size = file_size;
fil_node_complete_io(node, fil_system, io_completion_type);
fil_node_complete_io(node, fil_system, OS_FILE_READ);
node->being_extended = FALSE;
......
@@ -2575,7 +2575,16 @@ os_file_get_size(
#endif /* __WIN__ */
}
/** Set the size of a newly created file.
/** Extend a file.
On Windows, extending a file allocates blocks for the file,
unless the file is sparse.
On Unix, we will extend the file with ftruncate() if the
file needs to be sparse. Otherwise posix_fallocate() is used
when available, and if not, binary zeroes are added to the end
of the file.
@param[in] name file name
@param[in] file file handle
@param[in] size desired file size
@@ -2626,15 +2635,21 @@ os_file_set_size(
"file %s failed with error %d",
size, name, err);
}
/* Set errno because posix_fallocate() does not do it.*/
errno = err;
return(!err);
}
# endif
os_offset_t current_size = os_file_get_size(file);
if (current_size >= size) {
return true;
}
/* Write up to 1 megabyte at a time. */
ulint buf_size = ut_min(64, (ulint) (size / UNIV_PAGE_SIZE))
* UNIV_PAGE_SIZE;
os_offset_t current_size = 0;
byte* buf2 = static_cast<byte*>(calloc(1, buf_size + UNIV_PAGE_SIZE));
if (!buf2) {
......